2024
Garrido-Labrador, José Luis; Serrano-Mamolar, Ana; Maudes-Raedo, Jesús; Rodríguez, Juan José; García-Osorio, César
Ensemble methods and semi-supervised learning for information fusion: A review and future research directions Journal Article
In: Information Fusion, vol. 107, 2024.
Abstract | Links | BibTeX | Tags: Bibliographic review, Ensemble learning, Experimental protocol, Information fusion, Label scarcity, Research trends, Semi-supervised ensemble classification, Semi-supervised learning
@article{garrido2024ensemble,
  title         = {Ensemble methods and semi-supervised learning for information fusion: A review and future research directions},
  author        = {Garrido-Labrador, José Luis and Serrano-Mamolar, Ana and Maudes-Raedo, Jesús and Rodríguez, Juan José and García-Osorio, César},
  journal       = {Information Fusion},
  volume        = {107},
  pages         = {102310},
  year          = {2024},
  date          = {2024-02-02},
  urldate       = {2024-02-02},
  doi           = {10.1016/j.inffus.2024.102310},
  abstract      = {Advances over the past decade at the intersection of information fusion methods and Semi-Supervised Learning (SSL) are investigated in this paper that grapple with challenges related to limited labelled data. To do so, a bibliographic review of papers published since 2013 is presented, in which ensemble methods are combined with new machine learning algorithms. A total of 128 new proposals using SSL algorithms for ensemble construction are identified and classified. All the methods are categorised by approach, ensemble type, and base classifier. Experimental protocols, pre-processing, dataset usage, unlabelled ratios, and statistical tests are also assessed, underlining the major trends, and some shortcomings of particular studies. It is evident from this literature review that foundational algorithms such as self-training and co-training are influencing current developments, and that innovative ensemble …},
  internal-note = {abstract truncated in the original export (trailing ellipsis); pages holds the Elsevier article number inferred from the DOI suffix — verify against the publisher record},
  keywords      = {Bibliographic review, Ensemble learning, Experimental protocol, Information fusion, Label scarcity, Research trends, Semi-supervised ensemble classification, Semi-supervised learning},
  pubstate      = {published},
  tppubtype     = {article},
}
2021
Juez-Gil, Mario; Arnaiz-González, Álvar; Rodríguez, Juan José; López-Nozal, Carlos; García-Osorio, César
Rotation Forest for Big Data Journal Article
In: Information Fusion, vol. 74, pp. 39-49, 2021, ISSN: 1566-2535.
Abstract | Links | BibTeX | Tags: Big data, Ensemble learning, Machine learning, Random forest, Rotation forest, SELECTED, Spark
@article{Juez-Gil2021,
  title     = {{Rotation Forest} for {Big Data}},
  author    = {Juez-Gil, Mario and Arnaiz-González, Álvar and Rodríguez, Juan José and López-Nozal, Carlos and García-Osorio, César},
  journal   = {Information Fusion},
  volume    = {74},
  pages     = {39--49},
  year      = {2021},
  date      = {2021-10-01},
  issn      = {1566-2535},
  url       = {https://www.sciencedirect.com/science/article/pii/S1566253521000634},
  doi       = {10.1016/j.inffus.2021.03.007},
  abstract  = {The Rotation Forest classifier is a successful ensemble method for a wide variety of data mining applications. However, the way in which Rotation Forest transforms the feature space through PCA, although powerful, penalizes training and prediction times, making it unfeasible for Big Data. In this paper, a MapReduce Rotation Forest and its implementation under the Spark framework are presented. The proposed MapReduce Rotation Forest behaves in the same way as the standard Rotation Forest, training the base classifiers on a rotated space, but using a functional implementation of the rotation that enables its execution in Big Data frameworks. Experimental results are obtained using different cloud-based cluster configurations. Bayesian tests are used to validate the method against two ensembles for Big Data: Random Forest and PCARDE classifiers. Our proposal incorporates the parallelization of both the PCA calculation and the tree training, providing a scalable solution that retains the performance of the original Rotation Forest and achieves a competitive execution time (in average, at training, more than 3 times faster than other PCA-based alternatives). In addition, extensive experimentation shows that by setting some parameters of the classifier (i.e., bootstrap sample size, number of trees, and number of rotations), the execution time is reduced with no significant loss of performance using a small ensemble.},
  keywords  = {Big data, Ensemble learning, Machine learning, Random forest, Rotation forest, SELECTED, Spark},
  pubstate  = {published},
  tppubtype = {article},
}