@inproceedings{Richter2016_1000060374,
author = {Richter, Matthias},
year = {2016},
title = {Guided Linear Dimensionality Reduction by Stochastic Gradient Descent: Technical Report IES-2015-05},
pages = {61--73},
booktitle = {Proceedings of the 2015 Joint Workshop of Fraunhofer IOSB and Institute for Anthropomatics, Vision and Fusion Laboratory},
editor = {Beyerer, J.},
publisher = {KIT Scientific Publishing},
address = {Karlsruhe},
isbn = {978-3-7315-0519-8},
issn = {1863-6489},
series = {Karlsruher Schriften zur Anthropomatik},
abstract = {The aim of dimensionality reduction is to reduce the number of considered variables without removing the information needed to perform a given task. In explorative data analysis, this translates to preserving the clustering properties of the data, while in a classification setting, only class separation has to be preserved. By far the most popular tools are principal component analysis (PCA) for the former and linear discriminant analysis (LDA) for the latter. Both transform the data to a linear subspace. With PCA, the subspace is chosen so that most of the variance is preserved; however, there is no guarantee that clustering properties or even class separation are preserved as well. With LDA, the data is projected to a $(C-1)$-dimensional subspace, where $C$ denotes the number of classes, so that class separation is maximized. Apart from unnecessarily restricting the number of dimensions, LDA may destroy discriminative information if its implicit assumption of normally distributed data is violated. In this technical report, we present a novel approach to linear dimensionality reduction. The approach is formulated as an optimization problem, which is solved using stochastic gradient descent (SGD). Like LDA, it aims to maximize class separability; like PCA, it lets the user specify the dimensionality of the subspace. As SGD is very sensitive to initial conditions, we further present a method to determine suitable starting points for the gradient descent.},
language = {english},
volume = {24}
}
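
The abstract above only outlines the method, but the general recipe it describes -- a user-chosen target dimension, a linear projection learned by SGD on a class-separability objective, and a deliberate choice of starting point -- can be sketched compactly. The NumPy snippet below is a minimal illustration under assumptions of my own: a contrastive pairwise loss stands in for the report's separability objective (which the abstract does not specify), and a PCA projection stands in for its initialization method; pca_init, sgd_projection, and all hyperparameter values are hypothetical, not taken from the report.

import numpy as np

rng = np.random.default_rng(0)

def pca_init(X, d):
    # PCA-based starting point: top-d eigenvectors of the covariance.
    # (The report proposes its own initialization; PCA is a stand-in here.)
    Xc = X - X.mean(axis=0)
    cov = Xc.T.dot(Xc) / (len(X) - 1)
    _, vecs = np.linalg.eigh(cov)      # eigenvalues in ascending order
    return vecs[:, -d:]                # D x d projection matrix

def sgd_projection(X, y, d, lr=1e-4, margin=10.0, steps=20000):
    # Learn W (D x d) by SGD on a contrastive pairwise loss:
    # pull same-class pairs together, push different-class pairs
    # apart up to a margin -- one differentiable proxy for class
    # separability (not necessarily the report's objective).
    W = pca_init(X, d)
    n = len(X)
    for t in range(steps):
        i, j = rng.integers(n, size=2)
        diff = X[i] - X[j]             # difference in input space (D,)
        z = W.T.dot(diff)              # projected difference (d,)
        if y[i] == y[j]:
            grad = 2.0 * np.outer(diff, z)    # gradient of ||z||^2: pull
        elif z.dot(z) < margin:
            grad = -2.0 * np.outer(diff, z)   # hinge is active: push
        else:
            continue                          # pair already separated
        W -= lr * grad
        if t % 1000 == 0:              # keep the columns well-conditioned
            W, _ = np.linalg.qr(W)
    return W

# Toy data: three classes in 10-D whose separation lies along the
# lowest-variance axis, so a variance-only criterion would discard it.
D, d, per_class = 10, 2, 200
X_parts, y_parts = [], []
for c in range(3):
    mean = np.zeros(D)
    mean[-1] = 2.5 * c                 # classes differ along the last axis
    cov = np.diag([10.0] * (D - 1) + [1.0])
    X_parts.append(rng.multivariate_normal(mean, cov, size=per_class))
    y_parts.append(np.full(per_class, c))
X, y = np.vstack(X_parts), np.concatenate(y_parts)

W = sgd_projection(X, y, d)
Z = X.dot(W)                           # n x d reduced representation
for c in range(3):
    print("class", c, "projected mean:", Z[y == c].mean(axis=0))

On this toy data the class separation sits in a low-variance direction, so a pure PCA projection would keep the noisy axes and drop the discriminative one; starting from PCA and refining with SGD lets the separability term recover it. This mirrors the abstract's argument for combining PCA's free choice of dimension with LDA's separation objective.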