@ARTICLE{IMM2012-06192,
  author   = "M. M{\o}rup and L. K. Hansen",
  title    = "Archetypal analysis for machine learning and data mining",
  journal  = "Neurocomputing",
  year     = "2012",
  pages    = "54-63",
  keywords = "Archetypal Analysis",
  url      = "http://www.sciencedirect.com/science/article/pii/S0925231211006060",
  issn     = "0925-2312",
  abstract = "Archetypal analysis (AA), proposed by Cutler and Breiman (1994), estimates the principal convex hull (PCH) of a data set. As such, AA favors features that constitute representative corners of the data, i.e., distinct aspects or archetypes. We show that AA enjoys the interpretability of clustering - without being limited to hard assignment - and the uniqueness of SVD - without being limited to orthogonal representations. In order to do large-scale AA, we derive an efficient algorithm based on projected gradient as well as an initialization procedure we denote FURTHESTSUM, inspired by the FURTHESTFIRST approach widely used for K-means (Hochbaum and Shmoys, 1985). We generalize the AA procedure to KERNEL-AA in order to extract the principal convex hull in potentially infinite Hilbert spaces, and derive a relaxation of AA for the case where the archetypes cannot be represented as convex combinations of the observed data. We further demonstrate that the AA model is relevant for feature extraction and dimensionality reduction for a large variety of machine learning problems taken from computer vision, neuroimaging, chemistry, text mining and collaborative filtering, leading to highly interpretable representations of the dynamics in the data. Matlab code for the derived algorithms is available for download from www.mortenmorup.dk."
}
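
The abstract describes the AA model in which each observation is approximated by a convex combination of archetypes, and each archetype is itself a convex combination of the observed data points. The following is a minimal Python/NumPy sketch of that model, fitted by alternating projected-gradient steps in the spirit of the approach named above; it is not the authors' Matlab implementation, and the step size, iteration count, and simplex-projection routine are illustrative choices.

import numpy as np

def project_simplex(v):
    # Euclidean projection of each column of v onto the probability simplex
    # (nonnegative entries summing to one), via the standard sort-based rule.
    u = -np.sort(-v, axis=0)                      # columns sorted descending
    css = np.cumsum(u, axis=0) - 1.0
    idx = np.arange(1, v.shape[0] + 1)[:, None]
    rho = (u - css / idx > 0).sum(axis=0)         # number of active components per column
    theta = css[rho - 1, np.arange(v.shape[1])] / rho
    return np.maximum(v - theta, 0.0)

def archetypal_analysis(X, k, iters=200, lr=1e-3, seed=0):
    # Fit X ~= X @ C @ S, where X is d x n with observations as columns,
    # C (n x k) mixes data points into archetypes and S (k x n) mixes
    # archetypes into reconstructions; both are kept column-stochastic.
    rng = np.random.default_rng(seed)
    n = X.shape[1]
    C = project_simplex(rng.random((n, k)))
    S = project_simplex(rng.random((k, n)))
    for _ in range(iters):
        XC = X @ C
        R = XC @ S - X                            # residual of the reconstruction
        S = project_simplex(S - lr * (XC.T @ R))  # gradient step in S, then project
        R = X @ C @ S - X
        C = project_simplex(C - lr * (X.T @ R @ S.T))  # gradient step in C, then project
    return C, S

# Example use (synthetic data): X = np.random.default_rng(1).random((2, 500))
#                               C, S = archetypal_analysis(X, k=3)

Keeping the columns of C and S on the simplex is what ties the factorization to the principal convex hull: the archetypes X @ C stay inside the convex hull of the data, and reconstructions stay inside the convex hull of the archetypes, which is the source of the corner-like, interpretable representation the abstract emphasizes.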