@CONFERENCE{IMM2011-06102,
  author    = "S. G. Karadogan and L. Marchegiani and J. Larsen and L. K. Hansen",
  title     = "Top-Down Attention with Features Missing at Random",
  year      = "2011",
  month     = "sep",
  keywords  = "Machine learning, missing data techniques, attention modeling, entropy",
  booktitle = "International Workshop on Machine Learning for Signal Processing",
  publisher = "{IEEE} Press",
  url       = "http://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText=karadogan+larsen&filter=&.x=0&.y=0&tag=1",
  abstract  = "In this paper we present a top-down attention model designed for an environment in which features are missing completely at random. Following (Hansen et al., 2011), we model top-down attention as a sequential decision-making process driven by a task, modeled as a classification problem, in an environment with random subsets of features missing, but where we have the possibility to gather additional features among those that are missing. Thus, the top-down attention problem is reduced to answering the question: what to measure next? Attention is based on the top-down saliency of the missing features, given as the estimated difference in classification confusion (entropy) with and without the given feature. The difference in confusion is computed conditioned on the available set of features. In this work, we make our attention model more realistic by also allowing the initial training phase to take place with incomplete data. Thus, we expand the model to include a missing data technique in the learning process. The top-down attention mechanism is implemented in a Gaussian-Discrete mixture model setting where marginals and conditionals are relatively easy to compute. To illustrate the viability of the expanded model, we train the mixture model on two datasets: a synthetic dataset and the well-known Yeast dataset from the {UCI} database. We evaluate the new algorithm in environments characterized by different amounts of incompleteness and compare its performance with a system that chooses the next feature to be measured at random. The proposed top-down mechanism clearly outperforms random choice of the next feature.",
  isbn_issn = "{DOI} 10.1109/MLSP.2011.6064577"
}
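
% One possible reading of the entropy-based saliency described in the abstract,
% written here as a sketch; the symbols C (class variable), x_O (currently
% available features), and x_j (a candidate missing feature) are assumed
% notation introduced for illustration and are not taken from the paper.
% The next feature to measure would then be the missing j with largest S:
%
%   S(j \mid x_O) = H(C \mid x_O)
%                   - \mathbb{E}_{x_j \sim p(x_j \mid x_O)}\left[ H(C \mid x_O, x_j) \right]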