@ARTICLE{IMM2002-01444,
  author   = "U. Kjems and L. K. Hansen and J. Anderson",
  title    = "The Quantitative Evaluation of Functional Neuroimaging Experiments: Mutual Information Learning Curves",
  journal  = "NeuroImage",
  year     = "2002",
  volume   = "15",
  number   = "4",
  pages    = "772--786",
  keywords = "Modeling, Neuroimaging, Learning Curve",
  url      = "http://www2.compute.dtu.dk/pubdb/pubs/1444-full.html",
  abstract = "Learning curves are presented as an unbiased means for evaluating the
              performance of models for neuroimaging data analysis. The learning curve
              measures the predictive performance in terms of the generalization or
              prediction error as a function of the number of independent examples
              (e.g., subjects) used to determine the parameters in the model.
              Cross-validation resampling is used to obtain unbiased estimates of a
              generic multivariate Gaussian classifier, for training set sizes from 2
              to 16 subjects. We apply the framework to four different activation
              experiments, in this case [$^{15}$O]water data sets, although the
              framework is equally valid for multisubject fMRI studies. We demonstrate
              how the prediction error can be expressed as the mutual information
              between the scan and the scan label, measured in units of bits. The
              mutual information learning curve can be used to evaluate the impact of
              different methodological choices, e.g., classification label schemes and
              preprocessing choices. Another application for the learning curve is to
              examine the model performance using bias/variance considerations,
              enabling the researcher to determine whether the model performance is
              limited by statistical bias or variance. We furthermore present the
              sensitivity map as a general method for extracting activation maps from
              statistical models within the probabilistic framework and illustrate
              relationships between mutual information and pattern reproducibility as
              derived in the {NPAIRS} framework described in a companion paper."
}
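
The abstract describes estimating a mutual information learning curve by cross-validation: for each training-set size (2 to 16 subjects), a Gaussian classifier is fit on resampled training scans, and its predictive performance on held-out scans is converted into an estimate of the mutual information between scan and scan label, in bits, via I = H(label) - <-log2 p(label | scan)>. The sketch below is not the paper's implementation; it uses synthetic stand-in data, a class-mean / shared diagonal-covariance classifier, and 20 resampling splits, all of which are illustrative assumptions, and every function and variable name is hypothetical.

# A minimal sketch (assumptions noted above) of a mutual-information learning
# curve estimated by cross-validation resampling over subjects.
import numpy as np

rng = np.random.default_rng(0)


def fit_gaussian_classifier(X, y, n_classes):
    """Class means and a shared diagonal covariance pooled over classes (simplifying assumption)."""
    means = np.vstack([X[y == c].mean(axis=0) for c in range(n_classes)])
    resid = X - means[y]
    var = resid.var(axis=0) + 1e-6                # small ridge avoids zero variance
    priors = np.bincount(y, minlength=n_classes) / len(y)
    return means, var, priors


def predictive_probs(X, means, var, priors):
    """p(label | scan) under the fitted Gaussian model."""
    # log N(x | mu_c, diag(var)) up to a constant shared by all classes
    log_lik = -0.5 * (((X[:, None, :] - means[None]) ** 2) / var).sum(-1)
    log_post = log_lik + np.log(priors)
    log_post -= log_post.max(axis=1, keepdims=True)
    p = np.exp(log_post)
    return p / p.sum(axis=1, keepdims=True)


def mutual_information_bits(p_true_label, label_prior):
    """I(scan; label) estimate: H(label) minus the mean -log2 p(true label | scan)."""
    h_label = -(label_prior * np.log2(label_prior)).sum()
    cross_entropy = -np.log2(p_true_label).mean()
    return h_label - cross_entropy


# Synthetic stand-in for multi-subject scan data: 2 scan labels, 10 features per scan.
n_classes, dim, n_per_subject, n_subjects_total = 2, 10, 20, 20
offsets = rng.normal(size=(n_classes, dim))
subjects = []
for _ in range(n_subjects_total):
    y = rng.integers(0, n_classes, size=n_per_subject)
    X = offsets[y] + rng.normal(size=(n_per_subject, dim))
    subjects.append((X, y))

# Learning curve over the number of training subjects (2..16, as in the abstract).
for n_train in range(2, 17, 2):
    mi_estimates = []
    for _ in range(20):                           # resampling splits
        idx = rng.permutation(n_subjects_total)
        train, test = idx[:n_train], idx[n_train:]
        Xtr = np.vstack([subjects[i][0] for i in train])
        ytr = np.concatenate([subjects[i][1] for i in train])
        Xte = np.vstack([subjects[i][0] for i in test])
        yte = np.concatenate([subjects[i][1] for i in test])
        means, var, priors = fit_gaussian_classifier(Xtr, ytr, n_classes)
        p = predictive_probs(Xte, means, var, priors)
        prior_te = np.bincount(yte, minlength=n_classes) / len(yte)
        mi_estimates.append(
            mutual_information_bits(p[np.arange(len(yte)), yte], prior_te))
    print(f"N_train = {n_train:2d} subjects: "
          f"I(scan; label) ~ {np.mean(mi_estimates):.3f} bits")

Plotting the printed values against the number of training subjects gives a learning curve of the kind the paper uses to compare preprocessing or labeling choices; larger training sets should yield estimates approaching the (synthetic) model's attainable mutual information.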