@mastersthesis{IMM2014-06733,
  author   = {Andersen, M. R.},
  title    = {Sparse Inference Using Approximate Message Passing},
  school   = {Technical University of Denmark, Department of Applied Mathematics and Computer Science},
  address  = {Matematiktorvet, Building 303B, DK-2800 Kgs. Lyngby, Denmark},
  year     = {2014},
  note     = {{DTU} Supervisor: Lars Kai Hansen, lkai@dtu.dk, {DTU} Compute},
  url      = {http://www.compute.dtu.dk/English.aspx},
  abstract = {This thesis deals with probabilistic methods for finding sparse solutions to ill-posed linear inverse problems using both the single measurement vector (SMV) formulation and multiple measurement vector (MMV) formulation. Specifically, this thesis investigates the novel algorithm called approximate message passing (AMP) and its Bayesian generalization called generalized approximate message passing (GAMP). The {AMP} algorithm was initially proposed by Donoho et al. (2009) to solve the linear inverse problem in the context of compressed sensing and later generalized by Rangan (2010). Both algorithms are based on approximations of the sum-product algorithm formulated for factor graphs. Through numerical simulations, it is verified that the {AMP} algorithms are able to achieve superior performance in terms of the sparsity under-sampling trade-off for the basis pursuit problem, while maintaining a low computational complexity. Moreover, the {GAMP} framework is combined with the sparsity promoting spike and slab prior to derive an inference algorithm for the inverse problem in the {SMV} formulation. This algorithm is extended to the {MMV} formulation, which allows the use of multiple measurement vectors by imposing a common sparsity pattern on the measurement vectors. This method, which is coined {AMP-MMV}, provides soft estimates of both the solution and the underlying support. A thorough numerical study verifies the benefits of the {MMV} formulation and compares it to state-of-the-art methods. This comparison shows that the {AMP-MMV} is able to match the recovery capabilities of these methods, while maintaining a computational complexity that is linear in all problem dimensions. Yet another model is considered, the {AMP-DCS} model, which relaxes the assumption of the common sparsity pattern by assuming the sparsity pattern is slowly changing over time according to a binary Markov chain. By means of numerical experiments, it is demonstrated that this approach is preferable to the {SMV} approach. For automatic tuning of the hyperparameters, Expectation-Maximization (EM) algorithms are derived for both the {SMV} and {MMV} formulation.},
}