@article{Lorentz2014ask,
  author = {{Lorenz}, D.~A. and {Wenger}, S. and {Sch{\"o}pfer}, F. and {Magnor}, M.},
  title = {A sparse {Kaczmarz} solver and a linearized {Bregman} method for online compressed sensing},
  journal = {ArXiv e-prints},
  archivePrefix = {arXiv},
  eprint = {1403.7543},
  primaryClass = {math.OC},
  keywords = {Mathematics - Optimization and Control, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Information Theory, Mathematics - Numerical Analysis},
  year = {2014},
  month = mar,
  adsurl = {http://adsabs.harvard.edu/abs/2014arXiv1403.7543L},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{hennenfent2008GEOPnii,
  title = {New insights into one-norm solvers from the {Pareto} curve},
  journal = {Geophysics},
  volume = {73},
  number = {4},
  year = {2008},
  optmonth = {07},
  pages = {A23-A26},
  abstract = {Geophysical inverse problems typically involve a trade-off between data misfit and some prior. Pareto curves trace the optimal trade-off between these two competing aims. These curves are commonly used in problems with two-norm priors, where they are plotted on a log-log scale and are known as L-curves. For other priors, such as the sparsity-promoting one norm, Pareto curves remain relatively unexplored. We show how these curves lead to new insights in one-norm regularization. First, we confirm the theoretical properties of smoothness and convexity of these curves from a stylized and a geophysical example. Second, we exploit these crucial properties to approximate the Pareto curve for a large-scale problem. Third, we show how Pareto curves provide an objective criterion to gauge how different one-norm solvers advance towards the solution.},
  keywords = {Acquisition, Geophysics, Optimization, Pareto, Processing, SLIM},
  doi = {10.1190/1.2944169},
  url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/hennenfent08GEOnii/hennenfent08GEOnii.pdf},
  author = {Gilles Hennenfent and Ewout van den Berg and Michael P. Friedlander and Felix J. Herrmann}
}

@article{Lorentz2013,
  author = {{Lorenz}, D.~A. and {Sch{\"o}pfer}, F. and {Wenger}, S.},
  title = {The Linearized {Bregman} Method via Split Feasibility Problems: Analysis and Generalizations},
  journal = {ArXiv e-prints},
  archivePrefix = {arXiv},
  eprint = {1309.2094},
  primaryClass = {math.OC},
  keywords = {Mathematics - Optimization and Control, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Numerical Analysis, Mathematics - Numerical Analysis, 68U10, 65K10, 90C25},
  year = {2013},
  month = sep,
  adsurl = {http://adsabs.harvard.edu/abs/2013arXiv1309.2094L},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{Needell2014199,
  title = {Paved with good intentions: Analysis of a randomized block {Kaczmarz} method},
  journal = {Linear Algebra and its Applications},
  volume = {441},
  number = {0},
  pages = {199-221},
  year = {2014},
  note = {Special Issue on Sparse Approximate Solution of Linear Systems},
  issn = {0024-3795},
  doi = {10.1016/j.laa.2012.12.022},
  url = {http://www.sciencedirect.com/science/article/pii/S0024379513000098},
  author = {Deanna Needell and Joel A. Tropp},
  keywords = {Block Kaczmarz, Projections onto convex sets, Algebraic reconstruction technique, Matrix paving},
  abstract = {The block Kaczmarz method is an iterative scheme for solving overdetermined least-squares problems.
At each step, the algorithm projects the current iterate onto the solution space of a subset of the constraints. This paper describes a block Kaczmarz algorithm that uses a randomized control scheme to choose the subset at each step. This algorithm is the first block Kaczmarz method with an (expected) linear rate of convergence that can be expressed in terms of the geometric properties of the matrix and its submatrices. The analysis reveals that the algorithm is most effective when it is given a good row paving of the matrix, a partition of the rows into well-conditioned blocks. The operator theory literature provides detailed information about the existence and construction of good row pavings. Together, these results yield an efficient block Kaczmarz scheme that applies to many overdetermined least-squares problems.}
}

@inproceedings{MY13,
  author = {Mansour, H. and Yilmaz, O.},
  booktitle = {Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE},
  title = {A sparse randomized {Kaczmarz} algorithm},
  year = {2013},
  optmonth = {Dec},
  pages = {621-621},
  abstract = {In this paper, we propose a modification that speeds up the convergence of the randomized Kaczmarz algorithm for systems of linear equations with sparse solutions. The speedup is achieved by projecting every iterate onto a weighted row of the linear system while maintaining the random row selection criteria of Strohmer and Vershynin. While the Kaczmarz algorithm and its variants can only find solutions to overdetermined linear systems, our algorithm surprisingly succeeds in finding sparse solutions to underdetermined linear systems as well. Our numerical experiments demonstrate the accelerated convergence in the overdetermined case as well as the sparse recovery capabilities of our approach in the underdetermined case.},
  keywords = {convergence of numerical methods; iterative methods; random processes; randomised algorithms; linear equations; numerical analysis; overdetermined linear systems; random row selection criteria; sparse randomized Kaczmarz algorithm; sparse recovery capabilities; sparse solutions; underdetermined linear systems; weighted row; Algorithm design and analysis; Convergence; Equations; Laboratories; Linear systems; Vectors},
  doi = {10.1109/GlobalSIP.2013.6736958}
}

@article{Strohmer2009,
  year = {2009},
  issn = {1069-5869},
  journal = {Journal of Fourier Analysis and Applications},
  volume = {15},
  number = {2},
  doi = {10.1007/s00041-008-9030-4},
  title = {A Randomized {Kaczmarz} Algorithm with Exponential Convergence},
  url = {http://dx.doi.org/10.1007/s00041-008-9030-4},
  publisher = {Birkh{\"a}user Boston},
  keywords = {Kaczmarz algorithm; Randomized algorithm; Random matrix; Convergence rate},
  author = {Strohmer, Thomas and Vershynin, Roman},
  pages = {262-278},
  language = {English}
}

@article{friedlander2007SJOero,
  title = {Exact regularization of convex programs},
  journal = {SIAM Journal on Optimization},
  volume = {18},
  number = {4},
  year = {2007},
  month = {05},
  pages = {1326-1350},
  abstract = {The regularization of a convex program is exact if all solutions of the regularized problem are also solutions of the original problem for all values of the regularization parameter below some positive threshold. For a general convex program, we show that the regularization is exact if and only if a certain selection problem has a Lagrange multiplier. Moreover, the regularization parameter threshold is inversely related to the Lagrange multiplier.
We use this result to generalize an exact regularization result of Ferris and Mangasarian [Appl. Math. Optim., 23 (1991), pp. 266{\textendash}273] involving a linearized selection problem. We also use it to derive necessary and sufficient conditions for exact penalization, similar to those obtained by Bertsekas [Math. Programming, 9 (1975), pp. 87{\textendash}99] and by Bertsekas, Nedi{\'c}, and Ozdaglar [Convex Analysis and Optimization, Athena Scientific, Belmont, MA, 2003]. When the regularization is not exact, we derive error bounds on the distance from the regularized solution to the original solution set. We also show that existence of a {\textquoteleft}{\textquoteleft}weak sharp minimum{\textquoteright}{\textquoteright} is in some sense close to being necessary for exact regularization. We illustrate the main result with numerical experiments on the $\ell_1$ regularization of benchmark (degenerate) linear programs and semidefinite/second-order cone programs. The experiments demonstrate the usefulness of $\ell_1$ regularization in finding sparse solutions.},
  keywords = {Optimization, SLIM},
  doi = {10.1137/060675320},
  url = {http://www.cs.ubc.ca/~mpf/2007-exact-regularization.html},
  author = {Michael P. Friedlander and Paul Tseng}
}

@article{VanLeeuwen11TRfwiwse,
  title = {Fast waveform inversion without source encoding},
  journal = {Geophysical Prospecting},
  volume = {61},
  year = {2013},
  note = {Article first published online: 10 July 2012},
  optmonth = {06},
  pages = {10-19},
  address = {University of British Columbia, Vancouver},
  abstract = {Randomized source encoding has recently been proposed as a way to dramatically reduce the costs of full waveform inversion. The main idea is to replace all sequential sources by a small number of simultaneous sources. This introduces random crosstalk in the model updates, and special stochastic optimization strategies are required to deal with this. Two problems arise with this approach: (i) source encoding can only be applied to fixed-spread acquisition setups, and (ii) stochastic optimization methods tend to converge very slowly, relying on averaging to get rid of the crosstalk. Although the slow convergence is partly offset by the low iteration cost, we show that conventional optimization strategies are bound to outperform stochastic methods in the long run. In this paper we argue that we do not need randomized source encoding to reap the benefits of stochastic optimization, and we review an optimization strategy that combines the benefits of both conventional and stochastic optimization. The method uses a gradually increasing batch of sources. Thus, iterations are very cheap initially, and this allows the method to make fast progress in the beginning. As the batch size grows, the method behaves like conventional optimization, allowing for fast convergence. Numerical examples suggest that the stochastic and hybrid methods perform equally well with and without source encoding and that the hybrid method outperforms both conventional and stochastic optimization. The method does not rely on source encoding techniques and can thus be applied to non-fixed-spread data.},
  keywords = {FWI, Optimization, SLIM},
  doi = {10.1111/j.1365-2478.2012.01096.x},
  url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2478.2012.01096.x/abstract},
  author = {Tristan van Leeuwen and Felix J. Herrmann}
}
@article{herrmann11GPelsqIm,
  title = {Efficient least-squares imaging with sparsity promotion and compressive sensing},
  journal = {Geophysical Prospecting},
  volume = {60},
  number = {4},
  year = {2012},
  optmonth = {7},
  pages = {696-712},
  address = {University of British Columbia, Vancouver},
  abstract = {Seismic imaging is a linearized inversion problem relying on the minimization of a least-squares misfit functional as a function of the medium perturbation. The success of this procedure hinges on our ability to handle large systems of equations{\textendash}whose size grows exponentially with the demand for higher resolution images in more and more complicated areas{\textendash}and our ability to invert these systems given a limited amount of computational resources. To overcome this {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} in problem size and computational complexity, we propose a combination of randomized dimensionality-reduction and divide-and-conquer techniques. This approach allows us to take advantage of sophisticated sparsity-promoting solvers that work on a series of smaller subproblems, each involving a small randomized subset of data. These subsets correspond to artificial simultaneous-source experiments made of random superpositions of sequential-source experiments. By changing these subsets after each subproblem is solved, we are able to attain an inversion quality that is competitive while requiring fewer computational and, possibly, fewer acquisition resources. Application of this concept to a controlled series of experiments showed the validity of our approach and the relationship between its efficiency{\textendash}by reducing the number of sources and hence the number of wave-equation solves{\textendash}and the image quality. Application of our dimensionality-reduction methodology with sparsity promotion to a complicated synthetic with well-log constrained structure also yields excellent results, underlining the importance of sparsity promotion.},
  keywords = {Compressive Sensing, Imaging, Optimization, SLIM},
  doi = {10.1111/j.1365-2478.2011.01041.x},
  url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalProspecting/2012/herrmann11GPelsqIm/herrmann11GPelsqIm.pdf},
  url2 = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2478.2011.01041.x/full},
  author = {Felix J. Herrmann and Xiang Li}
}

@article{Beck2009,
  author = {Beck, Amir and Teboulle, Marc},
  title = {A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems},
  journal = {SIAM Journal on Imaging Sciences},
  issue_date = {January 2009},
  volume = {2},
  number = {1},
  month = mar,
  year = {2009},
  issn = {1936-4954},
  pages = {183-202},
  numpages = {20},
  url = {http://dx.doi.org/10.1137/080716542},
  doi = {10.1137/080716542},
  acmid = {1658364},
  publisher = {Society for Industrial and Applied Mathematics},
  address = {Philadelphia, PA, USA},
  keywords = {deconvolution, global rate of convergence, image deblurring, iterative shrinkage-thresholding algorithm, least squares and $\ell_1$ regularization problems, linear inverse problem, optimal gradient method, two-step iterative algorithms}
}

@unpublished{tu2014fis,
  title = {Fast imaging with surface-related multiples by sparse inversion},
  year = {2014},
  optmonth = {04},
  publisher = {UBC},
  note = {Accepted for Geophysical Journal International},
  abstract = {In marine exploration seismology, surface-related multiples are usually treated as noise, mainly because subsequent processing steps, such as migration-velocity analysis and imaging, require multiple-free data.
Failure to remove these wavefield components from the data may lead to erroneous estimates for migration velocity or result in strong coherent artifacts that interfere with the imaged reflectors. On the other hand, multiples interact with the free surface and are therefore exposed more to the subsurface. Hence, multiples carry complementary information compared to the primaries and, when processed correctly, improve the illumination. Given a sufficiently accurate background velocity model and an estimate for the source signature, we propose a wave-equation based inversion procedure that produces accurate images of velocity perturbations in the subsurface from the total upgoing wavefield, including surface-related multiples. Because our method uses subsampling techniques, we obtain high-quality true-amplitude least-squares migrated images at computational costs of roughly a single reverse-time migration with fully sampled data. We also incur a minimal overhead from incorporating the multiples by having the wave-equation solver carry out the multiple predictions via the inclusion of an areal source. Our method derives its efficacy from promoting curvelet-domain sparsity in the image domain and leads to images that are virtually free of artifacts from data that includes multiples. The method is also relatively robust with respect to linearization errors and errors in the background velocity model.},
  keywords = {approximate message passing, Compressive Sensing, curvelet, inversion, Kaczmarz, multiples, private},
  url = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2014/tu2014fis/tu2014fis.pdf},
  author = {Ning Tu and Felix J. Herrmann}
}

@article{vandenberg08ptp,
  abstract = {The basis pursuit problem seeks a minimum one-norm solution of an underdetermined least-squares problem. Basis pursuit denoise (BPDN) fits the least-squares problem only approximately, and a single parameter determines a curve that traces the optimal trade-off between the least-squares fit and the one-norm of the solution. We prove that this curve is convex and continuously differentiable over all points of interest, and show that it gives an explicit relationship to two other optimization problems closely related to BPDN. We describe a root-finding algorithm for finding arbitrary points on this curve; the algorithm is suitable for problems that are large scale and for those that are in the complex domain. At each iteration, a spectral gradient-projection method approximately minimizes a least-squares problem with an explicit one-norm constraint. Only matrix-vector operations are required. The primal-dual solution of this problem gives function and derivative information needed for the root-finding method. Numerical experiments on a comprehensive set of test problems demonstrate that the method scales well to large problems.},
  author = {E. van den Berg and M. P. Friedlander},
  title = {Probing the {Pareto} frontier for basis pursuit solutions},
  publisher = {SIAM},
  year = {2008},
  journal = {SIAM Journal on Scientific Computing},
  volume = {31},
  number = {2},
  pages = {890-912},
  keywords = {basis pursuit, convex program, duality, root-finding, Newton's method, projected gradient, one-norm regularization, sparse solutions},
  pdf = {http://link.aip.org/link/?SCE/31/890},
  doi = {10.1137/080714488}
}

@article{yin2008,
  author = {Yin, W. and Osher, S. and Goldfarb, D. and Darbon, J.},
  title = {Bregman Iterative Algorithms for $\ell_1$-Minimization with Applications to Compressed Sensing},
  journal = {SIAM Journal on Imaging Sciences},
  volume = {1},
  number = {1},
  pages = {143-168},
  year = {2008},
  doi = {10.1137/070703983},
  url = {http://dx.doi.org/10.1137/070703983}
}

@article{vanLeeuwen2013GJImlm,
  title = {Mitigating local minima in full-waveform inversion by expanding the search space},
  journal = {Geophysical Journal International},
  volume = {195},
  year = {2013},
  optmonth = {10},
  pages = {661-667},
  abstract = {Wave-equation based inversions, such as full-waveform inversion and reverse-time migration, are challenging because of their computational costs, memory requirements and reliance on accurate initial models. To confront these issues, we propose a novel formulation of wave-equation based inversion built on a penalty method. In this formulation, the objective function consists of a data-misfit term and a penalty term, which measures how accurately the wavefields satisfy the wave equation. This new approach is a major departure from current formulations, where forward and adjoint wavefields, which both satisfy the wave equation, are correlated to compute updates for the unknown model parameters. Instead, we carry out the inversion over two alternating steps: we first estimate the wavefield everywhere, given the current model parameters, source and observed data, and then update the model parameters, given the estimate for the wavefield everywhere and the source. Because the inversion involves both the synthetic wavefields and the medium parameters, its search space is enlarged so that it suffers less from local minima. Compared to other formulations that extend the search space of wave-equation based inversion, our method differs in several aspects, namely (i) it avoids storage and updates of the synthetic wavefields, because we calculate these explicitly by finding solutions that obey the wave equation and fit the observed data, and (ii) no adjoint wavefields are required to update the model; instead, our updates are calculated from these solutions directly, which leads to significant computational savings. We demonstrate the validity of our approach on carefully selected examples and discuss possible extensions and future research.},
  doi = {10.1093/gji/ggt258},
  url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalJournalInternational/2013/vanLeeuwen2013GJImlm/vanLeeuwen2013mlm.pdf},
  author = {Tristan van Leeuwen and Felix J. Herrmann}
}