@misc{Chen2004, author = {Chen, Janet}, file = {:Users/timlin/Documents/Mendeley Desktop/Chen/Chen - 2004 - Group Theory and the Rubik’s Cube.pdf:pdf}, title = {{Group Theory and the Rubik's Cube}}, year = {2004} } @article{Kabir1995, author = {Kabir, M. M. Nurul and Verschuur, D. J.}, doi = {10.1111/j.1365-2478.1995.tb00257.x}, file = {:Users/timlin/Documents/Mendeley Desktop/Kabir, Verschuur/Kabir, Verschuur - 1995 - Restoration of missing offsets by parabolic Radon transform1.pdf:pdf}, issn = {0016-8025}, journal = {Geophysical Prospecting}, month = apr, number = {3}, pages = {347--368}, title = {{Restoration of missing offsets by parabolic Radon transform}}, url = {http://doi.wiley.com/10.1111/j.1365-2478.1995.tb00257.x}, volume = {43}, year = {1995} } @article{Riley1976, abstract = {Starting with a 1-D subsurface model, a method is developed for modeling and inverting the class of multiple reflections involving the near-perfect reflector at the free surface. A solution to the practical problem of estimating the source waveform is discussed, and application of the 1-D algorithm to field data illustrates the successful elimination of seafloor and peg-leg multiples. Extending the analysis to waves in two dimensions, we make the approximation that the subsurface behaves as an acoustic medium. Based on several numerical and theoretical considerations, the scalar wave equation is split into two separate partial differential equations: one governing propagation of upcoming waves and a second describing downgoing waves. The result is a pair of propagation equations which are coupled where reflectors exist. Finite difference approximations to the initial boundary value problem are developed to integrate numerically the surface reflection seismogram.}, author = {Riley, Don C.
and Claerbout, Jon F.}, doi = {10.1190/1.1440638}, issn = {1070485X}, journal = {Geophysics}, language = {en}, month = feb, number = {4}, pages = {592--620}, publisher = {Society of Exploration Geophysicists}, title = {{2-D multiple reflections}}, url = {http://library.seg.org/doi/abs/10.1190/1.1440638}, volume = {41}, year = {1976} } @article{Kennett1979, abstract = {A new approach is presented for the suppression of multiples reflected at the surface of a horizontally layered fluid or elastic medium, recorded at non-zero offsets from the source. The scheme used is to extract the effect of the free surface in the frequency-wavenumber domain and then to replace this surface by a non-reflecting boundary. The multiple suppression operator requires a detailed knowledge of the source time function and the elastic properties of the medium between the source and the surface. For a stratified fluid or a liquid layer overlying a stratified elastic medium, complete multiple suppression can be achieved with noise free data. If only the vertical component is available for an elastic medium an approximate approach may be used which removes most of the multiple energy. Good results may be achieved with this multiple suppression scheme in the presence of noise. The method is designed to be used before records are stacked in a CDP gather.}, author = {Kennett, B. L. N.}, doi = {10.1111/j.1365-2478.1979.tb00987.x}, issn = {0016-8025}, journal = {Geophysical Prospecting}, language = {English}, month = sep, number = {3}, pages = {584--600}, title = {{The suppression of surface multiples on seismic records}}, url = {http://www.earthdoc.org/publication/publicationdetails/?publication=35515}, volume = {27}, year = {1979} } @inproceedings{Cambois2009, abstract = {Pressure and velocity sensors contained in a towed streamer must be properly calibrated before they can be algebraically combined to separate up‐ and down‐going waves. 
Statistical techniques developed for ocean‐bottom surveys are adapted to account for the marine streamer acquisition geometry. Byproducts of this calibration process provide quality indicators that can be used during acquisition to assess data quality in real‐time. Another unique feature of this process is that it isolates external noises, such as seismic interferences. These identified noises can subsequently be surgically removed from the data.}, author = {Cambois, G. and Carlson, D. and Jones, C. and Lesnes, M. and S\"{o}llner, W. and Tabti, H.}, booktitle = {SEG Technical Program Expanded Abstracts}, doi = {10.1190/1.3255117}, file = {:Users/timlin/Documents/Mendeley Desktop/Cambois et al/Cambois et al. - 2009 - Dual-sensor streamer data Calibration, acquisition QC and attenuation of seismic interferences and other noises.pdf:pdf}, language = {en}, pages = {142--146}, title = {{Dual-sensor streamer data: Calibration, acquisition QC and attenuation of seismic interferences and other noises}}, url = {http://library.seg.org/doi/abs/10.1190/1.3255117}, year = {2009} } @inproceedings{J.Curry2006, author = {{J. Curry}, W. and Shan, G.}, booktitle = {68th EAGE Conference and Exhibition}, doi = {10.3997/2214-4609.201402125}, file = {:Users/timlin/Documents/Mendeley Desktop/J. Curry, Shan/J. Curry, Shan - 2006 - Interpolation of Near Offsets with Multiples and Prediction-Error Filters.pdf:pdf}, month = jun, title = {{Interpolation of Near Offsets with Multiples and Prediction-Error Filters}}, url = {http://www.earthdoc.org/publication/publicationdetails/?publication=155}, year = {2006} } @article{wang2004mpi, abstract = {This paper introduces a fully data‐driven concept, multiple prediction through inversion (MPI), for surface‐related multiple attenuation (SMA). 
It builds the multiple model not by spatial convolution, as in a conventional SMA, but by updating the attenuated multiple wavefield in the previous iteration to generate a multiple prediction for the new iteration, as is usually the case in an iterative inverse problem. Because MPI does not use spatial convolution, it is able to minimize the edge effect that appears in conventional SMA multiple prediction and to eliminate the need to synthesize near‐offset traces, required by a conventional scheme, so that it can deal with a seismic data set with missing near‐offset traces. The MPI concept also eliminates the need for an explicit surface operator, which is required by conventional SMA and is comprised of the inverse source signature and other effects. This method accounts implicitly for the spatial variation of the surface operator in multiple‐model building and ...}, author = {Wang, Yanghua}, doi = {10.1190/1.1707074}, file = {:Users/timlin/Documents/Mendeley Desktop/Wang/Wang - 2004 - Multiple prediction through inversion A fully data‐driven concept for surface‐related multiple attenuation.pdf:pdf}, issn = {0016-8033}, journal = {Geophysics}, keywords = {attenuation measurement,geophysical signal processing,inverse problems,iterative methods,prediction theory,seismic waves}, language = {en}, month = mar, number = {2}, pages = {547--553}, publisher = {Society of Exploration Geophysicists}, title = {{Multiple prediction through inversion: A fully data‐driven concept for surface‐related multiple attenuation}}, url = {http://library.seg.org/doi/abs/10.1190/1.1707074}, volume = {69}, year = {2004} } @inproceedings{hargreaves2006sma, archivePrefix = {arXiv}, arxivId = {10.1190/1.2370080}, author = {Hargreaves, Neil}, booktitle = {SEG Technical Program Expanded Abstracts 2006}, doi = {10.1190/1.2370080}, eprint = {1.2370080}, file = {:Users/timlin/Documents/Mendeley Desktop/Hargreaves/Hargreaves - 2006 - Surface multiple attenuation in shallow water and the 
construction of primaries from multiples.pdf:pdf}, language = {en}, month = jan, pages = {2689--2693}, primaryClass = {10.1190}, publisher = {Society of Exploration Geophysicists}, title = {{Surface multiple attenuation in shallow water and the construction of primaries from multiples}}, url = {http://library.seg.org/doi/abs/10.1190/1.2370080}, year = {2006} } @article{Majdanski2011, author = {Majdański, Mariusz and Kostov, Cl\'{e}ment and Kragh, Ed and Moore, Ian and Thompson, Mark and Mispel, Joachim}, doi = {10.1190/geo2010-0337.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Majdański et al/Majdański et al. - 2011 - Attenuation of free-surface multiples by updown deconvolution for marine towed-streamer data(2).pdf:pdf}, issn = {0016-8033}, journal = {Geophysics}, month = nov, number = {6}, pages = {V129--V138}, title = {{Attenuation of free-surface multiples by up/down deconvolution for marine towed-streamer data}}, url = {http://library.seg.org/doi/abs/10.1190/geo2010-0337.1}, volume = {76}, year = {2011} } @conference{lin2014EAGEmas, abstract = {We propose a method to substantially reduce the computational costs of the Robust Estimation of Primaries by Sparse Inversion algorithm, based on a multilevel inversion strategy that shifts early iterations of the method to successively coarser spatial sampling grids. This method requires no change in the core implementation of the original algorithm, and additionally only relies on trace decimation, low-pass filtering, and rudimentary interpolation techniques. We furthermore demonstrate with a synthetic seismic line significant computational speedups using this approach.}, author = {Lin, Tim T Y and Herrmann, F. 
J.}, booktitle = {76th EAGE Conference and Exhibition}, file = {:Users/timlin/Documents/Mendeley Desktop/Lin, Herrmann/Lin, Herrmann - 2014 - Multilevel acceleration strategy for the robust estimation of primaries by sparse inversion.pdf:pdf}, keywords = {EAGE,EPSI,REPSI,multigrid,multilevel,multiples,multiscale}, title = {{Multilevel acceleration strategy for the robust estimation of primaries by sparse inversion}}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/lin2014EAGEmas.pdf}, year = {2014} } @article{Fleischer1996, author = {Fleischer, Gunter and Hofmann, Bernd}, doi = {10.1088/0266-5611/12/4/006}, file = {:Users/timlin/Documents/Mendeley Desktop/Fleischer, Hofmann/Fleischer, Hofmann - 1996 - On inversion rates for the autoconvolution equation.pdf:pdf}, issn = {0266-5611}, journal = {Inverse Problems}, month = aug, number = {4}, pages = {419--435}, title = {{On inversion rates for the autoconvolution equation}}, url = {http://stacks.iop.org/0266-5611/12/i=4/a=006?key=crossref.e89119a055c8a50c2153fd2c575d94b1}, volume = {12}, year = {1996} } @article{Fleischer1999, author = {Fleischer, Gunter and Gorenflo, R. and Hofmann, Bernd}, doi = {10.1002/(SICI)1521-4001(199903)79:3<149::AID-ZAMM149>3.0.CO;2-N}, file = {:Users/timlin/Documents/Mendeley Desktop/Fleischer, Gorenflo, Hofmann/Fleischer, Gorenflo, Hofmann - 1999 - On the Autoconvolution Equation and Total Variation Constraints.pdf:pdf}, issn = {0044-2267}, journal = {ZAMM}, month = mar, number = {3}, pages = {149--159}, title = {{On the Autoconvolution Equation and Total Variation Constraints}}, url = {http://doi.wiley.com/10.1002/\%28SICI\%291521-4001\%28199903\%2979\%3A3\%3C149\%3A\%3AAID-ZAMM149\%3E3.0.CO\%3B2-N}, volume = {79}, year = {1999} } @article{Li2012a, abstract = {ABSTRACTWave-equation-based seismic inversion can be formulated as a nonlinear least-squares problem. 
The demand for higher-resolution models in more geologically complex areas drives the need to develop techniques that exploit the special structure of full-waveform inversion to reduce the computational burden and to regularize the inverse problem. We meet these goals by using ideas from compressive sensing and stochastic optimization to design a novel Gauss-Newton method, where the updates are computed from random subsets of the data via curvelet-domain sparsity promotion. Two different subset sampling strategies are considered: randomized source encoding, and drawing sequential shots firing at random source locations from marine data with missing near and far offsets. In both cases, we obtain excellent inversion results compared to conventional methods at reduced computational costs.}, author = {Li, Xiang and Aravkin, Aleksandr Y. and van Leeuwen, Tristan and Herrmann, F. J.}, doi = {10.1190/geo2011-0410.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Li et al/Li et al. - 2012 - Fast randomized full-waveform inversion with compressive sensing.pdf:pdf}, issn = {0016-8033}, journal = {Geophysics}, language = {en}, month = may, number = {3}, pages = {A13--A17}, publisher = {Society of Exploration Geophysicists}, title = {{Fast randomized full-waveform inversion with compressive sensing}}, url = {http://library.seg.org/doi/abs/10.1190/geo2011-0410.1}, volume = {77}, year = {2012} } @article{Dragoset2006, author = {Dragoset, B. and Moore, I. 
and Kostov, C.}, doi = {10.1111/j.1365-2478.2006.00581.x}, file = {:Users/timlin/Documents/Mendeley Desktop/Dragoset, Moore, Kostov/Dragoset, Moore, Kostov - 2006 - The impact of field-survey characteristics on surface-relatedmultiple attenuation.pdf:pdf}, journal = {Geophysical Prospecting}, month = nov, number = {6}, pages = {781--791}, title = {{The impact of field-survey characteristics on surface-relatedmultiple attenuation}}, url = {http://doi.wiley.com/10.1111/j.1365-2478.2006.00581.x}, volume = {54}, year = {2006} } @article{lin2013robustEPSI, abstract = {A recently proposed method called estimation of primaries by sparse inversion (EPSI) avoids the need for adaptive subtraction of approximate multiple predictions by directly inverting for the multiple-free subsurface impulse response as a collection of band-limited spikes. Although it can be shown that the correct primary impulse response is obtained through the sparsest possible solution, the original EPSI algorithm was not designed to take advantage of this result, and instead it relies on a multitude of inversion parameters, such as the level of sparsity per gradient update. We proposed and tested a new algorithm, named robust EPSI, in which we make obtaining the sparsest solution an explicit goal. Our approach remains a gradient-based approach like the original algorithm, but it is derived from a new biconvex optimization framework based on an extended basis-pursuit denoising formulation. Furthermore, because it is based on a general framework, robust EPSI can recover the impulse response in transform domains, such as sparsifying curvelet-based representations, without changing the underlying algorithm. We discovered that the sparsity-minimizing objective of our formulation enabled it to operate successfully on a variety of synthetic and field marine data sets without excessive tweaking of inversion parameters. 
We also found that recovering the solution in alternate sparsity domains can significantly improve the quality of the directly estimated primaries, especially for weaker late-arrival events. In addition, we found that robust EPSI produces a more artifact-free impulse response compared to the original algorithm.}, author = {Lin, Tim T. Y. and Herrmann, F. J.}, doi = {10.1190/geo2012-0097.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Lin, Herrmann/Lin, Herrmann - 2013 - Robust estimation of primaries by sparse inversion via one-norm minimization.pdf:pdf}, journal = {Geophysics}, keywords = {algorithm,multiples,optimization,sparse,surface-related multiple elimination (SRME)}, language = {en}, month = may, number = {3}, pages = {R133--R150}, publisher = {Society of Exploration Geophysicists}, title = {{Robust estimation of primaries by sparse inversion via one-norm minimization}}, url = {http://library.seg.org/doi/abs/10.1190/geo2012-0097.1}, volume = {78}, year = {2013} } @article{Ahmed2012, abstract = {We consider the problem of recovering two unknown vectors, \$\backslash w\$ and \$\backslash x\$, of length \$L\$ from their circular convolution. We make the structural assumption that the two vectors are members known subspaces, one with dimension \$N\$ and the other with dimension \$K\$. Although the observed convolution is nonlinear in both \$\backslash w\$ and \$\backslash x\$, it is linear in the rank-1 matrix formed by their outer product \$\backslash w\backslash x\^{}*\$. This observation allows us to recast the deconvolution problem as low-rank matrix recovery problem from linear measurements, whose natural convex relaxation is a nuclear norm minimization program. We prove the effectiveness of this relaxation by showing that for "generic" signals, the program can deconvolve \$\backslash w\$ and \$\backslash x\$ exactly when the maximum of \$N\$ and \$K\$ is almost on the order of \$L\$. 
That is, we show that if \$\backslash x\$ is drawn from a random subspace of dimension \$N\$, and \$\backslash w\$ is a vector in a subspace of dimension \$K\$ whose basis vectors are "spread out" in the frequency domain, then nuclear norm minimization recovers \$\backslash w\backslash x\^{}*\$ without error. We discuss this result in the context of blind channel estimation in communications. If we have a message of length \$N\$ which we code using a random \$L\backslash times N\$ coding matrix, and the encoded message travels through an unknown linear time-invariant channel of maximum length \$K\$, then the receiver can recover both the channel response and the message when \$L\backslash gtrsim N+K\$, to within constant and log factors.}, archivePrefix = {arXiv}, arxivId = {1211.5608}, author = {Ahmed, Ali and Recht, Benjamin and Romberg, Justin}, eprint = {1211.5608}, file = {:Users/timlin/Documents/Mendeley Desktop/Ahmed, Recht, Romberg/Ahmed, Recht, Romberg - 2012 - Blind Deconvolution using Convex Programming.pdf:pdf}, month = nov, pages = {40}, title = {{Blind Deconvolution using Convex Programming}}, url = {http://arxiv.org/abs/1211.5608}, year = {2012} } @article{Bronstein2005, author = {Bronstein, A.M. and Bronstein, M.M. 
and Zibulevsky, M.}, doi = {10.1109/TSP.2005.847822}, file = {:Users/timlin/Documents/Mendeley Desktop/Bronstein, Bronstein, Zibulevsky/Bronstein, Bronstein, Zibulevsky - 2005 - Relative optimization for blind deconvolution.pdf:pdf}, issn = {1053-587X}, journal = {IEEE Transactions on Signal Processing}, keywords = {Blind deconvolution,Deconvolution,Finite impulse response filter,Gaussian processes,Hessian structure,IIR filters,Iterative algorithms,Kernel,Maximum likelihood estimation,Newton method,Nonlinear optics,Optical control,Optical filters,computational complexity,convergence of numerical methods,fast-convergent algorithm,finite impulse response kernel,gradient method,gradient methods,infinite impulse response restoration kernel,iteration complexity,maximum likelihood,natural gradient,nonlinear function,nonlinear functions,optimisation,quasimaximum likelihood blind deconvolution,relative Newton method,relative optimization,subGaussian source,superGaussian source}, language = {English}, month = jun, number = {6}, pages = {2018--2026}, title = {{Relative optimization for blind deconvolution}}, url = {http://ieeexplore.ieee.org/xpls/abs\_all.jsp?arnumber=1433133}, volume = {53}, year = {2005} } @book{Verschuur2006, author = {Verschuur, D. J.}, file = {:Users/timlin/Documents/Mendeley Desktop/Verschuur/Verschuur - 2006 - Seismic multiple removal techniques Past, present and future.pdf:pdf}, pages = {191}, publisher = {EAGE Publications}, title = {{Seismic multiple removal techniques: Past, present and future}}, year = {2006} } @inproceedings{VanGroenestijn2007, author = {van Groenestijn, G. J. A. and Verschuur, D.
J.}, booktitle = {SEG Technical Program Expanded Abstracts}, doi = {10.1190/1.2793001}, file = {:Users/timlin/Documents/Mendeley Desktop/van Groenestijn, Verschuur/van Groenestijn, Verschuur - 2007 - Reconstruction of missing near offsets from multiples.pdf:pdf}, number = {1}, pages = {2570--2574}, title = {{Reconstruction of missing near offsets from multiples}}, url = {http://library.seg.org/vsearch/servlet/VerityServlet?KEY=GPYSA7\&smode=strresults\&sort=chron\&maxdisp=25\&threshold=0\&pjournals=GPYSA7,LEEDFF,GMALCH,SEGEAB\&possible1zone=article\&possible4=van+Groenestijn\&possible4zone=author\&bool4=and\&OUTLOG=NO\&viewabs=SEGEAB\&key=DISPLAY\&docID=8\&page=1\&chapter=0}, volume = {26}, year = {2007} } @article{Weglein2003, abstract = {This paper presents an overview and a detailed description of the key logic steps and mathematical-physics framework behind the development of practical algorithms for seismic exploration derived from the inverse scattering series. There are both significant symmetries and critical subtle differences between the forward scattering series construction and the inverse scattering series processing of seismic events. These similarities and differences help explain the efficiency and effectiveness of different inversion objectives. The inverse series performs all of the tasks associated with inversion using the entire wavefield recorded on the measurement surface as input. However, certain terms in the series act as though only one specific task, and no other task, existed. When isolated, these terms constitute a task-specific subseries. We present both the rationale for seeking and methods of identifying uncoupled task-specific subseries that accomplish: (1) free-surface multiple removal; (2) internal multiple attenuation; (3) imaging primaries at depth; and (4) inverting for earth material properties. A combination of forward series analogues and physical intuition is employed to locate those subseries.
We show that the sum of the four task-specific subseries does not correspond to the original inverse series since terms with coupled tasks are never considered or computed. Isolated tasks are accomplished sequentially and, after each is achieved, the problem is restarted as though that isolated task had never existed. This strategy avoids choosing portions of the series, at any stage, that correspond to a combination of tasks, i.e., no terms corresponding to coupled tasks are ever computed. This inversion in stages provides a tremendous practical advantage. The achievement of a task is a form of useful information exploited in the redefined and restarted problem; and the latter represents a critically important step in the logic and overall strategy. The individual subseries are analysed and their strengths, limitations and prerequisites exemplified with analytic, numerical and field data examples.}, author = {Weglein, Arthur B. and Ara\'{u}jo, Fernanda V. and Carvalho, Paulo M. and Stolt, Robert H. and Matson, Kenneth H. and Coates, Richard T. and Corrigan, Dennis and Foster, Douglas J. and Shaw, Simon A. and Zhang, Haiyan}, journal = {Inverse Problems}, doi = {10.1088/0266-5611/19/6/R01}, file = {:Users/timlin/Documents/Mendeley Desktop/Weglein et al/Weglein et al. - 2003 - Inverse scattering series and seismic exploration.pdf:pdf}, isbn = {9781605601298}, issn = {0266-5611}, month = dec, number = {6}, pages = {R27--R83}, title = {{Inverse scattering series and seismic exploration}}, url = {http://iopscience.iop.org/0266-5611/19/6/R01/}, volume = {19}, year = {2003} } @inproceedings{VanGroenestijn2009a, address = {Amsterdam}, author = {van Groenestijn, G. J. A. and Verschuur, D. J.}, booktitle = {71st EAGE Conference \& Exhibition}, title = {{Estimation of primaries by sparse inversion from blended data}}, year = {2009} } @inproceedings{Lin2011a, author = {Lin, Tim T. Y. and Herrmann, F.
J.}, booktitle = {SEG Technical Program Expanded Abstracts}, doi = {10.1190/1.3628116}, pages = {4354}, title = {{Robust source signature deconvolution and the estimation of primaries by sparse inversion}}, url = {http://library.seg.org/vsearch/servlet/VerityServlet?KEY=SEGLIB\&smode=strresults\&sort=rel\&maxdisp=25\&threshold=0\&pjournals=GPYSA7,LEEDFF,GMALCH,SEGEAB,JEEGXX,SAGEEP,SEGBKS\&possible1=Tim+Lin\&possible1zone=article\&SMODE=strsearch\&OUTLOG=NO\&viewabs=SEGEAB\&ke}, volume = {30}, year = {2011} } @article{VanGroenestijn2009, author = {van Groenestijn, G. J. A. and Verschuur, D. J.}, doi = {10.1190/1.3255509}, file = {:Users/timlin/Documents/Mendeley Desktop/van Groenestijn, Verschuur/van Groenestijn, Verschuur - 2009 - Estimation of primaries by sparse inversion applied to updown wavefields.pdf:pdf}, journal = {SEG Technical Program Expanded Abstracts}, pages = {3143}, title = {{Estimation of primaries by sparse inversion applied to up/down wavefields}}, url = {http://library.seg.org/vsearch/servlet/VerityServlet?KEY=SEGLIB\&smode=strresults\&maxdisp=25\&possible1=van+Groenestijn,+G.J.A.\&possible1zone=author\&OUTLOG=NO\&aqs=true\&viewabs=SEGEAB\&key=DISPLAY\&docID=5\&page=0\&chapter=0\&aqs=true}, volume = {28}, year = {2009} } @article{berkhout82smi, author = {Berkhout, A. J. and Pao, Y. H.}, doi = {10.1115/1.3162563}, file = {:Users/timlin/Documents/Mendeley Desktop/Berkhout, Pao/Berkhout, Pao - 1982 - Seismic Migration - Imaging of Acoustic Energy by Wave Field Extrapolation.pdf:pdf}, issn = {00218936}, journal = {Journal of Applied Mechanics}, month = sep, number = {3}, pages = {682}, publisher = {ASME}, shorttitle = {J. Appl. Mech.}, title = {{Seismic Migration - Imaging of Acoustic Energy by Wave Field Extrapolation}}, url = {http://dx.doi.org/10.1115/1.3162563}, volume = {49}, year = {1982} } @article{Tibshirani1996, abstract = {We propose a new method for estimation in linear models. 
The `lasso' minimizes the residual sum of squares subject to the sum of the absolute value of the coefficients being less than a constant. Because of the nature of this constraint it tends to produce some coefficients that are exactly 0 and hence gives interpretable models. Our simulation studies suggest that the lasso enjoys some of the favourable properties of both subset selection and ridge regression. It produces interpretable models like subset selection and exhibits the stability of ridge regression. There is also an interesting relationship with recent work in adaptive function estimation by Donoho and Johnstone. The lasso idea is quite general and can be applied in a variety of statistical models: extensions to generalized regression models and tree-based models are briefly described.}, author = {Tibshirani, R}, institution = {Department of Statistics, University of Toronto}, issn = {00359246}, journal = {Journal of the Royal Statistical Society Series B Methodological}, number = {1}, pages = {267--288}, pmid = {2346178}, publisher = {JSTOR}, series = {B}, title = {{Regression shrinkage and selection via the lasso}}, url = {http://www.jstor.org/stable/2346178}, volume = {58}, year = {1996} } @article{Daubechies2008, abstract = {Regularization of ill-posed linear inverse problems via ell1 penalization has been proposed for cases where the solution is known to be (almost) sparse. One way to obtain the minimizer of such an ell1 penalized functional is via an iterative soft-thresholding algorithm. We propose an alternative implementation to ell1-constraints, using a gradient method, with projection on ell1-balls. The corresponding algorithm uses again iterative soft-thresholding, now with a variable thresholding parameter. We also propose accelerated versions of this iterative method, using ingredients of the (linear) steepest descent method. 
We prove convergence in norm for one of these projected gradient methods, without and with acceleration.}, archivePrefix = {arXiv}, arxivId = {0706.4297v2}, author = {Daubechies, Ingrid and Fornasier, Massimo and Loris, Ignace}, doi = {10.1007/s00041-008-9039-8}, eprint = {0706.4297v2}, issn = {10695869}, journal = {Journal of Fourier Analysis and Applications}, number = {5-6}, pages = {764--792}, publisher = {Springer}, title = {{Accelerated projected gradient method for linear inverse problems with sparsity constraints}}, url = {http://www.springerlink.com/index/10.1007/s00041-008-9039-8}, volume = {14}, year = {2008} } @article{Hennenfent2008, author = {Hennenfent, Gilles and van den Berg, Ewout and Friedlander, Michael P. and Herrmann, F. J.}, doi = {10.1190/1.2944169}, issn = {00168033}, journal = {Geophysics}, number = {4}, pages = {A23}, title = {{New insights into one-norm solvers from the Pareto curve}}, url = {http://library.seg.org/getabs/servlet/GetabsServlet?prog=normal\&id=GPYSA7000073000004000A23000001\&idtype=cvips\&gifs=yes\&ref=no}, volume = {73}, year = {2008} } @article{Ziolkowski1999, author = {Ziolkowski, A M and Taylor, D B and Johnston, R G K}, doi = {10.1046/j.1365-2478.1999.00165.x}, issn = {0016-8025}, journal = {Geophysical Prospecting}, month = nov, number = {6}, pages = {841--870}, title = {{Marine seismic wavefield measurement to remove sea-surface multiples}}, url = {http://doi.wiley.com/10.1046/j.1365-2478.1999.00165.x}, volume = {47}, year = {1999} } @article{Verschuur1992, author = {Verschuur, D. J.}, doi = {10.1190/1.1443330}, journal = {Geophysics}, month = sep, number = {9}, pages = {1166}, title = {{Adaptive surface-related multiple elimination}}, url = {http://library.seg.org/getabs/servlet/GetabsServlet?prog=normal\&id=GPYSA7000057000009001166000001\&idtype=cvips\&gifs=yes\&ref=no}, volume = {57}, year = {1992} } @article{Frijlink2011, author = {Frijlink, Martijn O. and van Borselen, Roald G. 
and S\"{o}llner, Walter}, doi = {10.1111/j.1365-2478.2010.00914.x}, file = {:Users/timlin/Documents/Mendeley Desktop/Frijlink, van Borselen, S\"{o}llner/Frijlink, van Borselen, S\"{o}llner - 2011 - The free surface assumption for marine data-driven demultiple methods.pdf:pdf}, journal = {Geophysical Prospecting}, month = mar, number = {2}, pages = {269--278}, title = {{The free surface assumption for marine data-driven demultiple methods}}, url = {http://doi.wiley.com/10.1111/j.1365-2478.2010.00914.x}, volume = {59}, year = {2011} } @book{Fokkema1993, abstract = {The seismic applications of the reciprocity theorem developed in this book are partly based on lecture notes and publications from Professor de Hoop. Every student Professor de Hoop has taught knows the egg-shaped figure (affectionately known as "de Hoop's egg") that plays such an important role in his theoretical description of acoustic, electromagnetic and elastodynamic wave phenomena.On the one hand this figure represents the domain for the application of a reciprocity theorem in the analysis of a wavefield and on the other hand it symbolizes the power of a consistent wavefield description of this theorem.The roots of the reciprocity theorem lie in Green's theorem for Laplace's equation and Helmholtz's extension to the wave equation. In 1894, J.W. Strutt, who later became Lord Rayleigh, introduced in his book The Theory of Sound this extension under the name of Helmholtz's theorem. Nowadays it is known as Rayleigh's reciprocity theorem.Progress in seismic data processing requires the knowledge of all the theoretical aspects of the acoustic wave theory. The reciprocity theorem was chosen as the central theme of this book as it constitutes the fundaments of the seismic wave theory. In essence, two states are distinguished in this theorem. These can be completely different, although sharing the same time-invariant domain of application, and they are related via an interaction quantity. 
The particular choice of the two states determines the acoustic application, in turn making it possible to formulate the seismic experiment in terms of a geological system response to a known source function.In linear system theory, it is well known that the response to a known input function can be written as an integral representation where the impulse response acts as a kernel and operates on the input function. Due to the temporal invariance of the system, this integral representation is of the convolution type. In seismics, the temporal behaviour of the system is dealt with in a similar fashion; however the spatial interaction needs a different approach. The reciprocity theorem handles this interaction by identifying one state with the spatial impulse function, also known as the Green's function, while the other state is connected with the actual source distribution. In general, the resulting integral representation is not a spatial convolution. Moreover, the systematic use of the reciprocity theorem leads to a hierarchical description of the seismic experiment in terms of increasing complexity. Also from an educational point of view this approach provides a hierarchy and the student learns to break down the seismic problem into constituent partial solutions.This book should contribute to the understanding that the reciprocity theorem is a powerful tool in the analysis of the seismic experiment.}, author = {Fokkema, J. T. and van den Berg, P. 
M.}, isbn = {0444890440}, pages = {350}, publisher = {Elsevier Science}, title = {{Seismic applications of acoustic reciprocity}}, year = {1993} } @phdthesis{Biggs:1998wf, address = {Auckland}, author = {Biggs, David S C}, file = {:Users/timlin/Documents/Mendeley Desktop/Biggs/Biggs - 1998 - Accelerated iterative blind deconvolution.pdf:pdf}, month = dec, school = {University of Auckland}, title = {{Accelerated iterative blind deconvolution}}, year = {1998} } @article{amundsen01efs, abstract = {This paper presents a new, wave-equation based method for eliminating the effect of the free surface from marine seismic data without destroying primary amplitudes and without any knowledge of the subsurface. Compared with previously published methods which require an estimate of the source wavelet, the present method has the following characteristics: it does not require any information about the marine source array and its signature, it does not rely on removal of the direct wave from the data, and it does not require any explicit deghosting. Moreover, the effect of the source signature is removed from the data in the multiple elimination process by deterministic signature deconvolution, replacing the original source signature radiated from the marine source array with any desired wavelet (within the data frequency-band) radiated from a monopole point source.The fundamental constraint of the new method is that the vertical derivative of the pressure or the vertical component of the particle velocity is input to the free-surface demultiple process along with pressure recordings. These additional data are routinely recorded in ocean-bottom seismic surveys. The method can be applied to conventional towed streamer pressure data recorded in the water column at a depth which is greater than the depth of the source array only when the pressure derivative can be estimated, or even better, is measured. 
Since the direct wave and its source ghost is part of the free-surface demultiple, designature process, the direct arrival must be properly measured for the method to work successfully.In the case when the geology is close to horizontally layering, the free-surface multiple elimination method greatly simplifies, reducing to a well-known deterministic deconvolution process which can be applied to common shot gathers (or common receiver gathers or common midpoint gathers when source array variations are negligible) in the tau -p domain or frequency-wavenumber domain.}, author = {Amundsen, Lasse}, journal = {Geophysics}, month = jan, number = {1}, pages = {327--341}, title = {{Elimination of free-surface related multiples without need of the source wavelet}}, volume = {66}, year = {2001} } @article{Birgin:2000va, abstract = {Nonmonotone projected gradient techniques are considered for the minimization of differentiable functions on closed convex sets. The classical projected gradient schemes are extended to include a nonmonotone steplength strategy that is based on the Grippo-Lampariello-Lucidi nonmonotone line search. In particular, the nonmonotone strategy is combined with the spectral gradient choice of steplength to accelerate the convergence process. In addition to the classical projected gradient nonlinear path, the feasible spectral projected gradient is used as a search direction to avoid additional trial projections during the one-dimensional search process. Convergence properties and extensive numerical results are presented.}, author = {Birgin, E G and Martinez, J M and Raydan, Marcos}, journal = {SIAM Journal on Optimization}, month = jan, number = {4}, pages = {1196--1211}, title = {{Nonmonotone spectral projected gradient methods on convex sets}}, volume = {10}, year = {2000} } @article{groenestijn09eps, abstract = {Accurate removal of surface-related multiples remains a challenge in many cases. 
To overcome typical inaccuracies in current multiple-removal techniques, we have developed a new primary-estimation method: estimation of primaries by sparse inversion (EPSI). EPSI is based on the same primary-multiple model as surface-related multiple elimination (SRME) and also requires no subsurface model. Unlike SRME, EPSI estimates the primaries as unknowns in a multidimensional inversion process rather than in a subtraction process. Furthermore, it does not depend on interpolated missing near-offset data because it can reconstruct missing data simultaneously. Sparseness plays a key role in the new primary-estimation procedure. The method was tested on 2D synthetic data.}, author = {van Groenestijn, G. J. A. and Verschuur, D. J.}, doi = {10.1190/1.3111115}, file = {:Users/timlin/Documents/Mendeley Desktop/van Groenestijn, Verschuur/van Groenestijn, Verschuur - 2009 - Estimating primaries by sparse inversion and application to near-offset data reconstruction.pdf:pdf}, journal = {Geophysics}, month = jan, number = {3}, pages = {A23--A28}, title = {{Estimating primaries by sparse inversion and application to near-offset data reconstruction}}, volume = {74}, year = {2009} } @article{Weglein:1997wp, abstract = {We present a multidimensional multiple-attenuation method that does not require any subsurface information for either surface or internal multiples.To derive these algorithms, we start with a scattering theory description of seismic data, We then introduce and develop several new theoretical concepts concerning the fundamental nature of and the relationship between forward and inverse scattering. 
These include (1) the idea that the inversion process can be viewed as a series of steps, each with a specific task; (2) the realization that the inverse-scattering series provides an opportunity for separating out subseries with specific and useful tasks; (3) the recognition that these task-specific subseries can have different (and more favorable) data requirements, convergence, and stability conditions than does the original complete inverse series; and, most importantly, (4) the development of the first method for physically interpreting the contribution that individual terms (and pieces of terms) in the inverse series make toward these tasks in the inversion process, which realizes the selection of task-specific subseries.To date, two task-specific subseries have been identified: a series for eliminating free-surface multiples and a series for attenuating internal multiples. These series result in distinct algorithms for free-surface and internal multiples, and neither requires a model of the subsurface reflectors that generate the multiples. The method attenuates multiples while preserving primaries at all offsets; hence, these methods are equally well suited for subsequent poststack structural mapping or prestack amplitude analysis.The method has demonstrated its usefulness and added value for free-surface multiples when (1) the overburden has significant lateral variation, (2) reflectors are curved or dipping, (3) events are interfering, (4) multiples are difficult to identify, and (5) the geology is complex. The internal-multiple algorithm has been tested with good results on band-limited synthetic data; field data tests are planned. This procedure provides an approach for attenuating a significant class of heretofore inaccessible and troublesome multiples.There has been a recent rejuvenation of interest in multiple attenuation technology resulting from current exploration challenges, e.g., in deep water with a variable water bottom or in subsalt plays. 
These cases are representative of circumstances where 1-D assumptions are often violated and reliable detailed subsurface information is not available typically. The inverse scattering multiple attenuation methods are specifically designed to address these challenging problems. To date it is the only multidimensional multiple attenuation method that does not require 1-D assumptions, moveout differences, or ocean-bottom or other subsurface velocity dr structural information for either free-surface or internal multiples.These algorithms require knowledge of the source signature and near-source traces. We describe several current approaches, e.g., energy minimization and trace extrapolation; for satisfying these prerequisites in a stable and reliable manner.}, author = {Weglein, Arthur B and Gasparotto, Fernanda A and Carvalho, Paulo M and Stolt, Robert H}, doi = {10.1190/1.1444298}, file = {:Users/timlin/Documents/Mendeley Desktop/Weglein et al/Weglein et al. - 1997 - An inverse-scattering series method for attenuating multiples in seismic reflection data.pdf:pdf}, journal = {Geophysics}, month = jan, number = {6}, pages = {1975--1989}, title = {{An inverse-scattering series method for attenuating multiples in seismic reflection data}}, volume = {62}, year = {1997} } @article{Anstey66, author = {Anstey, N A and Newman, P}, journal = {Geophysical Prospecting}, number = {4}, pages = {389--426}, title = {{Part I: The sectional auto-correlogram and Part II: The sectional retro-correlogram}}, volume = {14}, year = {1966} } @mastersthesis{almatar10msc, abstract = {A recent robust multiple-elimination technique, based on the underlying principle that relates primary impulse response to total upgoing wavefield, tries to change the paradigm that sees surface-related multiples as noise that needs to be removed from the data prior to imaging. 
This technique, estimation of primaries by sparse inversion (EPSI), (van Groenestijn and Verschuur, 2009; Lin and Herrmann, 2009), proposes an inversion procedure during which the source function and surface- free impulse response are directly calculated from the upgoing wavefield using an alternating optimization procedure. EPSI hinges on a delicate interplay between surface-related multiples and pri- maries. Finite aperture and other imperfections may violate this relationship. In this thesis, we investigate how to make EPSI more robust by incorporating curvelet- domain matching in its formulation. Compared to surface-related multiple removal (SRME), where curvelet-domain matching was used successfully, incorporating this step has the additional advantage that matches multiples to multiples rather than predicated multiples to total data as in SRME.}, author = {AlMatar, M H}, keywords = {MSc}, school = {University of British Columbia}, title = {{Estimation of Surface-free Data by Curvelet-domain Matched Filtering and Sparse Inversion}}, year = {2010} } @article{berkhout97eom, abstract = {A review has been given of the surface-related multiple problem by making use of the so-called feedback model. From the resulting equations it has been concluded that the proposed solution does not require any properties of the subsurface. However, source-detector and reflectivity properties of the surface need be specified. Those properties have been quantified in a surface operator and this operator is estimated as part of the multiple removal problem. The surface-related multiple removal algorithm has been formulated in terms of a Neumann series and in terms of an iterative equation. The Neumann formulation requires a nonlinear optimization process for the surface operator; while the iterative formulation needs a number of linear optimizations. The iterative formulation also has the advantage that it can be integrated easily with another multiple removal method. 
An algorithm for the removal of internal multiples has been proposed as well. This algorithm is an extension of the surface-related method. Removal of internal multiples requires knowledge of the macro velocity model between the surface and the upper boundary of the multiple generating layer. In part II (also published in this issue) the success of the proposed algorithms has been demonstrated on numerical experiments and field data examples.}, author = {Berkhout, A. J. and Verschuur, D. J.}, doi = {10.1190/1.1444261}, journal = {Geophysics}, keywords = {SRME}, number = {5}, pages = {1586--1595}, publisher = {SEG}, title = {{Estimation of multiple scattering by iterative inversion, Part I: Theoretical considerations}}, volume = {62}, year = {1997} } @article{hennenfent10nct, abstract = {We extend our earlier work on the nonequispaced fast discrete curvelet transform (NFDCT) and introduce a second generation of the transform. This new generation differs from the previous one by the approach taken to compute accurate curvelet coefficients from irregularly sampled data. The first generation relies on accurate Fourier coefficients obtained by an l2-regularized inversion of the nonequispaced fast Fourier transform (FFT) whereas the second is based on a direct l1-regularized inversion of the operator that links curvelet coefficients to irregular data. Also, by construction the second generation NFDCT is lossless unlike the first generation NFDCT. This property is particularly attractive for processing irregularly sampled seismic data in the curvelet domain and bringing them back to their irregular record-ing locations with high fidelity. Secondly, we combine the second generation NFDCT with the standard fast discrete curvelet transform (FDCT) to form a new curvelet-based method, coined nonequispaced curvelet reconstruction with sparsity-promoting inversion (NCRSI) for the regularization and interpolation of irregularly sampled data. 
We demonstrate that for a pure regularization problem the reconstruction is very accurate. The signal-to-reconstruction error ratio in our example is above 40 dB. We also conduct combined interpolation and regularization experiments. The reconstructions for synthetic data are accurate, particularly when the recording locations are optimally jittered. The reconstruction in our real data example shows amplitudes along the main wavefronts smoothly varying with limited acquisition imprint.}, author = {Hennenfent, Gilles and Fenelon, Lloyd and Herrmann, F. J.}, doi = {10.1190/1.3494032}, file = {:Users/timlin/Documents/Mendeley Desktop/Hennenfent, Fenelon, Herrmann/Hennenfent, Fenelon, Herrmann - 2010 - Nonequispaced curvelet transform for seismic data reconstruction A sparsity-promoting approach.pdf:pdf}, institution = {UBC Earth and Ocean Sciences Department}, journal = {Geophysics}, keywords = {curvelet transforms,data acquisition,geophysical}, number = {6}, pages = {WB203--WB210}, publisher = {SEG}, title = {{Nonequispaced curvelet transform for seismic data reconstruction: A sparsity-promoting approach}}, url = {http://link.aip.org/link/?GPY/75/WB203/1}, volume = {75}, year = {2010} } @article{herrmann08nps, abstract = {Seismic data recovery from data with missing traces on otherwise regular acquisition grids forms a crucial step in the seismic processing flow. For instance, unsuccessful recovery leads to imaging artifacts and to erroneous predictions for the multiples, adversely affecting the performance of multiple elimination. A non-parametric transform-based recovery method is presented that exploits the compression of seismic data volumes by recently developed curvelet frames. The elements of this transform are multidimensional and directional and locally resem- ble wavefronts present in the data, which leads to a compressible representation for seismic data. 
This compression enables us to formulate a new curvelet-based seismic data recovery algorithm through sparsity-promoting inversion. The concept of sparsity-promoting inversion is in itself not new to geophysics. However, the recent insights from the field of `compressed sensing' are new since they clearly identify the three main ingredients that go into a successful formulation of a re- covery problem, namely a sparsifying transform, a sampling strategy that subdues coherent aliases and a sparsity-promoting program that recovers the largest entries of the curvelet-domain vector while explaining the measurements. These concepts are illustrated with a stylized experiment that stresses the importance of the degree of compression by the sparsifying transform. With these findings, a curvelet-based recovery algorithms is developed, which recovers seismic wavefields from seismic data volumes with large percentages of traces missing. During this construction, we benefit from the main three ingredients of compressive sampling, namely the curvelet compression of seismic data, the existence of a favorable sam- pling scheme and the formulation of a large-scale sparsity-promoting solver based on a cooling method. The recovery performs well on synthetic as well as real data and performs better by virtue of the sparsifying property of curvelets. Our results are applicable to other areas such as global seismology.}, author = {Herrmann, F. J. and Hennenfent, Gilles}, doi = {10.1111/j.1365-246X.2007.03698.x}, journal = {Geophysical Journal International}, keywords = {SLIM,curvelet transform,reconstruction}, month = apr, pages = {233--248}, title = {{Non-parametric seismic data recovery with curvelet frames}}, volume = {173}, year = {2008} } @article{vandenberg08ptp, abstract = {The basis pursuit problem seeks a minimum one-norm solution of an underdetermined least-squares problem. 
Basis pursuit denoise (BPDN) fits the least-squares problem only approximately, and a single parameter determines a curve that traces the optimal trade-off between the least-squares fit and the one-norm of the solution. We prove that this curve is convex and continuously differentiable over all points of interest, and show that it gives an explicit relationship to two other optimization problems closely related to BPDN. We describe a root-finding algorithm for finding arbitrary points on this curve; the algorithm is suitable for problems that are large scale and for those that are in the complex domain. At each iteration, a spectral gradient-projection method approximately minimizes a least-squares problem with an explicit one-norm constraint. Only matrix-vector operations are required. The primal-dual solution of this problem gives function and derivative information needed for the root-finding method. Numerical experiments on a comprehensive set of test problems demonstrate that the method scales well to large problems.}, author = {van den Berg, Ewout and Friedlander, Michael P.}, doi = {10.1137/080714488}, institution = {UBC Computer Science Department}, journal = {SIAM Journal on Scientific Computing}, keywords = {Newton's method,basis pursuit,convex program,duality,one-norm regularization,projected gradient,root-finding,sparse solutions}, month = jan, number = {2}, pages = {890--912}, publisher = {SIAM}, title = {{Probing the Pareto frontier for basis pursuit solutions}}, volume = {31}, year = {2008} } @article{verschuur97eom, abstract = {A surface-related multiple-elimination method can be formulated as an iterative procedure: the output of one iteration step is used as input for the next iteration step (part I of this paper). 
In this paper (part II) it is shown that the procedure can be made very efficient if a good initial estimate of the multiple-free data set can be provided in the first iteration, and in many situations, the Radon-based multiple-elimination method may provide such an estimate. It is also shown that for each iteration, the inverse source wavelet can be accurately estimated by a linear (least-squares) inversion process. Optionally, source and detector variations and directivity effects can be included, although the examples are given without these options. The iterative multiple elimination process, together with the source wavelet estimation, are illustrated with numerical experiments as well as with field data examples. The results show that the surface-related multiple-elimination process is very effective in time gates where the moveout properties of primaries and multiples are very similar (generally deep data), as well as for situations with a complex multiple-generating system.}, author = {Verschuur, D. J. and Berkhout, A. J.}, doi = {10.1190/1.1444262}, journal = {Geophysics}, keywords = {SRME}, number = {5}, pages = {1596--1611}, publisher = {SEG}, title = {{Estimation of multiple scattering by iterative inversion, Part II: Practical aspects and examples}}, volume = {62}, year = {1997} } @article{Kreimer2013, abstract = {Many standard seismic data processing and imaging techniques require regularly sampled data. Prestack seismic data are multidimensional signals that can be represented via low-rank fourth-order tensors in the frequency‐space (f‐x) domain. We propose to adopt tensor completion strategies to recover unrecorded observations and to improve the signal-to-noise ratio of prestack seismic volumes. Tensor completion can be posed as an inverse problem and solved by minimizing a convex objective function. The objective function contains two terms: a data misfit and a nuclear norm. 
The data misfit measures the proximity of the reconstructed seismic data to the observations. The nuclear norm constraints the reconstructed data to be a low-rank tensor. In essence, we solve the prestack seismic reconstruction problem via low-rank tensor completion. The cost function of the problem is minimized using the alternating direction method of multipliers. We present synthetic examples to illustrate the behavior of the al...}, author = {Kreimer, Nadia and Stanton, Aaron and Sacchi, Mauricio D.}, doi = {10.1190/geo2013-0022.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Kreimer, Stanton, Sacchi/Kreimer, Stanton, Sacchi - 2013 - Tensor completion based on nuclear norm minimization for 5D seismic data reconstruction.pdf:pdf}, issn = {0016-8033}, journal = {Geophysics}, keywords = {4D,interpolation,optimization,prestack,processing}, language = {en}, month = oct, number = {6}, pages = {V273--V284}, publisher = {Society of Exploration Geophysicists}, title = {{Tensor completion based on nuclear norm minimization for 5D seismic data reconstruction}}, url = {http://library.seg.org/doi/abs/10.1190/geo2013-0022.1}, volume = {78}, year = {2013} } @article{Ma2013, abstract = {We have developed a new algorithm for the reconstruction of seismic traces randomly missing from a uniform grid of a 3D seismic volume. Several algorithms have been developed for such reconstructions, based on properties of the seismic wavefields and on signal processing concepts, such as sparse signal representation in a transform domain. We have investigated a novel approach, originally introduced for noise removal, which is based on the premise that for suitable representation of the seismic data as matrices or tensors, the rank of the seismic data (computed by singular value decomposition) increases with noise or missing traces. Thus, we apply low-rank matrix completion (MC) with a designed texture-patch transformation to 3D seismic data reconstruction. 
Low-rank components capture geometrically meaningful structures in seismic data that encompass conventional local features such as events and dips. The low-rank MC is based on nuclear-norm minimization. An efficient L1-norm minimizing algorithm...}, author = {Ma, Jianwei}, doi = {10.1190/geo2012-0465.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Ma/Ma - 2013 - Three-dimensional irregular seismic data reconstruction via low-rank matrix completion.pdf:pdf}, issn = {0016-8033}, journal = {Geophysics}, keywords = {interpolation,signal processing,sparse}, language = {en}, month = sep, number = {5}, pages = {V181--V192}, publisher = {Society of Exploration Geophysicists}, title = {{Three-dimensional irregular seismic data reconstruction via low-rank matrix completion}}, url = {http://library.seg.org/doi/abs/10.1190/geo2012-0465.1}, volume = {78}, year = {2013} } @article{kumar2014GEOPemc, abstract = {Despite recent developments in improved acquisition, seismic data often remains undersampled along source and/or receiver coordinates, resulting in incomplete data for key applications such as migration and multiple prediction requiring densely sampled, alias-free wide azimuth data. When seismic data is organized in monochromatic frequency slices, missing-trace interpolation can be cast into a matrix completion problem, where the low-rank structure of seismic data in the appropriate domain can be exploited to recover densely sampled data volumes from data with missing entries. Current approaches that exploit low-rank structure are based on repeated singular value decompositions, which become prohibitively expensive for large-scale problems unless the data is partitioned and processed in small windows. While computationally manageable, our theory and experiments show degraded results when the windows sizes become too small. 
To overcome this problem, we carry out our interpolations for each frequency independently while working with the complete data in the midpoint-offset domain instead of windowing. For lateral varying geologies that are not too complex, working in the midpoint-offset domain leads to favorable rank minimization recovery because the singular values decay faster while sampling-related artifacts remain full rank. This combination of fast decay and full-rank artifacts agrees with the principles of the compressive sensing paradigm, which is based on exploiting (low-rank) structure, a sampling process that breaks this structure, and a rank-minimizing optimization that restores the signal\{$\backslash$textquoteright\}s structure and interpolates the subsampled data. To make our proposed method computationally viable and practical, we introduce a factorization-based approach that avoids computing the singular values, and that therefore scales to large seismic data problems as long as the factors can be stored in memory. Tests on realistic two- and three-dimensional seismic data show that our method compares favorably, both in terms of computational speed and recovery quality, to existing curvelet-based and tensor-based techniques.}, annote = {Submitted to Geophysics on August 8, 2014.}, author = {Kumar, Rajiv and Silva, Curt Da and Akalin, Okan and Aravkin, Aleksandr Y and Mansour, Hassan and Recht, Ben and Herrmann, Felix J}, journal = {Geophysics}, keywords = {interpolation,low-rank,private}, title = {{Efficient matrix completion for seismic data reconstruction}}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2014/kumar2014GEOPemc/kumar2014GEOPemc.pdf}, year = {2015} } @inproceedings{Kumar2013, author = {Kumar, Rajiv and Mansour, Hassan and Herrmann, Felix J and Aravkin, Aleksandr Y}, booktitle = {SEG Technical Program Expanded Abstracts}, doi = {10.1190/segam2013-1165.1}, file = {:Users/timlin/Documents/Mendeley Desktop/Kumar et al/Kumar et al. 
- 2013 - Reconstruction of seismic wavefields via low-rank matrix factorization in the hierarchical-separable matrix repres.pdf:pdf}, keywords = {2D,frequency-domain,interpolation,sp,transform}, language = {en}, pages = {3628--3633}, title = {{Reconstruction of seismic wavefields via low-rank matrix factorization in the hierarchical-separable matrix representation}}, url = {http://library.seg.org/doi/abs/10.1190/segam2013-1165.1}, year = {2013} }