% This file was created with JabRef 2.9. % Encoding: ISO8859_1 @CONFERENCE{esser2014SIAMISsdc, author = {Ernie Esser}, title = {Solving {DC} programs that promote group 1-sparsity}, year = {2014}, month = {05}, booktitle = {SIAM Conference on Imaging Science}, abstract = {Many interesting applications require solving nonconvex problems that would be convex if not for a group 1-sparsity constraint. Splitting methods that are effective for convex problems can still work well in this setting. We propose several nonconvex penalties that can be used to promote group 1-sparsity in the framework of difference of convex or primal dual hybrid gradient (PDHG) methods. Applications to nonlocal inpainting, linear unmixing and phase unwrapping are demonstrated.}, keywords = {group 1-sparsity, difference of convex, phase unwrapping, nonconvex PDHG, operator splitting}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2014/esser2014SIAMISsdc_pres.pdf}, note = {(SIAM Conference on Imaging Science)} } @CONFERENCE{herrmann2014ROSErpe, author = {Felix J. Herrmann}, title = {Relax the physics and expand the search space – {FWI} via {Wavefield} {Reconstruction} {Inversion}}, year = {2014}, month = {05}, booktitle = {ROSE Consortium, Norway, 2014}, keywords = {ROSE, Consortium, FWI, WRI}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ROSE/2014/herrmann2014ROSErpe.pdf}, note = {(ROSE Consortium)} } @CONFERENCE{herrmann2014EAGEWSrrt, author = {Haneet Wason and Felix Oghenekohwo and Felix J. Herrmann}, title = {Randomization and repeatability in time-lapse marine acquisition}, year = {2014}, month = {04}, booktitle = {EAGE Workshop on Land and Ocean Bottom; Broadband Full Azimuth Seismic Surveys, Spain, 2014}, keywords = {EAGE, workshop, 4D seismic, marine acquisition}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/herrmann2014EAGEWSrrt.pdf}, note = {(EAGE Workshop, Spain)} } @CONFERENCE{lago2014CMCCRMN, author = {Rafael Lago and Art Petrenko and Zhilong Fang and Felix J. Herrmann}, title = {{CRMN} method for solving time-harmonic wave equation}, year = {2014}, month = {04}, booktitle = {Copper Mountain Conference}, abstract = {We address the solution of PDEs associated with wave propagation phenomena in heterogeneous media using the CGMN method. It consists of a conjugate gradients method using Kaczmarz double sweeps as a preconditioner. This preconditioner has the property of ``symmetrizing'' the problem, allowing short-recurrence, Lanczos-type Krylov methods to be employed. In this talk we propose the use of CR, a minimal residual Krylov method closely related to CG. We study the proposed method, which we call CRMN, and discuss crucial aspects such as the behaviour of the norm of the residual and the norm of the true error. We also discuss the result of an inversion of a small subsample of the 3D SEG/EAGE Overthrust velocity model using a frugal full-waveform inversion method with CRMN and CGMN for solving the associated PDE, showing strong motivation for studying the behaviour of CRMN for larger, realistic cases.}, keywords = {CRMN, CGMN, forward modelling, Helmholtz equation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CMC/2014/lago2014CMCCRMN.pdf}, note = {(Copper Mountain)} } @CONFERENCE{lago2014EAGEfst, author = {Rafael Lago and Art Petrenko and Zhilong Fang and Felix J.
Herrmann}, title = {Fast solution of time-harmonic wave-equation for full-waveform inversion}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {For many full-waveform inversion techniques, the most computationally intensive step is the computation of a numerical solution for the wave equation at every iteration. In the frequency-domain approach, this requires the solution of very large, complex, sparse, ill-conditioned linear systems. In this extended abstract we turn our attention specifically to the CGMN method for solving PDEs, known for being flexible (i.e., able to treat acoustic data as well as visco-elastic or more complex scenarios equally), efficient with respect to both memory and computation time, and offering controllable accuracy of the final approximation. We propose an improvement to the known CGMN method by imposing a minimal-residual condition, which incurs the storage of one extra model-sized vector. The resulting algorithm, called CRMN, enjoys several interesting properties, such as a monotonically nonincreasing residual norm and the minimal-residual property, guaranteeing optimal convergence for the relative-residual criterion. We discuss numerical experiments both for an isolated PDE solve and within the inversion procedure, showing that in a realistic scenario we can expect a speedup of around 25\% when using CRMN rather than CGMN.}, keywords = {CRMN, CGMN, FWI, time-harmonic wave equation, EAGE}, doi = {10.3997/2214-4609.20140812}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/lago2014EAGEfst/lago2014EAGEfst.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/lago2014EAGEfst/lago2014EAGEfst_pres.pdf} } @CONFERENCE{peters2014EAGEweb, author = {Bas Peters and Felix J. Herrmann and Tristan van Leeuwen}, title = {Wave-equation based inversion with the penalty method: adjoint-state versus wavefield-reconstruction inversion}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {In this paper we make a comparison between wave-equation based inversions using the adjoint-state and penalty methods. While the adjoint-state method involves the minimization of a data misfit and exact solutions of the wave equation for the current velocity model, the penalty method aims to first find a wavefield that jointly fits the data and honours the physics, in a least-squares sense. Given this reconstructed wavefield, which is a proxy for the true wavefield in the true model, we calculate updates for the velocity model. Aside from being less nonlinear (the acoustic wave equation is linear in the wavefield and in the model parameters, but not in both), the inversion is carried out over a solution space that includes both the model and the wavefield. This larger search space allows the algorithm to circumnavigate local minima, very much in the same way as recently proposed model extensions aim to accomplish. We include examples for low frequencies, where we compare full-waveform inversion results for both methods, for good and bad starting models, and for high frequencies, where we compare reverse-time migration with linearized imaging based on wavefield-reconstruction inversion.
The examples confirm the expected benefits of the proposed method.}, keywords = {full-waveform inversion, optimization, imaging, penalty method, EAGE}, doi = {10.3997/2214-4609.20140704}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/peters2014EAGEweb/peters2014EAGEweb.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/peters2014EAGEweb/peters2014EAGEweb_pres.pdf} } @CONFERENCE{leeuwen2014EAGEntf, author = {Tristan van Leeuwen and Felix J. Herrmann and Bas Peters}, title = {A new take on {FWI}: wavefield reconstruction inversion}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {We discuss a recently proposed novel method for waveform inversion: Wavefield Reconstruction Inversion (WRI). As opposed to conventional FWI -- which attempts to minimize the error between observed and predicted data obtained by solving a wave equation -- WRI reconstructs a wavefield from the data and extracts a model update from this wavefield by minimizing the wave-equation residual. The method does not require explicit computation of an adjoint wavefield as all the necessary information is contained in the reconstructed wavefield. We show how the corresponding model updates can be interpreted physically, analogously to the conventional imaging-condition-based approach.}, keywords = {wavefield reconstruction inversion, penalty method, optimization, full-waveform inversion, EAGE}, doi = {10.3997/2214-4609.20140703}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/leeuwen2014EAGEntf/leeuwen2014EAGEntf.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/leeuwen2014EAGEntf/leeuwen2014EAGEntf_pres.pdf} } @CONFERENCE{oghenekohwo2014EAGEtls, author = {Felix Oghenekohwo and Ernie Esser and Felix J. Herrmann}, title = {Time-lapse seismic without repetition: reaping the benefits from randomized sampling and joint recovery}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {In the current paradigm of 4-D seismic, guaranteeing repeatability in acquisition and processing of the baseline and monitor surveys ranks highest amongst the technical challenges one faces in detecting time-lapse signals. By using recent insights from the field of compressive sensing, we show that the condition of survey repeatability can be relaxed as long as we carry out a sparsity-promoting program that exploits shared information between the baseline and monitor surveys. By inverting for the baseline and monitor surveys as the common ``background'', we are able to compute high-fidelity 4-D differences from carefully selected synthetic surveys that have different sets of sources/receivers missing. This synthetic example is a proof of concept of an exciting new approach to randomized 4-D acquisition where the time-lapse signal can be computed as long as the survey details, such as source/receiver locations, are known afterwards.}, keywords = {4-D seismic, time-lapse, joint recovery, EAGE}, doi = {10.3997/2214-4609.20141478}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/oghenekohwo2014EAGEtls/oghenekohwo2014EAGEtls.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/oghenekohwo2014EAGEtls/oghenekohwo2014EAGEtls_pres.pdf} } @CONFERENCE{dasilva2014EAGEhtucknoisy, author = {Curt Da Silva and Felix J.
Herrmann}, title = {Low-rank promoting transformations and tensor interpolation - applications to seismic data denoising}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {In this abstract, we extend our previous work in Hierarchical Tucker (HT) tensor completion, which uses an extremely efficient format for representing high-dimensional tensors exhibiting low-rank structure, to handle subsampled tensors with noisy entries. We consider a ``low-noise'' case, so that the energies of the noise and the signal are nearly indistinguishable, and a ``high-noise'' case, in which the noise energy is now scaled to the amplitude of the entire data volume. We examine the effect of the noise in terms of the singular values along different matricizations of the data, i.e., reshapings of the tensor along different modes. By interpreting this effect in the context of tensor completion, we demonstrate the inefficacy of denoising by this method in the source-receiver domain. In light of this observation, we transform the decimated, noisy data into the midpoint-offset domain, which promotes low-rank behaviour in the signal and high-rank behaviour in the noise. This distinction between signal and noise allows low-rank interpolation to effectively denoise the signal with only a marginal increase in computational cost. We demonstrate the effectiveness of this approach on a 4D frequency slice.}, keywords = {hierarchical tucker, structured tensor, tensor interpolation, Riemannian optimization, low-rank transform, seismic denoising, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/dasilva2014EAGEhtucknoisy/dasilva2014EAGEhtucknoisy.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/dasilva2014EAGEhtucknoisy/dasilva2014EAGEhtucknoisy_pres.pdf} } @CONFERENCE{esser2014EAGEacp, author = {Ernie Esser and Felix J. Herrmann}, title = {Application of a convex phase retrieval method to blind seismic deconvolution}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {A classical strategy for blind seismic deconvolution is to first estimate the autocorrelation of the unknown source wavelet from the data and then recover the wavelet by assuming it has minimum phase. However, computing the minimum phase wavelet directly from the amplitude spectrum can be sensitive to even extremely small errors, especially in the coefficients close to zero. Since the minimum phase requirement follows from an assumption that the wavelet should be as impulsive as possible, we propose to directly estimate an impulsive wavelet by minimizing a weighted $\ell_2$ penalty subject to a constraint on its amplitude spectrum. This nonconvex model has the form of a phase retrieval problem, in this case recovering a signal given only estimates of the magnitudes of its Fourier coefficients.
Following recent work on convex relaxations of phase retrieval problems, we propose a convex semidefinite program for computing an impulsive minimum phase wavelet whose amplitude spectrum is close to a given estimate, and we show that this can be robustly solved by a Douglas-Rachford splitting method for convex optimization.}, keywords = {source wavelet estimation, blind deconvolution, convex phase retrieval, EAGE}, doi = {10.3997/2214-4609.20141590}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/esser2014EAGEacp/esser2014EAGEacp.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/esser2014EAGEacp/esser2014EAGEacp_poster.pdf} } @CONFERENCE{lin2014EAGEmas, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Multilevel acceleration strategy for the robust estimation of primaries by sparse inversion}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {We propose a method to substantially reduce the computational costs of the Robust Estimation of Primaries by Sparse Inversion algorithm, based on a multilevel inversion strategy that shifts early iterations of the method to successively coarser spatial sampling grids. This method requires no change in the core implementation of the original algorithm, and additionally only relies on trace decimation, low-pass filtering, and rudimentary interpolation techniques. We furthermore demonstrate significant computational speedups with this approach on a synthetic seismic line.}, keywords = {multiples, EPSI, REPSI, multigrid, multilevel, multiscale, EAGE}, doi = {10.3997/2214-4609.20140672}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/lin2014EAGEmas/lin2014EAGEmas.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/lin2014EAGEmas/lin2014EAGEmas_pres.pdf} } @CONFERENCE{zheglova2014EAGEams, author = {Polina Zheglova and Felix J. Herrmann}, title = {Application of matrix square root and its inverse to downward wavefield extrapolation}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {In this paper we propose a method for computation of the square root of the Helmholtz operator and its inverse that arise in downward extrapolation methods based on the one-way wave equation. Our approach involves factorization of the discretized Helmholtz operator at each depth by extracting the matrix square root after applying the spectral projector in order to eliminate the evanescent modes. The computation of the square root of the discrete Helmholtz operator and its inverse is done using polynomial recursions and can be combined with low-rank matrix approximations to reduce the computational cost for large problems. The resulting square root operator is able to model the propagating modes kinematically correctly at angles of up to 90 degrees. Preliminary results on convergence of iterations are presented in this abstract. Potential applications include seismic modeling, imaging and inversion.}, keywords = {square root, modelling, one-way wave equation, extrapolation, EAGE}, doi = {10.3997/2214-4609.20141184}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/zheglova2014EAGEams/zheglova2014EAGEams.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/zheglova2014EAGEams/zheglova2014EAGEams_pres.pdf} } @CONFERENCE{fang2014EAGEfuq, author = {Zhilong Fang and Curt Da Silva and Felix J.
Herrmann}, title = {Fast uncertainty quantification for {2D} full-waveform inversion with randomized source subsampling}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {Uncertainties arise in every area of seismic exploration, especially in full-waveform inversion, which is highly non-linear. In the framework of Bayesian inference, uncertainties can be analyzed by sampling the posterior probability density distribution with a Markov chain Monte Carlo (MCMC) method. We reduce the cost of computing the posterior distribution by working with randomized subsets of sources. These approximations, together with the Gaussian assumption and approximation of the Hessian, lead to a computationally tractable uncertainty quantification. Application of this approach to a synthetic example leads to standard deviations and confidence intervals that are qualitatively consistent with our expectations.}, keywords = {Uncertainty quantification, FWI, Markov chain Monte Carlo, randomized sources subsampling, EAGE}, doi = {10.3997/2214-4609.20140715}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/fang2014EAGEfuq/fang2014EAGEfuq.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/fang2014EAGEfuq/fang2014EAGEfuq_pres.pdf} } @CONFERENCE{petrenko2014EAGEaih, author = {Art Petrenko and Tristan van Leeuwen and Diego Oriato and Simon Tilbury and Felix J. Herrmann}, title = {Accelerating an iterative {Helmholtz} solver with {FPGAs}}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {We implement the Kaczmarz row-projection algorithm (Kaczmarz, 1937) on a CPU host + FPGA accelerator platform using techniques of dataflow programming. This algorithm is then used as the preconditioning step in CGMN, a modified version of the conjugate gradients method (Björck and Elfving, 1979) that we use to solve the time-harmonic acoustic isotropic constant-density wave equation. Using one accelerator we achieve a speed-up of over 2× compared with one Intel core.}, keywords = {CGMN, FPGA, Helmholtz equation, Kaczmarz, reconfigurable computing, EAGE}, doi = {10.3997/2214-4609.20141141}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/petrenko2014EAGEaih/petrenko2014EAGEaih.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/petrenko2014EAGEaih/petrenko2014EAGEaih_pres.pdf} } @CONFERENCE{kumar2014EAGErank, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Ernie Esser and Hassan Mansour and Felix J. Herrmann}, title = {{SVD}-free low-rank matrix factorization: wavefield reconstruction via jittered subsampling and reciprocity}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {Recently, computationally efficient rank-optimization techniques have been studied extensively to develop new mathematical tools for seismic data interpolation. So far, matrix completion problems have been discussed where sources are subsampled according to a discrete uniform distribution. In this paper, we study the effect of two different subsampling techniques on seismic data interpolation using rank-regularized formulations, namely jittered subsampling versus uniform random subsampling. The other objective of this paper is to combine source-receiver reciprocity with rank-minimization techniques to enhance the accuracy of missing-trace interpolation.
We illustrate the advantages of jittered subsampling and reciprocity using a seismic line from the Gulf of Suez to obtain high-quality results for interpolation, a key application in exploration geophysics.}, keywords = {low-rank, interpolation, reciprocity, EAGE}, doi = {10.3997/2214-4609.20141394}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/kumar2014EAGErank/kumar2014EAGErank.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/kumar2014EAGErank/kumar2014EAGErank_pres.pdf} } @CONFERENCE{kumar2014EAGEeia, author = {Rajiv Kumar and Tristan van Leeuwen and Felix J. Herrmann}, title = {Extended images in action: efficient {WEMVA} via randomized probing}, year = {2014}, month = {06}, booktitle = {EAGE}, abstract = {Image gathers as a function of subsurface offset are an important tool for velocity analysis in areas of complex geology. In this paper, we offer a new perspective on image gathers by organizing the extended image as a function of all subsurface offsets and all subsurface points into a matrix whose (i,j)^{th} entry captures the interaction between gridpoints i and j. For even small problems, it is infeasible to form and store this matrix. Instead, we propose an efficient algorithm to glean information from the image volume via efficient matrix-vector products. We illustrate how this can be used to construct objective functions for automatic MVA.}, keywords = {extended imaging, MVA, probing, EAGE}, doi = {10.3997/2214-4609.20141492}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/kumar2014EAGEeia/kumar2014EAGEeia.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2014/kumar2014EAGEeia/kumar2014EAGEeia_pres.pdf} } @CONFERENCE{petrenko2014OGHPCaih, author = {Art Petrenko and Felix J. Herrmann and Diego Oriato and Simon Tilbury and Tristan van Leeuwen}, title = {Accelerating an iterative {Helmholtz} solver with {FPGAs}}, year = {2014}, month = {03}, booktitle = {OGHPC}, abstract = {We implement the Kaczmarz row-projection algorithm [Kaczmarz, 1937] on a CPU host + FPGA accelerator platform using techniques of dataflow programming. This algorithm is then used as the preconditioning step in CGMN, a modified version of the conjugate gradients method [Björck and Elfving, 1979] that we use to solve the time-harmonic acoustic isotropic constant-density wave equation.}, keywords = {OGHPC, Kaczmarz, CGMN, FPGA, reconfigurable computing, Helmholtz equation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/OGHPC/petrenko2014OGHPCaih.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/OGHPC/petrenko2014OGHPCaih_poster.pdf} } @CONFERENCE{herrmann2014CSEGbsw, author = {Felix J. Herrmann}, title = {Breaking structure - why randomized sampling matters}, booktitle = {CSEG Technical Luncheon}, year = {2014}, month = {01}, abstract = {During this talk, I will explain how ideas from compressive sensing and big data can be used to reduce costs of seismic data acquisition and wave-equation based inversion. The key idea is to exploit structure within the data by deliberately breaking this structure with randomized sampling, e.g., by randomizing source/receiver positions or by source encoding, followed by an optimization procedure that restores the structure and therefore recovers the fully sampled data.
These techniques not only underpin recent advances in missing-trace interpolation and simultaneous acquisition but they are also responsible for significant improvements in full-waveform inversion and reverse-time migration. We will illustrate these concepts using a variety of compelling examples on realistic synthetics and field data.}, keywords = {CSEG, randomized sampling}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2014/herrmann2014CSEGbsw_pres.pdf} } @CONFERENCE{herrmann2013EAGEfrtm, author = {Felix J. Herrmann and Ning Tu}, title = {Fast {RTM} with multiples and source estimation}, booktitle = {EAGE/SEG Forum - Turning noise into geological information: The next big step?}, year = {2013}, month = {11}, abstract = {During this talk, we present a computationally efficient (cost of 1-2 RTMs with all data) iterative sparsity-promoting inversion framework where surface-related multiples are jointly imaged with primaries and where the source signature is estimated on the fly. Our imaging algorithm is computationally efficient because it works during each iteration with small independent randomized subsets of data. The multiples are handled by introducing an areal source term that includes the upgoing wavefield. We update the source signature at each iteration using a variable projection method. The resulting algorithm removes imaging artifacts from surface-related multiples, estimates and removes the imprint of the source, recovers true amplitudes, is fast, and is robust to linearization errors by virtue of the statistical independence of the subsets of data we are working with at each iteration.}, keywords = {EAGE, SEG, RTM, multiples, source estimation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/herrmann2013EAGEfrtm/herrmann2013EAGEfrtm_pres.pdf} } @CONFERENCE{herrmann2013SPIErse, author = {Felix J. Herrmann}, title = {Randomized sampling in exploration seismology}, booktitle = {SPIE Optics and Photonics: Wavelets and Sparsity XV}, year = {2013}, month = {08}, keywords = {SPIE, randomized sampling, exploration seismology}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SPIE/2013/herrmann2013SPIErse/herrmann2013SPIErse_pres.pdf} } @CONFERENCE{herrmann2013SEGpmc, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {A penalty method for {PDE}-constrained optimization with applications to wave-equation based seismic inversion}, booktitle = {SEG Workshop on Computational Mathematics for Geophysics, Houston}, year = {2013}, month = {09}, keywords = {SEG, Houston, penalty method, optimization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/herrmann2013SEGpmc/herrmann2013SEGpmc_pres.pdf} } @CONFERENCE{dasilva2013SEGhtuck, author = {Curt Da Silva and Felix J. Herrmann}, title = {Structured tensor missing-trace interpolation in the {Hierarchical} {Tucker} format}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, month = {09}, volume = {32}, pages = {3623-3627}, publisher = {SEG}, abstract = {Owing to the large scale and dimensionality of a 3D seismic experiment, acquiring fully-sampled data according to the Nyquist criterion is an exceedingly arduous and cost-prohibitive task. In this paper, we develop tools to interpolate 5D seismic volumes with randomly missing sources or receivers using a relatively novel tensor format known as the Hierarchical Tucker (HT) format.
By exploiting the underlying smooth structure of HT tensors, specifically their smooth manifold structure, we develop solvers which are fast, immediately parallelizable, and SVD-free, making these solvers amenable to large-scale problems where SVD-based projection methods are far too costly. We also build on the intuition of multidimensional sampling from the perspective of matrix completion and demonstrate the ability of our algorithms to recover frequency slices even amidst very high levels of source subsampling on a synthetic large-scale 3D North Sea dataset.}, keywords = {SEG, hierarchical tucker, structured tensor, tensor interpolation, differential geometry, Riemannian optimization, Gauss Newton}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/dasilva2013SEGhtuck/dasilva2013SEGhtuck.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/dasilva2013SEGhtuck/dasilva2013SEGhtuck_pres.pdf}, doi = {10.1190/segam2013-0709.1} } @CONFERENCE{kumar2013SEGHSS, author = {Rajiv Kumar and Hassan Mansour and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {Reconstruction of seismic wavefields via low-rank matrix factorization in the hierarchical-separable matrix representation}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, volume = {32}, month = {09}, pages = {3628-3633}, abstract = {Recent developments in matrix rank optimization have allowed for new computational approaches in the field of seismic data interpolation. In this paper, we propose an approach for seismic data interpolation which incorporates the Hierarchical Semi-Separable Structure (HSS) inside rank-regularized least-squares formulations for the missing-trace interpolation problem. The proposed approach is suitable for large-scale problems, since it avoids SVD computations and uses a low-rank factorized formulation instead. We illustrate the advantages of the new HSS approach by interpolating a seismic line from the Gulf of Suez and compare the reconstruction with conventional rank minimization.}, keywords = {SEG, interpolation, HSS}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/kumar2013SEGHSS/kumar2013SEGHSS.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/kumar2013SEGHSS/kumar2013SEGHSS_pres.pdf}, doi = {10.1190/segam2013-1165.1} } @CONFERENCE{kumar2013SEGAVA, author = {Rajiv Kumar and Tristan van Leeuwen and Felix J. Herrmann}, title = {{AVA} analysis and geological dip estimation via two-way wave-equation based extended images}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, volume = {32}, month = {09}, pages = {423-427}, abstract = {In this paper, we present an efficient way to compute extended images for all subsurface offsets without explicitly calculating the source and receiver wavefields for all the sources. Because the extended images contain all possible subsurface offsets, we compute the angle-domain image gathers by selecting the subsurface offset that is aligned with the local dip. We also propose a method to compute the local dip information directly from common-image-point gathers.
To assess the quality of the angle-domain common-image-point gathers, we compute the angle-dependent reflectivity coefficients and compare them with theoretical reflectivity coefficients yielded by the (linearized) Zoeppritz equations for a few synthetic models.}, keywords = {SEG, AVA, dip, wave-equation, extended images}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/kumar2013SEGAVA/kumar2013SEGAVA.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/kumar2013SEGAVA/kumar2013SEGAVA_pres.pdf}, doi = {10.1190/segam2013-1348.1} } @CONFERENCE{kumar2013SEGMVA, author = {Rajiv Kumar and Tristan van Leeuwen and Felix J. Herrmann}, title = {Efficient {WEMVA} using extended images}, year = {2013}, month = {09}, booktitle = {SEG Workshop on Advances in Model Building, Imaging, and FWI, Houston}, abstract = {Image gathers as a function of subsurface offset are an important tool for velocity analysis in areas of complex geology. Here, we offer a new perspective on image gathers by organizing the extended image as a function of all subsurface offsets and all subsurface points into a matrix whose (i,j)th entry captures the interaction between gridpoints i and j. For even small problems, it is infeasible to form and store this matrix. Instead, we propose an efficient algorithm to glean information from the image volume via efficient matrix-vector products. We illustrate how this can be used to construct objective functions for automated MVA.}, keywords = {SEG, workshop, MVA, inversion, poster}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/kumar2013SEGMVA/kumar2013SEGMVA_poster.pdf} } @CONFERENCE{li2013SEGodmvdaiedwawe, author = {Xiang Li and Anais Tamalet and Tristan van Leeuwen and Felix J. Herrmann}, title = {Optimization driven model-space versus data-space approaches to invert elastic data with the acoustic wave equation}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, month = {09}, volume = {32}, pages = {986-990}, abstract = {Inverting data with elastic phases using an acoustic wave equation can lead to erroneous results, especially when the number of iterations is too high, which may lead to overfitting the data. Several approaches have been proposed to address this issue. Most commonly, people apply ``data-independent'' filtering operations that aim to deemphasize the elastic phases in the data in favor of the acoustic phases. Examples of this approach are nested loops over offset range and Laplace parameters. In this paper, we discuss two complementary optimization-driven methods where the minimization process decides adaptively which of the data or model components are consistent with the objective. Specifically, we compare the Student's t misfit function as the data-space alternative and curvelet-domain sparsity promotion as the model-space alternative. Application of these two methods to a realistic synthetic example leads to comparable results that we believe can be improved by combining these two methods.}, keywords = {SEG, full-waveform inversion, elastic, least-squares}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/li2013SEGodmvdaiedwawe/li2013SEGodmvdaiedwawe.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/li2013SEGodmvdaiedwawe/li2013SEGodmvdaiedwawe_pres.pdf}, doi = {10.1190/segam2013-1375.1} } @CONFERENCE{tu2013SEGldi, author = {Ning Tu and Tristan van Leeuwen and Felix J.
Herrmann}, title = {Limitations of the deconvolutional imaging condition for two-way propagators}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, volume = {32}, month = {09}, pages = {3916-3920}, abstract = {The deconvolutional imaging condition has gained wide attention in recent years, as it is often used to image surface-related multiples. However, we noticed on close inspection that this condition was derived from one-way propagation principles. Now that two-way wave-equation based simulations have become more affordable, we revisit the deconvolutional imaging condition and reveal its limitations for two-way propagators. First, it can distort the image due to receiver-side propagation effects. Second, when used to image surface-related multiples, it is not capable of removing all interfering phantom reflectors.}, keywords = {SEG, migration, inversion, multiples}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/tu2013SEGldi/tu2013SEGldi.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/tu2013SEGldi/tu2013SEGldi_pres.pdf}, doi = {10.1190/segam2013-1440.1} } @CONFERENCE{tu2013SEGcle, author = {Ning Tu and Xiang Li and Felix J. Herrmann}, title = {Controlling linearization errors in $\ell_1$ regularized inversion by rerandomization}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, volume = {32}, month = {09}, pages = {4640-4644}, abstract = {Linearized inversion is a data-fitting procedure that tries to match the observed seismic data with data predicted by linearized modelling. In practice, the observed data is not necessarily in the column space of the linearized modelling operator. This can be caused by lack of an accurate background velocity model or by coherent noise not explained by linearized modelling. Through carefully designed experiments, we observe that a moderate data mismatch does not pose an issue if we can use all the data in the inversion. However, artifacts do arise from the mismatch when randomized dimensionality reduction techniques are adopted to speed up the inversion. To stabilize the inversion for dimensionality reduction with randomized source aggregates, we propose to rerandomize by drawing independent simultaneous sources occasionally during the inversion. The effect of this rerandomization is remarkable because it results in virtually artifact-free images at a cost comparable to a single reverse-time migration. Implications of our method are profound because we are now able to resolve fine-scale steep subsalt features in a computationally feasible manner.}, keywords = {SEG, sparsity, inversion, rerandomization, message passing}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/tu2013SEGcle/tu2013SEGcle.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/tu2013SEGcle/tu2013SEGcle_pres.pdf}, doi = {10.1190/segam2013-1302.1} } @CONFERENCE{wason2013SEGtjo, author = {Haneet Wason and Felix J. Herrmann}, title = {Time-jittered ocean bottom seismic acquisition}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2013}, month = {09}, volume = {32}, pages = {1-6}, abstract = {Leveraging ideas from the field of compressed sensing, we show how simultaneous or blended acquisition can be set up as a compressed sensing problem.
This helps us to design a pragmatic time-jittered marine acquisition scheme where multiple source vessels sail across an ocean-bottom array firing airguns at jittered source locations and instances in time, resulting in better spatial sampling and a speedup in acquisition. Furthermore, we can significantly improve the reconstruction quality of conventional seismic data (from jittered data) and demonstrate successful recovery by sparsity promotion. In contrast to random (under)sampling, acquisition via jittered (under)sampling helps in controlling the maximum gap size, which is a practical requirement of wavefield reconstruction with localized sparsifying transforms. Results are illustrated with simulations of time-jittered marine acquisition, which translates to jittered source locations for a given speed of the source vessel, for two source vessels.}, keywords = {SEG, acquisition, marine, OBC, jittered sampling, blending, deblending, interpolation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/wason2013SEGtjo/wason2013SEGtjo.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/wason2013SEGtjo/wason2013SEGtjo_pres.pdf}, doi = {10.1190/segam2013-1391.1} } @CONFERENCE{lin2013SEGdss, author = {Tim T.Y. Lin and Haneet Wason and Felix J. Herrmann}, title = {Dense shot-sampling via time-jittered marine sources}, booktitle = {SEG Workshop on Simultaneous Sources, Houston}, year = {2013}, month = {09}, keywords = {SEG, Houston, acquisition, simultaneous sources}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/lin2013SEGdss/lin2013SEGdss_pres.pdf} } @CONFERENCE{dasilva2013SAMPTAhtuck, author = {Curt Da Silva and Felix J. Herrmann}, title = {Hierarchical {Tucker} tensor optimization - applications to tensor completion}, year = {2013}, month = {07}, abstract = {In this work, we develop an optimization framework for problems whose solutions are well-approximated by Hierarchical Tucker tensors, an efficient structured tensor format based on recursive subspace factorizations. Using the differential geometric tools presented here, we construct standard optimization algorithms such as Steepest Descent and Conjugate Gradient for interpolating tensors in the HT format. We also empirically examine the importance of one's choice of data organization in the success of tensor recovery by drawing upon insights from the Matrix Completion literature. Using these algorithms, we recover various seismic data sets with randomly missing source pairs.}, keywords = {SAMPTA, hierarchical tucker, structured tensor, tensor interpolation, differential geometry, riemannian optimization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SAMPTA/2013/dasilva2013SAMPTAhtuck/dasilva2013SAMPTAhtuck.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SAMPTA/2013/dasilva2013SAMPTAhtuck/dasilva2013SAMPTAhtuck_pres.pdf} } @CONFERENCE{dasilva2013EAGEhtucktensor, author = {Curt Da Silva and Felix J. Herrmann}, title = {Hierarchical {Tucker} tensor optimization - applications to {4D} seismic data interpolation}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {In this work, we develop optimization algorithms on the manifold of Hierarchical Tucker (HT) tensors, an extremely efficient format for representing high-dimensional tensors exhibiting particular low-rank structure.
With some minor alterations to existing theoretical developments, we develop an optimization framework based on the geometric understanding of HT tensors as a smooth manifold, a generalization of smooth curves/surfaces. Building on the existing research on solving optimization problems on smooth manifolds, we develop Steepest Descent and Conjugate Gradient methods for HT tensors. The resulting algorithms converge quickly, are immediately parallelizable, and do not require the computation of SVDs. We also extend ideas about favourable sampling conditions for missing-data recovery from the field of Matrix Completion to Tensor Completion and demonstrate how the organization of data can affect the success of recovery. As a result, if one has data with randomly missing source pairs, using these ideas, coupled with an efficient solver, one can interpolate large-scale seismic data volumes with missing sources and/or receivers by exploiting the multidimensional dependencies in the data. We are able to recover data volumes amidst extremely high subsampling ratios (in some cases, > 75\%) using this approach.}, keywords = {EAGE, structured tensor, 3D data interpolation, riemannian optimization}, doi = {10.3997/2214-4609.20130390}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/dasilva2013EAGEhtucktensor/dasilva2013EAGEhtucktensor.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/dasilva2013EAGEhtucktensor/dasilva2013EAGEhtucktensor_pres.pdf} } @CONFERENCE{kumar2013EAGEsind, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Hassan Mansour and Ben Recht and Felix J. Herrmann}, title = {Seismic data interpolation and denoising using {SVD}-free low-rank matrix factorization}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {Recent developments in rank optimization have allowed new approaches for seismic data interpolation and denoising. In this paper, we propose an approach for simultaneous seismic data interpolation and denoising using robust rank-regularized formulations. The proposed approach is suitable for large-scale problems, since it avoids SVD computations by using factorized formulations. We illustrate the advantages of the new approach using a seismic line from the Gulf of Suez and 5D synthetic seismic data to obtain high-quality results for interpolation and denoising, a key application in exploration geophysics.}, keywords = {EAGE, interpolation, denoising}, doi = {10.3997/2214-4609.20130388}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/kumar2013EAGEsind/kumar2013EAGEsind.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/kumar2013EAGEsind/kumar2013EAGEsind_pres.pdf} } @CONFERENCE{lin2013EAGEcsd, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Cosparse seismic data interpolation}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {Many modern seismic data interpolation and redatuming algorithms rely on the promotion of transform-domain sparsity for high-quality results. Amongst the large diversity of methods and different ways of realizing sparse reconstruction lies a central question that often goes unaddressed: is it better for the transform-domain sparsity to be achieved through explicit construction of sparse representations (e.g., by thresholding of small transform-domain coefficients), or by demanding that the algorithm return physical signals which produce sparse coefficients when hit with the forward transform?
Recent results show that the two approaches give rise to different solutions when the transform is redundant, and that the latter approach imposes a whole new class of constraints related to where the forward transform produces zero coefficients. From this framework, a new reconstruction algorithm is proposed which may allow better reconstruction from subsampled signals than what the sparsity assumption alone would predict. In this work we apply the new framework and algorithm to the case of seismic data interpolation in the curvelet domain, and show that it admits better reconstruction than some existing $\ell_1$ sparsity-based methods derived from compressive sensing for a range of subsampling factors.}, keywords = {EAGE, cosparsity, interpolation, curvelet, algorithm, optimization}, doi = {10.3997/2214-4609.20130387}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/lin2013EAGEcsd/lin2013EAGEcsd.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/lin2013EAGEcsd/lin2013EAGEcsd_pres.pdf} } @CONFERENCE{tu2013EAGElsm, author = {Ning Tu and Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann}, title = {Fast least-squares migration with multiples and source estimation}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {The advent of modern computing has made it possible to do seismic imaging using least-squares reverse-time migration. We obtain superior images by solving an optimization problem that recovers the true-amplitude images. However, its success hinges on overcoming several issues, including overwhelming problem size, unknown source wavelet, and interfering coherent events like multiples. In this abstract, we reduce the problem size by using ideas from compressive sensing, and estimate the source wavelet by generalized variable projection. We also demonstrate how to invert for subsurface information encoded in surface-related multiples by incorporating the free-surface operator as an areal source in reverse-time migration. Our synthetic examples show that multiples help to improve the resolution of the image, as well as remove the amplitude ambiguity in wavelet estimation.}, keywords = {EAGE, imaging, sparse, source estimation, multiples}, doi = {10.3997/2214-4609.20130727}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/tu2013EAGElsm/tu2013EAGElsm.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/tu2013EAGElsm/tu2013EAGElsm_pres.pdf} } @CONFERENCE{vanleeuwen2013EAGErobustFWI, author = {Tristan van Leeuwen and Aleksandr Y. Aravkin and Henri Calandra and Felix J. Herrmann}, title = {In which domain should we measure the misfit for robust full waveform inversion?}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {Full-waveform inversion relies on minimizing the difference between observed and modeled data, as measured by some penalty function. A popular choice, of course, is the least-squares penalty. However, when outliers are present in the data, the use of robust penalties such as the Huber or Student's t may significantly improve the results since they put relatively less weight on large residuals. In order for robust penalties to be effective, the outliers must be somehow localized and distinguishable from the good data. We propose to first transform the residual into a domain where the outliers are localized before measuring the misfit with a robust penalty.
This is exactly how one would normally devise filters to remove the noise before applying conventional FWI. We propose to merge the two steps and let the inversion process implicitly filter out the noise. Results on a synthetic dataset show the effectiveness of the approach.}, keywords = {EAGE, full waveform inversion}, doi = {10.3997/2214-4609.20130839}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/vanleeuwen2013EAGErobustFWI/vanleeuwen2013EAGErobustFWI.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/vanleeuwen2013EAGErobustFWI/vanleeuwen2013EAGErobustFWI_pres.pdf} } @CONFERENCE{wason2013EAGEobs, author = {Haneet Wason and Felix J. Herrmann}, title = {Ocean bottom seismic acquisition via jittered sampling}, booktitle = {EAGE}, year = {2013}, month = {06}, abstract = {We present a pragmatic marine acquisition scheme where multiple source vessels sail across an ocean-bottom array firing airguns at jittered source locations and instances in time. Following the principles of compressive sensing, we can significantly improve the reconstruction quality of conventional seismic data (from jittered data) and demonstrate successful recovery by sparsity promotion. In contrast to random (under)sampling, acquisition via jittered (under)sampling helps in controlling the maximum gap size, which is a practical requirement of wavefield reconstruction with localized sparsifying transforms. Results are illustrated with simulations of time-jittered marine acquisition, which translates to jittered source locations for a given speed of the source vessel, for two source vessels.}, keywords = {EAGE, acquisition, blended, marine, deblending, interpolation}, doi = {10.3997/2214-4609.20130379}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/wason2013EAGEobs/wason2013EAGEobs.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2013/wason2013EAGEobs/wason2013EAGEobs_pres.pdf} } @CONFERENCE{aravkin2013ICASSPssi, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Ning Tu}, title = {Sparse seismic imaging using variable projection}, booktitle = {ICASSP}, year = {2013}, month = {05}, abstract = {We consider an important class of signal processing problems where the signal of interest is known to be sparse, and can be recovered from data given auxiliary information about how this data was generated. For example, a sparse Green's function may be recovered from seismic experimental data using sparsity optimization when the source signature is known. Unfortunately, in practice this information is often missing, and must be recovered from data along with the signal using deconvolution techniques. In this paper, we present a novel methodology to simultaneously solve for the sparse signal and auxiliary parameters using a recently proposed variable projection technique. Our main contribution is to combine variable projection with sparsity-promoting optimization, obtaining an efficient algorithm for large-scale sparse deconvolution problems. We demonstrate the algorithm on a seismic imaging example.}, keywords = {imaging, sparsity, optimization, variable projection}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2013/aravkin2013ICASSPssi/aravkin2013ICASSPssi.pdf} } @CONFERENCE{petrenko2013HPCSsaoc, author = {Art Petrenko and Tristan van Leeuwen and Felix J.
Herrmann}, title = {Software acceleration of {CARP}, an iterative linear solver and preconditioner}, organization = {HPCS}, year = {2013}, month = {06}, abstract = {We present the results of software optimization of a row-wise preconditioner (Component Averaged Row Projections) for the method of conjugate gradients, which is used to solve the diagonally banded Helmholtz system representing frequency-domain, isotropic acoustic seismic wave simulation. We demonstrate that in our application, a preconditioner bound to one processor core and accessing memory contiguously reduces execution time by 7\% for matrices having on the order of $10^8$ non-zeros. For reference we note that our C implementation is over 80 times faster than the corresponding code written for a high-level numerical analysis language.}, keywords = {HPCS, Helmholtz equation, Kaczmarz, software, wave propagation, frequency-domain}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/HPCS/2013/petrenko2013HPCSsaoc/petrenko2013HPCSsaoc.pdf}, url2 = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/HPCS/2013/petrenko2013HPCSsaoc/petrenko2013HPCSsaoc_poster.pdf} } @CONFERENCE{herrmann2013KAUSTrse, author = {Felix J. Herrmann}, title = {Randomized sampling in exploration seismology}, booktitle = {KAUST}, organization = {KAUST}, year = {2013}, month = {05}, keywords = {KAUST, randomized sampling, exploration seismology}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/KAUST/2013/herrmann2013KAUSTrse/herrmann2013KAUSTrse_pres.pdf} } @CONFERENCE{herrmann2013SEGOMANrdw, author = {Felix J. Herrmann}, title = {Recent developments in wave-equation based inversion technology}, booktitle = {SEG Workshop on FWI, Oman}, year = {2013}, month = {04}, keywords = {SEG, workshop, Oman, randomized sampling, exploration seismology, 3D, FWI}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2013/herrmann2013SEGOMANrdw/herrmann2013SEGOMANrdw_pres.pdf} } @CONFERENCE{herrmann2013SIAMdrfwi, author = {Felix J. Herrmann}, title = {Dimensionality reduction in {FWI}}, booktitle = {SIAM}, year = {2013}, month = {02}, keywords = {SIAM, FWI, dimensionality reduction}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2013/herrmann2013SIAMdrfwi/herrmann2013SIAMdrfwi_pres.pdf} } @CONFERENCE{miao2013CSEGaospsa, author = {Lina Miao and Felix J. Herrmann}, title = {Acceleration on sparse promoting seismic applications}, booktitle = {CSEG}, year = {2013}, month = {05}, abstract = {Sparsity-promoting problems are not new in seismic applications. Back in the 1970s, geophysicists already exploited the robustness of sparse solutions. Moreover, with the emerging use of compressed sensing in recent years, sparse recovery has been favored for dealing with the `curse of dimensionality' in various seismic field acquisition, data processing, and imaging applications. Although sparsity has provided a promising approach, solving for it presents a big challenge. How to work efficiently with extremely large-scale seismic problems, and how to improve the convergence rate to reduce computation time, are the most frequently asked questions in this context. In this abstract, the author proposes a new algorithm, PQN$\ell_1$, to address these questions.
One example on seismic data processing is included.}, keywords = {CSEG, sparsity-promotion, SPG$\ell_1$, projected Quasi Newton}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2013/miao2013CSEGaospsa/miao2013CSEGaospsa.pdf} } @CONFERENCE{oghenekohwo2013CSEGnratld, author = {Felix Oghenekohwo and Felix J. Herrmann}, title = {Assessing the need for repeatability in acquisition of time-lapse data}, booktitle = {CSEG}, year = {2013}, month = {05}, abstract = {There are several factors that affect the repeatability of 4D (time-lapse) seismic data. One of the most significant factors is the repeatability of the acquisition, particularly the locations of the sources and receivers. It is important to repeat the source-receiver locations used during the baseline survey in the monitor or repeat survey. Also, it is essential that the stacked data volumes used for time-lapse analysis are created using the same offset ranges for each survey. This condition is crucial in order to be able to produce an image of the same location over a period of time and enhances proper reservoir characterization. Repeating the seismic acquisition is very expensive, as oftentimes the receiver array has to be left at the same location over the period for which the data will be acquired. In other words, it is important to repeat the acquisition geometry as much as possible. In this talk, we investigate the results of changing the acquisition geometry by a random placement of the receivers for both the baseline and newer (monitor) surveys. Results show that we are still able to observe the time-lapse effects with the proposed acquisition geometry. Our experiments have been performed on a synthetic model.}, keywords = {acquisition, CSEG, time-lapse, migration}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2013/oghenekohwo2013CSEGnratld/oghenekohwo2013CSEGnratld.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2013/oghenekohwo2013CSEGnratld/oghenekohwo2013CSEGnratld_pres.pdf} } @CONFERENCE{Mansour11TRwmmw, author = {Hassan Mansour and Ozgur Yilmaz}, title = {Weighted-$\ell_1$ minimization with multiple weighting sets}, booktitle = {Proc. SPIE}, year = {2011}, month = {09}, volume = {8138}, pages = {813809-813809-13}, abstract = {In this paper, we study the support recovery conditions of weighted $\ell_1$ minimization for signal reconstruction from compressed sensing measurements when multiple support estimate sets with different accuracy are available. We identify a class of signals for which the recovered vector from $\ell_1$ minimization provides an accurate support estimate. We then derive stability and robustness guarantees for the weighted $\ell_1$ minimization problem with more than one support estimate. We show that applying a smaller weight to support estimates that enjoy higher accuracy improves the recovery conditions compared with the case of a single support estimate and the case with standard, i.e., non-weighted, $\ell_1$ minimization. Our theoretical results are supported by numerical simulations on synthetic signals and real audio signals.}, keywords = {compressive sensing, optimization}, notes = {TR-2011-07}, doi = {10.1117/12.894165}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SPIE/2011/Mansour11TRwmmw/Mansour11TRwmmw.pdf} } @CONFERENCE{aravkin2011EAGEnspf, author = {Aleksandr Y. Aravkin and James V. Burke and Felix J.
Herrmann and Tristan van Leeuwen}, title = {A nonlinear sparsity promoting formulation and algorithm for full waveform inversion}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {Full Waveform Inversion (FWI) is a computational procedure to extract medium parameters from seismic data. FWI is typically formulated as a nonlinear least squares optimization problem, and various regularization techniques are used to guide the optimization because the problem is ill-posed. In this paper, we propose a novel sparse regularization which exploits the ability of curvelets to efficiently represent geophysical images. We then formulate a corresponding sparsity promoting constrained optimization problem, which we call Nonlinear Basis Pursuit Denoise (NBPDN), and present an algorithm to solve this problem to recover medium parameters. The utility of the NBPDN formulation and efficacy of the algorithm are demonstrated on a stylized cross-well experiment, where a sparse velocity perturbation is recovered with higher quality than with the standard FWI formulation (solved with LBFGS). The NBPDN formulation and algorithm can recover the sparse perturbation even when the data volume is compressed to 5\% of the original size using random superposition.}, keywords = {EAGE, full-waveform inversion, optimization}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/aravkin11EAGEnspf/aravkin11EAGEnspf_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/aravkin11EAGEnspf/aravkin11EAGEnspf.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50199} } @CONFERENCE{aravkin2012ICASSProbustb, author = {Aleksandr Y. Aravkin and Michael P. Friedlander and Tristan van Leeuwen}, title = {Robust inversion via semistochastic dimensionality reduction}, booktitle = {ICASSP}, year = {2012}, pages = {5245-5248}, organization = {ICASSP}, abstract = {We consider a class of inverse problems where it is possible to aggregate the results of multiple experiments. This class includes problems where the forward model is the solution operator to linear ODEs or PDEs. The tremendous size of such problems motivates the use of dimensionality reduction (DR) techniques based on randomly mixing experiments. These techniques break down, however, when robust data-fitting formulations are used, which are essential in cases of missing data, unusually large errors, and systematic features in the data unexplained by the forward model. We survey robust methods within a statistical framework, and propose a sampling optimization approach that allows DR. The efficacy of the methods is demonstrated for a large-scale seismic inverse problem using the robust Student's t-distribution, where a useful synthetic velocity model is recovered in the extreme scenario of 60\% corrupted data. The sampling approach achieves this recovery using 20\% of the effort required by a direct robust approach.}, keywords = {ICASSP}, doi = {10.1109/ICASSP.2012.6289103}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2012/AravkinFriedlanderLeeuwen/AravkinFriedlanderLeeuwen.pdf} } @CONFERENCE{aravkin2011SIAMfwi, author = {Aleksandr Y. Aravkin and Felix J. Herrmann and Tristan van Leeuwen and James V. Burke and Xiang Li}, title = {Full waveform inversion with compressive updates}, booktitle = {SIAM}, year = {2011}, organization = {SIAM CS\&E 2011}, abstract = {Full-waveform inversion relies on large multi-experiment data volumes.
While improvements in acquisition and inversion have been extremely successful, the current push for higher quality models reveals fundamental shortcomings in handling increasing problem sizes numerically. To address this fundamental issue, we propose a randomized dimensionality-reduction strategy motivated by recent developments in stochastic optimization and compressive sensing. In this formulation, conventional Gauss-Newton iterations are replaced by dimensionality-reduced sparse recovery problems with source encodings.}, keywords = {SLIM, full-waveform inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2011/aravkin2011SIAMfwi/aravkin2011SIAMfwi.pdf} } @CONFERENCE{herrmann2011SEGffw, author = {Aleksandr Y. Aravkin and Felix J. Herrmann and Tristan van Leeuwen and Xiang Li}, title = {Fast full-waveform inversion with compressive sensing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, keywords = {SEG, SLIM, full-waveform inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/HerrmannSEG2011fws/HerrmannSEG2011fws.pdf} } @CONFERENCE{aravkin2011EAGEspfwi, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and James V. Burke and Felix J. Herrmann}, title = {Sparsity promoting formulations and algorithms for {FWI}}, booktitle = {EAGE}, year = {2011}, keywords = {EAGE, full-waveform inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/Aravkin2011EAGEspfwi/Aravkin2011EAGEspfwi.pdf} } @CONFERENCE{aravkin2011ICIAMspf, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and James V. Burke and Felix J. Herrmann}, title = {Sparsity promoting formulations and algorithms for {FWI}}, booktitle = {ICIAM}, year = {2011}, organization = {ICIAM 2011}, abstract = {Full Waveform Inversion (FWI) is a computational procedure to extract medium parameters from seismic data. FWI is typically formulated as a nonlinear least squares optimization problem, and various regularization techniques are used to guide the optimization because the problem is ill-posed. We propose a novel sparse regularization which exploits the ability of curvelets to efficiently represent geophysical images. We then formulate a corresponding sparsity promoting constrained optimization problem, which we solve using an open source algorithm. The techniques are applicable to any inverse problem where sparsity modeling is appropriate. We demonstrate the efficacy of the formulation on a toy example (stylized cross-well experiment) and on a realistic seismic example (partial Marmousi model). We also discuss the tradeoff between model fit and sparsity promotion, with a view to extending existing techniques for linear inverse problems to the case where the forward model is nonlinear.}, date-added = {2011-07-15}, keywords = {SLIM,Presentation,Full-waveform inversion,Optimization}, month = {07}, note = {Presented at AMP Medical and Seismic Imaging, 2011, Vancouver BC.}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/aravkin2011ICIAMspf/aravkin2011ICIAMspf_pres.pdf} } @CONFERENCE{aravkin2011ICIAMrfwiu, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J.
Herrmann}, title = {Robust {FWI} using {Student's} t-distribution}, booktitle = {ICIAM}, year = {2011}, organization = {ICIAM 2011}, abstract = {Iterative inversion algorithms require repeated simulation of 3D time-dependent acoustic, elastic, or electromagnetic wave fields, extending hundreds of wavelengths and hundreds of periods. Also, seismic data is rich in information at every representable scale. Thus simulation-driven optimization approaches to inversion impose great demands on simulator efficiency and accuracy. While computer hardware advances have been of critical importance in bringing inversion closer to practical application, algorithmic advances in simulator methodology have been equally important. Speakers in this two-part session will address a variety of numerical issues arising in the wave simulation, and in its application to inversion.}, date-added = {2011-07-20}, keywords = {SLIM, ICIAM, full-waveform inversion, optimization}, month = {07}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/aravkin2011ICIAMrfwiu/aravkin2011ICIAMrfwiu.pdf} } @CONFERENCE{aravkin2011SEGrobust, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann}, title = {Robust full-waveform inversion using the {Student's} t-distribution}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {2669-2673}, organization = {SEG}, abstract = {Full-waveform inversion (FWI) is a computational procedure to extract medium parameters from seismic data. Robust methods for FWI are needed to overcome sensitivity to noise and in cases where modeling is particularly poor or far from the real data generating process. We survey previous robust methods from a statistical perspective, and use this perspective to derive a new robust method by assuming the random errors in our model arise from the Student's t-distribution. We show that in contrast to previous robust methods, the new method progressively down-weighs large outliers, effectively ignoring them once they are large enough. This suggests that the new method is more robust and suitable for situations with very poor data quality or modeling. Experiments show that the new method recovers as well or better than previous robust methods, and can recover models with quality comparable to standard methods on noise-free data when some of the data is completely corrupted, and even when a marine acquisition mask is entirely ignored in the modeling. The ability to ignore a marine acquisition mask via robust FWI methods offers an opportunity for stochastic optimization methods in marine acquisition.}, keywords = {SEG, full-waveform inversion, optimization}, doi = {10.1190/1.3627747}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/aravkin11SEGrobust/aravkin11SEGrobust.pdf} } @CONFERENCE{aravkin2012ICASSPfastseis, author = {Aleksandr Y. Aravkin and Xiang Li and Felix J. Herrmann}, title = {Fast seismic imaging for marine data}, booktitle = {ICASSP}, year = {2012}, organization = {ICASSP}, abstract = {Seismic imaging can be formulated as a linear inverse problem where a medium perturbation is obtained via minimization of a least-squares misfit functional. The demand for higher resolution images in more geophysically complex areas drives the need to develop techniques that handle problems of tremendous size with limited computational resources.
While seismic imaging is amenable to dimensionality reduction techniques that collapse the data volume into a smaller set of "super-shots", these techniques break down for complex acquisition geometries such as marine acquisition, where sources and receivers move during acquisition. To meet these challenges, we propose a novel method that combines sparsity-promoting (SP) solvers with random subset selection of sequential shots, yielding a SP algorithm that only ever sees a small portion of the full data, enabling its application to very large-scale problems. Application of this technique yields excellent results for a complicated synthetic, which underscores the robustness of sparsity promotion and its suitability for seismic imaging.}, keywords = {ICASSP}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2012/AravkinLiHerrmann/AravkinLiHerrmann.pdf} } @CONFERENCE{aravkin2012SEGST, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Kenneth Bube and Felix J. Herrmann}, title = {On non-uniqueness of the {Student's} t-formulation for linear inverse problems}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2012}, month = {11}, volume = {31}, pages = {1-5}, organization = {SEG}, abstract = {We review the statistical interpretation of inverse problem formulations, and the motivations for selecting non-convex penalties for robust behaviour with respect to measurement outliers or artifacts in the data. An important downside of using non-convex formulations such as the Student's t is the potential for non-uniqueness, and we present a simple example where the Student's t penalty can be made to have many local minima by appropriately selecting the degrees of freedom parameter. On the other hand, the non-convexity of the Student's t is precisely what gives it the ability to ignore artifacts in the data. We explain this idea, and present a stylized imaging experiment, where the Student's t is able to recover a velocity perturbation from data contaminated by a very peculiar artifact --- data from a different velocity perturbation. The performance of Student's t inversion is investigated empirically for different values of the degrees of freedom parameter, and different initial conditions.}, keywords = {Student's t, robust, non-convex, uniqueness, SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/aravkin2012SEGST/aravkin2012SEGST.pdf}, doi = {10.1190/segam2012-1558.1} } @CONFERENCE{aravkin2012EAGErobust, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Henri Calandra and Felix J. Herrmann}, title = {Source estimation for frequency-domain {FWI} with robust penalties}, booktitle = {74th EAGE Conference and Exhibition 2012}, year = {2012}, month = {06}, pages = {P018}, abstract = {Source estimation is an essential component of full waveform inversion. In the standard frequency domain formulation, there is a closed-form solution for the optimal source weights, which can thus be cheaply estimated on the fly. A growing body of work underscores the importance of robust modeling for data with large outliers or artifacts that are not captured by the forward model. Effectively, the least-squares penalty on the residual is replaced by a robust penalty, such as Huber, Hybrid $\ell_1$-$\ell_2$, or Student's t. As we will demonstrate, it is essential to use the same robust penalty for source estimation. In this abstract, we present a general approach to robust waveform inversion with robust source estimation.
In this general formulation, there is no closed-form solution for the optimal source weights, so we need to solve a scalar optimization problem to obtain these weights. We can efficiently solve this optimization problem with a Newton-like method in a few iterations. The computational cost involved is of the same order as the usual least-squares source estimation procedure. We show numerical examples illustrating robust source estimation and robust waveform inversion on synthetic data with outliers.}, keywords = {EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/aravkin2012EAGErobust/aravkin2012EAGErobust_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/aravkin2012EAGErobust/aravkin2012EAGErobust.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59196} } @CONFERENCE{vandenberg07VONipo, author = {Ewout van den Berg and Michael P. Friedlander}, title = {In pursuit of a root}, year = {2007}, month = {07}, organization = {Von Neumann Symposium}, quality = {1}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/vonNeuman/2007/vandenberg07VONipo/vandenberg07VONipo.pdf} } @CONFERENCE{beyreuther2005SEGcot, author = {Moritz Beyreuther and Jamin Cristall and Felix J. Herrmann}, title = {Computation of time-lapse differences with {3-D} directional frames}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2005}, volume = {24}, pages = {2488-2491}, organization = {SEG}, abstract = {We present an alternative method of extracting production related differences from time-lapse seismic data sets. Our method is not based on the actual subtraction of the two data sets, risking the enhancement of noise and introduction of artifacts due to local phase rotation and slightly misaligned events. Rather, it mutes events of the monitor survey with respect to the baseline survey based on the magnitudes of coefficients in a sparse and local atomic decomposition. Our technique is demonstrated to be an effective tool for enhancing the time-lapse signal from surveys which have been cross-equalized. {\copyright}2005 Society of Exploration Geophysicists}, keywords = {SLIM, SEG}, doi = {10.1190/1.2148227}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2005/Beyreuther05SEGcot/Beyreuther05SEGcot.pdf} } @CONFERENCE{beyreuther2004EAGEcdo, author = {Moritz Beyreuther and Felix J. Herrmann and Jamin Cristall}, title = {Curvelet denoising of {4-D} seismic}, booktitle = {EAGE}, year = {2004}, month = {06}, abstract = {With burgeoning world demand and a limited rate of discovery of new reserves, there is increasing impetus upon the industry to optimize recovery from already existing fields. 4D, or time-lapse, seismic imaging is an emerging technology that holds great promise to better monitor and optimise reservoir production. The basic idea behind 4D seismic is that when multiple 3D surveys are acquired at separate calendar times over a producing field, the reservoir geology will not change from survey to survey but the state of the reservoir fluids will change. Thus, taking the difference between two 3D surveys should remove the static geologic contribution to the data and isolate the time-varying fluid flow component. However, a major challenge in 4D seismic is that acquisition and processing differences between 3D surveys often overshadow the changes caused by fluid flow.
This problem is compounded when 4D effects are sought to be derived from vintage 3D data sets that were not originally acquired with 4D in mind. The goal of this study is to remove the acquisition and imaging artefacts from a 4D seismic difference cube using Curvelet processing techniques.}, keywords = {SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2004/Beyreuther04EAGEcdo/Beyreuther04EAGEcdo_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2004/Beyreuther04EAGEcdo/beyreuther2004EAGEcdo_paper.pdf}, url2 = {https://circle.ubc.ca/bitstream/handle/2429/453/EAGE4D2004.pdf?sequence=1}, url3 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=2323} } @CONFERENCE{herrmann2012SEGfwi, author = {Andrew J. Calvert and Ian Hanlon and Mostafa Javanmehri and Rajiv Kumar and Tristan van Leeuwen and Xiang Li and Brendan Smithyman and Eric Takam Takougang and Haneet Wason and Felix J. Herrmann}, title = {{FWI} from the {West} {Coasts}: lessons learned from "Gulf of Mexico Imaging Challenges: What Can Full Waveform Inversion Achieve?"}, booktitle = {SEG Workshop on FWI, Las Vegas}, year = {2012}, organization = {SEG}, keywords = {workshop, FWI, SEG}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/herrmann2012SEGfwi/herrmann2012SEGfwi_pres.pdf} } @CONFERENCE{cristall2004CSEGcpa, author = {Jamin Cristall and Moritz Beyreuther and Felix J. Herrmann}, title = {Curvelet processing and imaging: {4-D} adaptive subtraction}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, organization = {CSEG}, abstract = {With burgeoning world demand and a limited rate of discovery of new reserves, there is increasing impetus upon the industry to optimize recovery from already existing fields. 4D, or time-lapse, seismic imaging holds great promise to better monitor and optimise reservoir production. The basic idea behind 4D seismic is that when multiple 3D surveys are acquired at separate calendar times over a producing field, the reservoir geology will not change from survey to survey but the state of the reservoir fluids will change. Thus, taking the difference between two 3D surveys should remove the static geologic contribution to the data and isolate the time-varying fluid flow component. However, a major challenge in 4D seismic is that acquisition and processing differences between 3D surveys often overshadow the changes caused by fluid flow. This problem is compounded when 4D effects are sought to be derived from legacy 3D data sets that were not originally acquired with 4D in mind. The goal of this study is to remove the acquisition and imaging artefacts from a 4D seismic difference cube using Curvelet processing techniques.}, keywords = {SLIM}, month = {05}, url = {http://www.cseg.ca/assets/files/resources/abstracts/2004/059S0201-Cristall_J_Curvelet_4D.pdf} } @CONFERENCE{dasilva2012EAGEprobingprecond, author = {Curt {Da Silva} and Felix J. Herrmann}, title = {Matrix probing and simultaneous sources: a new approach for preconditioning the {Hessian}}, booktitle = {EAGE}, year = {2012}, month = {06}, organization = {EAGE}, abstract = {Recent advances based on the mathematical understanding of the Hessian as, under certain conditions, a pseudo-differential operator have resulted in a new preconditioner by L. Demanet et al. Their approach is based on a suitable basis expansion for the Hessian: by suitably 'probing' the Hessian, i.e.
applying the Hessian to a small number of randomized model perturbations, one can obtain an approximation to the inverse Hessian in an efficient manner. Building upon this approach, we consider this preconditioner in the context of least-squares migration and Full Waveform Inversion and specifically dimensionality reduction techniques in these domains. By utilizing previous work in simultaneous sources, we are able to develop an efficient least-squares migration scheme which recovers higher quality images and hence higher quality search directions in the context of a Gauss-Newton method for Full Waveform Inversion while simultaneously avoiding inordinate amounts of additional work.}, keywords = {EAGE, matrix probing, pseudo-differential operator}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/dasilva2012EAGEprobingprecond/dasilva2012EAGEprobingprecond_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/dasilva2012EAGEprobingprecond/dasilva2012EAGEprobingprecond.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59193} } @CONFERENCE{erlangga2009EAGEmwi, author = {Yogi A. Erlangga and Felix J. Herrmann}, title = {Migration with implicit solvers for the time-harmonic {Helmholtz} equation}, booktitle = {EAGE}, year = {2009}, month = {06}, abstract = {From the measured seismic data, the location and the amplitude of reflectors can be determined via a migration algorithm. Classically, following Claerbout{\textquoteright}s imaging principle [2], a reflector is located at the position where the source{\textquoteright}s forward-propagated wavefield correlates with the backward-propagated wavefield of the receiver data. Lailly and Tarantola later showed that this imaging principle is an instance of inverse problems, with the associated migration operator formulated via a least-squares functional; see [6, 12, 13]. Furthermore, they showed that the migrated image is associated with the gradient of this functional with respect to the image. If the solution of the least-squares functional is done iteratively, the correlation-based image coincides up to a constant with the first iteration of a gradient method. In practice, this migration is done either in the time domain or in the frequency domain. In the frequency-domain migration, the main bottleneck thus far, which has hindered its full implementation for large-scale problems, is the lack of efficient solvers for computing wavefields. Robust direct methods easily run into excessive memory requirements as the size of the problem increases. On the other hand, iterative methods, which are less demanding in terms of memory, suffered from a lack of convergence. During the past years, however, progress has been made in the development of an efficient iterative method [4, 3] for the frequency-domain wavefield computations. In this paper, we will show the significance of this method (called MKMG) in the context of the frequency-domain migration, where multi-shot-frequency wavefields (on the order of 10,000 related wavefields) need to be computed.}, keywords = {EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/Erlangga09EAGEmwi/Erlangga09EAGEmwi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/Erlangga09EAGEmwi/Erlangga09EAGEmwi.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=23955} } @CONFERENCE{erlangga2009SEGswi, author = {Yogi A.
Erlangga and Felix J. Herrmann}, title = {Seismic waveform inversion with {Gauss-Newton-Krylov} method}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {2357-2361}, organization = {SEG}, abstract = {This abstract discusses an implicit implementation of the Gauss-Newton method, used for the frequency-domain full-waveform inversion, where the inverse of the Hessian for the update is never formed explicitly. Instead, the inverse of the Hessian is computed approximately by a conjugate gradient (CG) method, which only requires the action of the Hessian on the CG search direction. This procedure avoids excessive computer storage, usually needed for storing the Hessian, at the expense of extra computational work in CG. An effective preconditioner for the Hessian is important to improve the convergence of CG, and hence to reduce the overall computational work.}, keywords = {SEG}, doi = {10.1190/1.3255332}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/erlangga09SEGswi/erlangga09SEGswi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/erlangga09SEGswi/erlangga09SEGswi.pdf} } @CONFERENCE{erlangga2008SEGaim, author = {Yogi A. Erlangga and Felix J. Herrmann}, title = {An iterative multilevel method for computing wavefields in frequency-domain seismic inversion}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {1957-1960}, organization = {SEG}, abstract = {We describe an iterative multilevel method for solving linear systems representing forward modeling and back propagation of wavefields in frequency-domain seismic inversions. The workhorse of the method is the so-called multilevel Krylov method, applied to a multigrid-preconditioned linear system, and is called the multigrid-multilevel Krylov (MKMG) method. Numerical experiments are presented for the 2D Marmousi synthetic model for a range of frequencies. The convergence of the method is fast, and depends only mildly on frequency. The method can be considered as the first viable alternative to LU factorization, which is practically prohibitive for 3D seismic inversions.}, keywords = {SLIM, SEG}, doi = {10.1190/1.3059279}, month = {11}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/erlangga08SEGaim/erlangga08SEGaim_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/erlangga08SEGaim/erlangga08SEGaim.pdf} } @CONFERENCE{eso2008SEGira, author = {R. A. Eso and S. Napier and Felix J. Herrmann and D. W. Oldenburg}, title = {Iterative reconstruction algorithm for non-linear operators}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {579-583}, organization = {SEG}, abstract = {Iterative soft thresholding of a model's wavelet coefficients can be used to obtain models that are sparse with respect to a known basis function.
We generate sparse models for non-linear forward operators by applying the soft thresholding operator to the model obtained through a Gauss-Newton iteration and apply the technique in a synthetic 2.5D DC resistivity crosswell tomographic example.}, keywords = {SLIM, SEG}, doi = {10.1190/1.3063719}, month = {11}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/eso08SEGira/eso08SEGira.pdf} } @CONFERENCE{fomel2007ICASSPrepro, author = {Sergey Fomel and Gilles Hennenfent}, title = {Reproducible computational experiments using scons}, booktitle = {ICASSP}, year = {2007}, organization = {ICASSP}, abstract = {SCons (from Software Construction) is a well-known open-source program designed primarily for building software. In this paper, we describe our method of extending SCons for managing data processing flows and reproducible computational experiments. We demonstrate our usage of SCons with a simple example.}, keywords = {ICASSP}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2007/fomel07ICASSPrepro/fomel07ICASSPrepro.pdf} } @CONFERENCE{friedlander2009NUalssr, author = {Michael P. Friedlander}, title = {Algorithms for large-scale sparse reconstruction}, booktitle = {IEMS}, year = {2009}, address = {Northwestern University}, organization = {IEMS Colloquium Speaker}, keywords = {minimization, SLIM} } @CONFERENCE{friedlander2009VIETcsgpa, author = {Michael P. Friedlander}, title = {Computing sparse and group-sparse approximations}, booktitle = {VIET}, year = {2009}, address = {Hanoi, Vietnam}, organization = {2009 High Performance Scientific Computing Conference}, keywords = {minimization, SLIM} } @CONFERENCE{friedlander2008SIAMasa, author = {Michael P. Friedlander}, title = {Active-set approaches to basis pursuit denoising}, booktitle = {SIAM Optimization}, year = {2008}, organization = {SIAM Optimization}, file = {http://www.cs.ubc.ca/~mpf/public/mpf08siopt.pdf:PDF}, keywords = {SLIM}, month = {05}, url = {http://www.cs.ubc.ca/~mpf/public/mpf08siopt.pdf} } @CONFERENCE{friedlander2008SINBADafl, author = {Michael P. Friedlander}, title = {Algorithms for large-scale sparse reconstruction}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {Many signal processing applications seek to approximate a signal as a linear combination of only a few elementary atoms drawn from a large collection. This is known as sparse reconstruction, and the theory of compressed sensing allows us to pose it as a structured convex optimization problem. I will discuss the role of duality in revealing some unexpected and useful properties of these problems, and will show how they can lead to practical, large-scale algorithms. I will also describe some applications of these algorithms.}, keywords = {SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/friedlander2008SINBADafl/friedlander2008SINBADafl.pdf} } @CONFERENCE{friedlander2008WCOMasm, author = {Michael P. Friedlander and M. A. Saunders}, title = {Active-set methods for basis pursuit}, booktitle = {WCOM}, year = {2008}, organization = {West Coast Optimization Meeting (WCOM)}, abstract = {Many imaging and compressed sensing applications seek sparse solutions to large under-determined least-squares problems. The basis pursuit (BP) approach minimizes the 1-norm of the solution, and the BP denoising (BPDN) approach balances it against the least-squares fit. The duals of these problems are conventional linear and quadratic programs.
We introduce a modified parameterization of the BPDN problem and explore the effectiveness of active-set methods for solving its dual. Our basic algorithm for the BP dual unifies several existing algorithms and is applicable to large-scale examples.}, file = {http://www.cs.ubc.ca/~mpf/public/mpf08siopt.pdf:PDF}, month = {07}, url = {http://www.cs.ubc.ca/~mpf/public/mpf08siopt.pdf} } @CONFERENCE{frijlink2010EAGEcos, author = {M. O. Frijlink and Reza Shahidi and Felix J. Herrmann and R. G. van Borselen}, title = {Comparison of standard adaptive subtraction and primary-multiple separation in the curvelet domain}, booktitle = {EAGE}, year = {2010}, month = {06}, abstract = {In recent years, data-driven multiple prediction methods and wavefield extrapolation methods have proven to be powerful methods to attenuate multiples from data acquired in complex 3-D geologic environments. These methods make use of a two-stage approach, where first the multiples (surface-related and/or internal) are predicted before they are adaptively subtracted from the original input data. The quality of these predicted multiples often raises high expectations for the adaptive subtraction techniques, but for various reasons these expectations are not always met in practice. Standard adaptive subtraction methods use the well-known minimum energy criterion, stating that the total energy after optimal multiple attenuation should be minimal. When primaries and multiples interfere, the minimum energy criterion is no longer appropriate. Also, when multiples of different orders interfere, adaptive energy minimization will lead to a compromise between different amplitude corrections for the different orders of multiples. This paper investigates the performance of two multiple subtraction schemes for a real data set that exhibits both interference problems. Results from an adaptive subtraction in the real curvelet domain, separating primaries and multiples, are compared to those obtained using a more conventional adaptive subtraction method in the spatial domain.}, keywords = {EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/frijlink10EAGEcos/frijlink10EAGEcos.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39875} } @CONFERENCE{hennenfent2008SINBADnii2, author = {Gilles Hennenfent}, title = {New insights into one-norm solvers from the {Pareto} curve}, booktitle = {SINBAD}, year = {2008}, abstract = {Several geophysical ill-posed inverse problems are successfully solved by promoting sparsity using one-norm regularization. The practicality of this approach depends on the effectiveness of the one-norm solver used and on its robustness under a limited number of iterations. We propose an approach to understand the behavior and evaluate the performance of one-norm solvers. The technique consists of tracking on a graph the data misfit versus the one norm of successive iterates. By comparing the solution paths to the Pareto curve, we are able to assess the performance of the solvers and the quality of the solutions.
Such an assessment is particularly relevant given the renewed interest in one-norm regularization.}, keywords = {SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/hennenfent2008SINBADnii2/hennenfent2008SINBADnii2.pdf} } @CONFERENCE{hennenfent2008SINBADsdw2, author = {Gilles Hennenfent}, title = {Simply denoise: wavefield reconstruction via jittered undersampling}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We present a new discrete undersampling scheme designed to favor wavefield reconstruction by sparsity-promoting inversion with transform elements that are localized in the Fourier domain. Our work is motivated by empirical observations in the seismic community, corroborated by recent results from compressive sampling, which indicate favorable (wavefield) reconstructions from random as opposed to regular undersampling. As predicted by theory, random undersampling renders coherent aliases into harmless incoherent random noise, effectively turning the interpolation problem into a much simpler denoising problem. A practical requirement of wavefield reconstruction with localized sparsifying transforms is the control on the maximum gap size. Unfortunately, random undersampling does not provide such a control and the main purpose of this paper is to introduce a sampling scheme, coined jittered undersampling, that shares the benefits of random sampling, while offering control on the maximum gap size. Our contribution of jittered sub-Nyquist sampling proves to be key in the formulation of a versatile wavefield sparsity-promoting recovery scheme that follows the principles of compressive sampling. After studying the behavior of the jittered-undersampling scheme in the Fourier domain, its performance is studied for curvelet recovery by sparsity-promoting inversion (CRSI). Our findings on synthetic and real seismic data indicate an improvement of several decibels over recovery from regularly-undersampled data for the same amount of data collected.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/hennenfent2008SINBADsdw2/hennenfent2008SINBADsdw2.pdf} } @CONFERENCE{hennenfent07gradsem, author = {Gilles Hennenfent}, title = {Reproducible research in computational (geo)sciences}, booktitle = {Graduate seminar series}, year = {2007}, month = {01}, organization = {Graduate Seminar Series}, owner = {Shruti}, quality = {1}, timestamp = {2013.01.16}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Misc/hennenfent07gradsem.pdf} } @CONFERENCE{hennenfent2007SINBADjdn, author = {Gilles Hennenfent}, title = {Just denoise: nonlinear recovery from randomly sampled data}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {In this talk, we turn the interpolation problem of coarsely-sampled data into a denoising problem. From this point of view, we illustrate the benefit of random sampling at sub-Nyquist rate over regular sampling at the same rate.
We show that, using nonlinear sparsity-promoting optimization, coarse random sampling may actually lead to significantly better wavefield reconstruction than equivalent regularly sampled data.}, keywords = {Presentation, SINBAD, SLIM} } @CONFERENCE{hennenfent06SINBADscons, author = {Gilles Hennenfent}, title = {Basic Processing flows with SCons}, booktitle = {SINBAD}, year = {2006}, organization = {SINBAD}, quality = {1}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent06SINBADscons/hennenfent06SINBADscons.pdf} } @CONFERENCE{hennenfent06SINBADssr, author = {Gilles Hennenfent}, title = {A primer on stable signal recovery}, booktitle = {SINBAD}, year = {2006}, organization = {SINBAD}, quality = {1}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent06SINBADssr/hennenfent06SINBADssr.pdf} } @CONFERENCE{hennenfent2006SINBADapo, author = {Gilles Hennenfent}, title = {A primer on sparsity transforms: curvelets and wave atoms}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {During this presentation an introduction will be given on the method of stable recovery from noisy and incomplete data. Strong recovery conditions that guarantee the recovery for arbitrary acquisition geometries will be reviewed and numerical recovery examples will be presented.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent2006SINBADapo/hennenfent2006SINBADapo.pdf} } @CONFERENCE{hennenfent2006SINBADros, author = {Gilles Hennenfent}, title = {Recovery of seismic data: practical considerations}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {We propose a method for seismic data interpolation based on 1) the reformulation of the problem as a stable signal recovery problem and 2) the fact that seismic data is sparsely represented by curvelets. This method does not require information on the seismic velocities. Most importantly, this formulation potentially leads to an explicit recovery condition. We also propose a large-scale problem solver for the l1-regularization minimization involved in the recovery and successfully illustrate the performance of our algorithm on 2D synthetic and real examples.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent2006SINBADros/hennenfent2006SINBADros.pdf} } @CONFERENCE{hennenfent2006SINBADtnf, author = {Gilles Hennenfent}, title = {The {Nonuniform} {Fast} {Discrete} {Curvelet} {Transform} ({NFDCT})}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {The authors present an extension of the fast discrete curvelet transform (FDCT) to nonuniformly sampled data. This extension not only restores curvelet compression rates for nonuniformly sampled data but also removes noise and maps the data to a regular grid.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent2006SINBADtnf/hennenfent2006SINBADtnf.pdf} } @CONFERENCE{hennenfent2008SEGonri, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {One-norm regularized inversion: learning from the {Pareto} curve}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, organization = {SEG}, abstract = {Geophysical inverse problems typically involve a trade off between data misfit and some prior. 
Pareto curves trace the optimal trade-off between these two competing aims. These curves are commonly used in problems with two-norm priors where they are plotted on a log-log scale and are known as L-curves. For other priors, such as the sparsity-promoting one norm, Pareto curves remain relatively unexplored. First, we show how these curves provide an objective criterion to gauge how robust one-norm solvers are when they are limited by a maximum number of matrix-vector products that they can perform. Second, we use Pareto curves and their properties to define and compute one-norm compressibilities. We argue this notion is key to understanding one-norm regularized inversion. Third, we illustrate the correlation between the one-norm compressibility and the performance of Fourier and curvelet reconstructions with sparsity promoting inversion.}, keywords = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/hennenfent08SEGonri/hennenfent08SEGonri.pdf} } @CONFERENCE{hennenfent2007EAGEcrw, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Curvelet reconstruction with sparsity-promoting inversion: successes and challenges}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, … in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {In this overview of the recent Curvelet Reconstruction with Sparsity-promoting Inversion (CRSI) method, we present our latest 2-D and 3-D interpolation results on both synthetic and real datasets. We compare these results to interpolated data using other existing methods. Finally, we discuss the challenges related to sparsity-promoting solvers for the large-scale problems the industry faces.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEcrw/hennenfent07EAGEcrw_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEcrw/hennenfent07EAGEcrw.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7553} } @CONFERENCE{hennenfent2007EAGEisf, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Irregular sampling: from aliasing to noise}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {Seismic data is often irregularly and/or sparsely sampled along spatial coordinates. We show that these acquisition geometries are not necessarily a source of adversity when it comes to accurately reconstructing adequately-sampled data. We use two examples to illustrate that it may actually be better than equivalent regularly subsampled data. This comment was already made in earlier works by other authors. We explain this behavior by two key observations. Firstly, a noise-free underdetermined problem can be seen as a noisy well-determined problem.
Secondly, regular subsampling creates strong coherent acquisition noise (aliasing) that is difficult to remove, unlike the noise created by irregular subsampling, which is typically weaker and Gaussian-like.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf.pdf}, url2 = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf_WS.pdf}, url3 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6487} } @CONFERENCE{hennenfent2007SEGrsn, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Random sampling: new insights into the reconstruction of coarsely sampled wavefields}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, pages = {2575-2579}, organization = {SEG}, abstract = {In this paper, we turn the interpolation problem of coarsely-sampled data into a denoising problem. From this point of view, we illustrate the benefit of random sampling at sub-Nyquist rate over regular sampling at the same rate. We show that, using nonlinear sparsity-promoting optimization, coarse random sampling may actually lead to significantly better wavefield reconstruction than equivalent regularly sampled data. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2793002}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/hennenfent07SEGrsn/hennenfent07SEGrsn_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/hennenfent07SEGrsn/hennenfent07SEGrsn.pdf} } @CONFERENCE{hennenfent2007SINBADrii, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Recent insights in $\ell_1$ solvers}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {During this talk, an overview is given of our work on norm-one solvers as part of the DNOISE project. Gilles will explain the ins and outs of our iterative thresholding solver based on log cooling, while Felix will present the work of Michael Friedlander, "A Newton root-finding algorithm for large-scale basis pursuit denoise". Both approaches involve the solution of the basis pursuit problem that seeks a minimum one-norm solution of an underdetermined least-squares problem. Basis pursuit denoise (BPDN) fits the least-squares problem only approximately, and a single parameter determines a curve that traces the trade-off between the least-squares fit and the one-norm of the solution. In the work of Friedlander, it is shown that the function that describes this curve is convex and continuously differentiable over all points of interest. They describe an efficient procedure for evaluating this function and its derivatives. As a result, they can compute arbitrary points on this curve. Their method is suitable for large-scale problems. Only matrix-vector operations are required. This is joint work with Ewout van den Berg and Michael P. Friedlander}, keywords = {Presentation, SINBAD, SLIM} } @CONFERENCE{hennenfent2006SEGaos, author = {Gilles Hennenfent and Felix J.
Herrmann}, title = {Application of stable signal recovery to seismic data interpolation}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2006}, pages = {2797-2801}, organization = {SEG}, abstract = {We propose a method for seismic data interpolation based on 1) the reformulation of the problem as a stable signal recovery problem and 2) the fact that seismic data is sparsely represented by curvelets. This method does not require information on the seismic velocities. Most importantly, this formulation potentially leads to an explicit recovery condition. We also propose a large-scale problem solver for the $\ell_1$-regularization minimization involved in the recovery and successfully illustrate the performance of our algorithm on 2D synthetic and real examples. {\copyright}2006 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, curvelets, interpolation, seismic data, regularization minimization, iterative thresholding, amplitude, SEG, continuity, fast transform}, doi = {10.1190/1.2370105}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2006/hennenfent06SEGaos/hennenfent06SEGaos_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2006/hennenfent06SEGaos/hennenfent06SEGaos.pdf} } @CONFERENCE{hennenfent2005SEGscd, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Sparseness-constrained data continuation with frames: applications to missing traces and aliased signals in {2/3-D}}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2005}, pages = {2162-2165}, organization = {SEG}, abstract = {We present a robust iterative sparseness-constrained interpolation algorithm using 2-/3-D curvelet frames and Fourier-like transforms that exploits continuity along reflectors in seismic data. By choosing generic transforms, we circumvent the necessity to make parametric assumptions (e.g. through linear/parabolic Radon or demigration) regarding the shape of events in seismic data. Simulation and real data examples for data with moderately sized gaps demonstrate that our algorithm provides interpolated traces that accurately reproduce the wavelet shape as well as the AVO behavior. Our method also shows good results for de-aliasing judged by the behavior of the ($f-k$)-spectrum before and after regularization. {\copyright}2005 Society of Exploration Geophysicists}, keywords = {Presentation, SEG, SLIM}, doi = {10.1190/1.2148142}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2005/Hennenfent05SEGscd/Hennenfent05SEGscd_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2005/Hennenfent05SEGscd/Hennenfent05SEGscd.pdf} } @CONFERENCE{hennenfent2004SEGtta, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Three-term amplitude-versus-offset ({AVO}) inversion revisited by curvelet and wavelet transforms}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2004}, pages = {211-214}, organization = {SEG}, abstract = {We present a new method to stabilize the three-term AVO inversion using Curvelet and Wavelet transforms. Curvelets are basis functions that effectively represent otherwise smooth objects having discontinuities along smooth curves. The applied formalism exploits them to make the most of the continuity along reflectors in seismic images. Combined with Wavelets, Curvelets are used to denoise the data by penalizing high frequencies and small contributions in the AVO-cube.
This approach is based on the idea that rapid amplitude changes along the ray-parameter axis are most likely due to noise. The AVO-inverse problem is linearized, formulated and solved for all (x, z) at once. Using densities and velocities of the Marmousi model to define the fluctuations in the elastic properties, the performance of the proposed method is studied and compared with the smoothing along the ray-parameter direction only. We show that our method better approximates the true data after the denoising step, especially when the noise level increases. {\copyright}2004 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.1851201}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Hennenfent04SEGtta/Hennenfent04SEGtta_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Hennenfent04SEGtta/Hennenfent04SEGtta.pdf} } @CONFERENCE{hennenfent2005CSEGscs, author = {Gilles Hennenfent and Felix J. Herrmann and R. Neelamani}, title = {Sparseness-constrained seismic deconvolution with curvelets}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2005}, organization = {CSEG}, abstract = {Continuity along reflectors in seismic images is used via Curvelet representation to stabilize the convolution operator inversion. The Curvelet transform is a new multiscale transform that provides sparse representations for images that comprise smooth objects separated by piece-wise smooth discontinuities (e.g. seismic images). Our iterative Curvelet-regularized deconvolution algorithm combines conjugate gradient-based inversion with noise regularization performed using non-linear Curvelet coefficient thresholding. The thresholding operation enhances the sparsity of Curvelet representations. We show on a synthetic example that our algorithm provides improved resolution and continuity along reflectors as well as a reduced ringing effect compared to the iterative Wiener-based deconvolution approach.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2005/Hennenfent05CSEGscs/Hennenfent05CSEGscs_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2005/Hennenfent05CSEGscs/Hennenfent05CSEGscs.pdf} } @CONFERENCE{hennenfent2005EAGEsdr, author = {Gilles Hennenfent and R. Neelamani and Felix J. Herrmann}, title = {Seismic deconvolution revisited with curvelet frames}, booktitle = {EAGE}, year = {2005}, month = {06}, abstract = {We propose an efficient iterative curvelet-regularized deconvolution algorithm that exploits continuity along reflectors in seismic images. Curvelets are a new multiscale transform that provides sparse representations for images (such as seismic images) that comprise smooth objects separated by piece-wise smooth discontinuities. Our technique combines conjugate gradient-based convolution operator inversion with noise regularization that is performed using non-linear curvelet coefficient shrinkage (thresholding). The shrinkage operation leverages the sparsity of curvelet representations.
Simulations demonstrate that our algorithm provides improved resolution compared to the traditional Wiener-based deconvolution approach.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Hennenfent05EAGEsdr/Hennenfent05EAGEsdr_poster.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Hennenfent05EAGEsdr/Hennenfent05EAGEsdr.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1383} } @CONFERENCE{herrmann2003SPIEmsa, author = {Felix J. Herrmann}, title = {Multifractional splines: application to seismic imaging}, booktitle = {Proceedings of SPIE Technical Conference on Wavelets: Applications in Signal and Image Processing X}, year = {2003}, editor = {Michael A. Unser and Akram Aldroubi and Andrew F. Laine}, volume = {5207}, pages = {240-258}, organization = {SPIE}, abstract = {Seismic imaging commits itself to locating singularities in the elastic properties of the Earth{\textquoteright}s subsurface. Using the high-frequency ray-Born approximation for scattering from non-intersecting smooth interfaces, seismic data can be represented by a generalized Radon transform mapping the singularities in the medium to seismic data. Even though seismic data are bandwidth limited, signatures of the singularities in the medium carry through this transform and its inverse and this mapping property presents us with the possibility to develop new imaging techniques that preserve and characterize the singularities from incomplete, bandwidth-limited and noisy data. In this paper we propose a non-adaptive Curvelet/Contourlet technique to image and preserve the singularities and a data-adaptive Matching Pursuit method to characterize these imaged singularities by Multi-fractional Splines. This first technique borrows from the ideas within the Wavelet-Vaguelette/Quasi-SVD approach. We use the almost diagonalization of the scattering operator to approximately compensate for (i) the coloring of the noise and hence facilitate estimation; (ii) the normal operator itself. Results of applying these techniques to seismic imaging are encouraging although many open fundamental questions remain.}, keywords = {Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SPIE/2003/herrmann2003SPIEmsa/herrmann2003SPIEmsa.pdf} } @CONFERENCE{Herrmann13NIPSrse, author = {Felix J. Herrmann}, title = {Randomized sampling in exploration seismology}, booktitle = {NIPS}, year = {2013}, timestamp = {2013.01.09}, url = {http://techtalks.tv/talks/randomized-sampling-in-exploration-seismology/57871/}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/NIPS/2013/Herrmann13NIPSrse/Herrmann13NIPSrse.pdf} } @CONFERENCE{herrmann2012EAGEpmr, author = {Felix J. Herrmann}, title = {Pass on the message: recent insights in large-scale sparse recovery}, booktitle = {74th EAGE Conference and Exhibition 2012}, year = {2012}, month = {06}, pages = {B022}, abstract = {Data collection, data processing, and imaging in exploration seismology increasingly hinge on large-scale sparsity promoting solvers to remove artifacts caused by efforts to reduce costs. We show how the inclusion of a "message term" in the calculation of the residuals improves the convergence of these iterative solvers by breaking correlations that develop between the model iterate and the linear system that needs to be inverted. 
We compare this message-passing scheme to state-of-the-art solvers for problems in missing-trace interpolation and in dimensionality-reduced imaging with phase encoding.}, keywords = {EAGE, message passing, sparse inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEpmr/herrmann2012EAGEpmr_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEpmr/herrmann2012EAGEpmr.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58935} } @CONFERENCE{herrmann2012SEGals, author = {Felix J. Herrmann}, title = {Accelerated large-scale inversion with message passing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2012}, month = {11}, volume = {31}, pages = {1-6}, organization = {SEG}, abstract = {To meet current-day challenges, exploration seismology increasingly relies on more and more sophisticated algorithms that require multiple paths through all data. This requirement leads to problems because the size of seismic data volumes is increasing exponentially, exposing bottlenecks in IO and computational capability. To overcome these bottlenecks, we follow recent trends in machine learning and compressive sensing by proposing a sparsity-promoting inversion technique that works on small randomized subsets of data only. We boost the performance of this algorithm significantly by modifying a state-of-the-art l1-norm solver to benefit from message passing, which breaks the build up of correlations between model iterates and the randomized linear forward model. We demonstrate the performance of this algorithm on a toy sparse-recovery problem and on a realistic reverse-time-migration example with random source encoding. The improvements in speed, memory use, and output quality are truly remarkable.}, keywords = {imaging, optimization, compressive sensing, SEG}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/herrmann2012SEGals/herrmann2012SEGals_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/herrmann2012SEGals/herrmann2012SEGals.pdf}, doi = {10.1190/segam2012-0847.1} } @CONFERENCE{herrmann2012SSPamp, author = {Felix J. Herrmann}, title = {Approximate message passing meets exploration seismology}, booktitle = {2012 IEEE Statistical Signal Processing Workshop (SSP) (SSP'12)}, year = {2012}, address = {Ann Arbor, Michigan, USA}, organization = {IEEE}, abstract = {Data collection, data processing, and imaging in exploration seismology increasingly hinge on large-scale sparsity promoting solvers to remove artifacts caused by efforts to reduce costs. We show how the inclusion of a 'message term' in the calculation of the residuals improves the convergence of these iterative solvers by breaking correlations that develop between the model iterate and the linear system that needs to be inverted. We compare this message-passing scheme to state-of-the-art solvers for problems in missing-trace interpolation and in dimensionality-reduced imaging with phase encoding.}, keywords = {exploration seismology, compressive sensing, transform-domain sparsity promotion, seismic imaging}, month = {03}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SSP/2012/herrmann2012SSPamp/herrmann2012SSPamp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SSP/2012/herrmann2012SSPamp/herrmann2012SSPamp.pdf} } @CONFERENCE{herrmann2012UW, author = {Felix J.
Herrmann}, title = {Compressive sensing and sparse recovery in exploration seismology}, booktitle = {Talk at University of Wisconsin}, year = {2012}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Wisconsin/2012/herrmann2012UW/herrmann2012UW.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Wisconsin/2012/herrmann2012UW/herrmann2012UW.pdf} } @CONFERENCE{herrmann11SLIMsummer2, author = {Felix J. Herrmann}, title = {Lecture 2. {Gene} {Golub} {SIAM} {Summer} {School} {July} 4 - 15, 2011}, booktitle = {SLIM}, year = {2011}, keywords = {Presentation}, month = {08}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2011/herrmann11SLIMsummer2/herrmann11SLIMsummer2.pdf}, timestamp = {2011.08.05} } @CONFERENCE{herrmann2011SLIMsummer1, author = {Felix J. Herrmann}, title = {{Gene} {Golub} {SIAM} {Summer} {School} {July} 4 - 15, 2011}, booktitle = {SLIM}, year = {2011}, keywords = {Presentation}, month = {08}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2011/herrmann11SLIMsummer1/herrmann11SLIMsummer1.pdf}, timestamp = {2011.08.05} } @CONFERENCE{herrmann2010EAGErss, author = {Felix J. Herrmann}, title = {Randomized sampling strategies}, booktitle = {EAGE}, year = {2010}, month = {06}, abstract = {Seismic exploration relies on the collection of massive data volumes that are subsequently mined for information during seismic processing. While this approach has been extremely successful in the past, the current trend towards higher quality images in increasingly complicated regions continues to reveal fundamental shortcomings in our workflows for high-dimensional data volumes. Two causes can be identified. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this curse of dimensionality. In this paper, we offer a way out of this situation by a deliberate randomized subsampling combined with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information. As such we end up with a new technology where the costs of acquisition and processing are no longer dictated by the size of the acquisition but by the transform-domain sparsity of the end-product.}, keywords = {Presentation, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErss/herrmann10EAGErss_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErss/herrmann10EAGErss.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39131} } @CONFERENCE{herrmann2010IRISsns, author = {Felix J. Herrmann}, title = {Sub-{Nyquist} sampling and sparsity: getting more information from fewer samples}, booktitle = {IRIS}, year = {2010}, abstract = {Many seismic exploration techniques rely on the collection of massive data volumes. 
While this approach has been extremely successful in the past, current efforts toward higher resolution images in increasingly complicated regions of the Earth continue to reveal fundamental shortcomings in our workflows. Chief amongst these is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which disproportionately strains current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. In this presentation, we offer an alternative sampling method leveraging recent insights from compressive sensing towards seismic acquisition and processing of severely under-sampled data. The main outcome of this approach is a new technology where acquisition and processing related costs are no longer determined by overly stringent sampling criteria, such as Nyquist. At the heart of our approach lies randomized incoherent sampling that breaks subsampling related interferences by turning them into harmless noise, which we subsequently remove by promoting transform-domain sparsity. Now, costs no longer grow significantly with resolution and dimensionality of the survey area, but instead depend on transform-domain sparsity only. Our contribution is twofold. First, we demonstrate by means of carefully designed numerical experiments that compressive sensing can successfully be adapted to seismic exploration. Second, we show that accurate recovery can be accomplished for compressively sampled data volume sizes that exceed the size of conventional transform-domain data volumes by only a small factor. Because compressive sensing combines transformation and encoding by a single linear encoding step, this technology is directly applicable to acquisition and to dimensionality reduction during processing. In either case, sampling, storage, and processing costs scale with transform-domain sparsity.}, keywords = {Presentation}, note = {Presented at the IRIS Workshop}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/IRIS/2010/herrmann2010IRISsns/herrmann2010IRISsns.pdf} } @CONFERENCE{herrmann2010MATHIAScssr, author = {Felix J. Herrmann}, title = {Compressive sensing and sparse recovery in exploration seismology}, booktitle = {MATHIAS}, year = {2010}, abstract = {During this presentation, I will talk about how recent results from compressive sensing and sparse recovery can be used to solve problems in exploration seismology where incomplete sampling is ubiquitous. I will also talk about how these ideas apply to dimensionality reduction of full-waveform inversion by randomly phase encoded sources.}, keywords = {Presentation}, note = {Presented at MATHIAS 2010 organized by Total SA. Paris}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/MATHIAS/2010/herrmann2010MATHIAScssr/herrmann2010MATHIAScssr.pdf} } @CONFERENCE{herrmann2009PIMScssr1, author = {Felix J. Herrmann}, title = {Compressed sensing and sparse recovery in exploration seismology}, booktitle = {PIMS}, year = {2009}, abstract = {In this course, I will present how recent results from compressed sensing and sparse recovery apply to exploration seismology. During the first lecture, I will present the basic principles of compressive sensing; the importance of random jitter sampling and sparsifying transforms; and large-scale one-norm solvers. I will discuss the application of these techniques to missing trace interpolation. The second lecture will be devoted to coherent signal separation based on curvelet-domain matched filtering and Bayesian separation with sparsity promotion. Applications of these techniques to the primary-multiple wavefield-separation problem on real data will be discussed as well. The third lecture will be devoted towards sparse recovery in seismic modeling and imaging and includes the problem of preconditioning the imaging operators, and the recovery from simultaneous source-acquired data.}, keywords = {Presentation}, note = {Lecture I presented at the PIMS Summer School on Seismic Imaging, Seattle}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/PIMS/2009/herrmann2009PIMScssr1/herrmann2009PIMScssr1.pdf} } @CONFERENCE{herrmann2009PIMScssr2, author = {Felix J. Herrmann}, title = {Compressed sensing and sparse recovery in exploration seismology}, booktitle = {PIMS}, year = {2009}, abstract = {In this course, I will present how recent results from compressed sensing and sparse recovery apply to exploration seismology. During the first lecture, I will present the basic principles of compressive sensing; the importance of random jitter sampling and sparsifying transforms; and large-scale one-norm solvers. I will discuss the application of these techniques to missing trace interpolation. The second lecture will be devoted to coherent signal separation based on curvelet-domain matched filtering and Bayesian separation with sparsity promotion.
Applications of these techniques to the primary-multiple wavefield-separation problem on real data will be discussed as well. The third lecture will be devoted towards sparse recovery in seismic modeling and imaging and includes the problem of preconditioning the imaging operators, and the recovery from simultaneous source-acquired data.}, keywords = {Presentation}, note = {Lecture II presented at the PIMS Summer School on Seismic Imaging, Seattle}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/PIMS/2009/herrmann2009PIMScssr2/herrmann2009PIMScssr2.pdf} } @CONFERENCE{herrmann2009PIMScssr3, author = {Felix J. Herrmann}, title = {Compressed sensing and sparse recovery in exploration seismology}, booktitle = {PIMS}, year = {2009}, abstract = {In this course, I will present how recent results from compressed sensing and sparse recovery apply to exploration seismology. During the first lecture, I will present the basic principles of compressive sensing; the importance of random jitter sampling and sparsifying transforms; and large-scale one-norm solvers. I will discuss the application of these techniques to missing trace interpolation. The second lecture will be devoted to coherent signal separation based on curvelet-domain matched filtering and Bayesian separation with sparsity promotion. Applications of these techniques to the primary-multiple wavefield-separation problem on real data will be discussed as well. The third lecture will be devoted towards sparse recovery in seismic modeling and imaging and includes the problem of preconditioning the imaging operators, and the recovery from simultaneous source-acquired data.}, keywords = {Presentation}, note = {Lecture III presented at the PIMS Summer School on Seismic Imaging, Seattle}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/PIMS/2009/herrmann2009PIMScssr3/herrmann2009PIMScssr3.pdf} } @CONFERENCE{herrmann2009SEGcib, author = {Felix J. Herrmann}, title = {Compressive imaging by wavefield inversion with group sparsity}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {2337-2341}, organization = {SEG}, abstract = {Migration relies on multi-dimensional correlations between source- and residual wavefields. These multi-dimensional correlations are computationally expensive because they involve operations with explicit and full matrices that contain both wavefields. By leveraging recent insights from compressive sampling, we present an alternative method where linear correlation-based imaging is replaced by imaging via multidimensional deconvolutions of compressibly sampled wavefields. Even though this approach comes at the expense of having to solve a sparsity-promotion recovery program for the image, our wavefield inversion approach has the advantage of reducing the system size in accordance with the transform-domain sparsity of the image. Because seismic images also exhibit a focusing of the energy towards zero offset, the compressive-wavefield inversion itself is carried out using a recent extension of one-norm solver technology towards matrix-valued problems. These so-called hybrid $(1,\,2)$-norm solvers allow us to penalize pre-stack energy away from zero offset while exploiting joint sparsity amongst near-offset images. Contrary to earlier work to reduce modeling and imaging costs through random phase-encoded sources, our method compressively samples wavefields in model space.
This approach has several advantages, amongst which are improved system-size reduction and more flexibility during subsequent inversions for subsurface properties.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255328}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGcib/herrmann09SEGcib_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGcib/herrmann09SEGcib.pdf} } @CONFERENCE{herrmann2009SEGrpl, author = {Felix J. Herrmann}, title = {Reflector-preserved lithological upscaling}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3466-3470}, organization = {SEG}, abstract = {By combining Percolation models with lithological smoothing, we arrive at a method for upscaling rock elastic constants that preserves reflections. In this approach, the Percolation model predicts sharp onsets in the elastic moduli of sand-shale mixtures when the shales reach a critical volume fraction. At that point, the shale inclusions form a connected cluster, and the macroscopic rock properties change with the power-law growth of the cluster. This switch-like nonlinearity preserves singularities, and hence reflections, even if no sharp transition exists in the lithology or if they are smoothed out using standard upscaling procedures.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255582}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGrpl/herrmann09SEGrpl_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGrpl/herrmann09SEGrpl.pdf} } @CONFERENCE{herrmann2009SEGsns, author = {Felix J. Herrmann}, title = {Sub-{Nyquist} sampling and sparsity: how to get more information from fewer samples}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3410-3415}, organization = {SEG}, abstract = {Seismic exploration relies on the collection of massive data volumes that are subsequently mined for information during seismic processing. While this approach has been extremely successful in the past, the current trend of incessantly pushing for higher quality images in increasingly complicated regions of the Earth continues to reveal fundamental shortcomings in our workflows to handle massive high-dimensional data volumes. Two causes can be identified as the main culprits responsible for this barrier. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this curse of dimensionality. In this paper, we offer a way out of this situation by a deliberate \emph{randomized} subsampling combined with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information.
Because of this size reduction both impediments are removed and we end up with a new technology where the costs of acquisition and processing are no longer dictated by the \emph{size of the acquisition} but by the transform-domain \emph{sparsity} of the end-product after processing.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255570}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGsns/herrmann09SEGsns_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/herrmann09SEGsns/herrmann09SEGsns.pdf} } @CONFERENCE{herrmann2008IONcsa, author = {Felix J. Herrmann}, title = {Compressive sampling: a new paradigm for seismic data acquisition and processing?}, booktitle = {ION}, year = {2008}, abstract = {Seismic data processing and imaging are firmly rooted in the well-established paradigm of regular Nyquist sampling. Faced with a typically uncooperative environment, practitioners of seismic data acquisition make all efforts to comply with this theory by creating regularly-sampled seismic-data volumes that are suitable for Fourier-based processing flows. The current advent of new alternative transform domains{\textendash}- such as the sparsifying curvelet domain, where seismic data is decomposed into localized, multiscale and multidirectional plane waves{\textendash}- opens the possibility to change this paradigm by no longer combating sampling irregularity but by embracing it. During this talk, we show that as long as seismic data volumes permit a compressible representation{\textendash}-i.e., data can be represented as a superposition of a relatively small number of elementary waveforms{\textendash}- Nyquist sampling is unnecessarily pessimistic. So far, nothing new, we all know from the work on Fourier- or other transform-based seismic-data regularization methodologies that wavefields can be recovered accurately from sub-Nyquist samplings through some sort of optimization procedure. What is new, however, are recent insights from the field of "compressive sampling", which dictate the conditions that guarantee or, at least, in practice provide conditions that favor sparsity-promoting recovery from sub-Nyquist sampling. Random sub-sampling, or to be more precise, jitter sub-sampling creates favorable conditions for curvelet-based recovery. We explain this phenomenon by arguing that this type of sampling leads to noisy data, hence our slogan "Simply denoise: wavefield reconstruction via jittered undersampling", where we bank on separating incoherent sub-sampling noise with curvelet-domain sparsity promotion. During our presentation, we introduce you to what curvelets are, why random jitter sampling is important and why this opens a pathway towards a new paradigm of curvelet-domain seismic data processing. Our claims will be supported by examples on synthetic and field data. This is joint work with Gilles Hennenfent, PhD student at SLIM.}, keywords = {ION, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ION/herrmann2008IONcsa/herrmann08ion_pres.pdf} } @CONFERENCE{herrmann2008SEGcdm3, author = {Felix J. Herrmann}, title = {Curvelet-domain matched filtering}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {3643-3649}, organization = {SEG}, abstract = {Matching seismic wavefields lies at the heart of seismic processing whether one is adaptively subtracting multiples predictions or groundroll.
In both cases, the predictions are matched to the actual to-be-separated wavefield components in the observed data. The success of these wavefield matching procedures depends on our ability to (i) control possible overfitting, which may lead to accidental removal of primary energy, (ii) handle data with nonunique dips, and (iii) apply wavefield separation after matching stably. In this paper, we show that the curvelet transform allows us to address these issues by imposing smoothness in phase space, by using their capability to handle conflicting dips, and by leveraging their ability to represent seismic data sparsely.}, keywords = {SEG, SLIM}, doi = {10.1190/1.3064089}, month = {11}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/Herrmann08SEGcdm3/Herrmann08SEGcdm3.pdf } } @CONFERENCE{herrmann2008SEGgbu, author = {Felix J. Herrmann}, title = {Seismic noise: the good, the bad, \& the ugly}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, keywords = {Presentation, SEG, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/herrmann08SEGgbu/herrmann08SEGgbu.pdf} } @CONFERENCE{herrmann2008SINBADacd2, author = {Felix J. Herrmann}, title = {Adaptive curvelet-domain primary-multiple separation}, booktitle = {SINBAD}, year = {2008}, organization = {SINBAD}, note = {SINBAD 2008}, abstract = {In many exploration areas, successful separation of primaries and multiples greatly determines the quality of seismic imaging. Despite major advances made by Surface-Related Multiple Elimination (SRME), amplitude errors in the predicted multiples remain a problem. When these errors vary for each type of multiple differently (as a function of offset, time and dip), these amplitude errors pose a serious challenge for conventional least-squares matching and for the recently introduced separation by curvelet-domain thresholding. We propose a data-adaptive method that corrects amplitude errors, which vary smoothly as a function of location, scale (frequency band) and angle. In that case, the amplitudes can be corrected by an element-wise curvelet-domain scaling of the predicted multiples. We show that this scaling leads to a successful estimation of the primaries, despite amplitude, sign, timing and phase errors in the predicted multiples. Our results on synthetic and real data show distinct improvements over conventional least-squares matching, in terms of better suppression of multiple energy and high-frequency clutter and better recovery of the estimated primaries.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/herrmann2008SINBADacd2/herrmann2008SINBADacd2.pdf} } @CONFERENCE{herrmann2008SINBADfwr, author = {Felix J. Herrmann}, title = {(De)-{Focused} wavefield reconstructions}, booktitle = {SINBAD 2008}, year = {2008}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/herrmann2008SINBADfwr/herrmann2008SINBADfwr.pdf} } @CONFERENCE{herrmann2008SINBADpsm, author = {Felix J. Herrmann}, title = {Phase-space matched filtering and migration preconditioning}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {During this talk, I will report on new phase-space regularization functionals defined in terms of splines. This spline representation reduces the dimensionality of estimating our phase-space matched filter. We will discuss how this filter can be used in migration preconditioning. 
This is joint work with Christiaan Stolk.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/herrmann2008SINBADpsm/herrmann2008SINBADpsm.pdf} } @CONFERENCE{herrmann2008SINBADs2c, author = {Felix J. Herrmann}, title = {{SINBAD} 2008 Consortium meeting}, booktitle = {SINBAD 2008}, year = {2008}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/herrmann2008SINBADs2c/herrmann2008SINBADs2c.pdf} } @CONFERENCE{herrmann2007AIPsit, author = {Felix J. Herrmann}, title = {Seismic inversion through operator overloading}, booktitle = {AIP}, year = {2007}, abstract = {Inverse problems in (exploration) seismology are known for their large to very large scale. For instance, certain sparsity-promoting inversion techniques involve vectors that easily exceed $2^{30}$ unknowns while seismic imaging involves the construction and application of matrix-free discretized operators where single matrix-vector evaluations may require hours, days or even weeks on large compute clusters. For these reasons, software development in this field has remained the domain of highly technical codes programmed in low-level languages with little eye for easy development, code reuse and integration with (nonlinear) programs that solve inverse problems. Following ideas from the Symes{\textquoteright} Rice Vector Library and Bartlett{\textquoteright}s C++ object-oriented interface, Thyra, and Reduction/Transformation operators (both part of the Trilinos software package), we developed a software-development environment based on overloading. This environment provides a pathway from in-core prototype development to out-of-core and MPI {\textquoteright}production{\textquoteright} code with a high level of code reuse. This code reuse is accomplished by integrating the out-of-core and MPI functionality into the dynamic object-oriented programming language Python. This integration is implemented through operator overloading and allows for the development of a coordinate-free solver framework that (i) promotes code reuse; (ii) analyses the statements in an abstract syntax tree and (iii) generates executable statements. In the current implementation, we developed an interface to generate executable statements for the out-of-core unix-pipe based (seismic) processing package RSF-Madagascar (rsf.sf.net). The modular design allows for interfaces to other seismic processing packages and to in-core Python packages such as numpy. So far, the implementation overloads linear operators and elementwise reduction/transformation operators. We are planning extensions towards nonlinear operators and integration with existing (parallel) solver frameworks such as Trilinos.}, keywords = {AIP, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/AIP/2007/herrmann07AIPsit/herrmann07AIPsit.pdf} } @CONFERENCE{herrmann2007AIPssd, author = {Felix J. Herrmann}, title = {Stable seismic data recovery}, booktitle = {AIP}, year = {2007}, abstract = {In this talk, directional frames, known as curvelets, are used to recover seismic data and images from noisy and incomplete data. Sparsity and invariance properties of curvelets are exploited to formulate the recovery by an $\ell_1$-norm promoting program.
It is shown that our data recovery approach is closely linked to the recent theory of {\textquoteleft}{\textquoteleft}compressive sensing{\textquoteright}{\textquoteright} and can be seen as a first step towards a nonlinear sampling theory for wavefields. The second problem that will be discussed concerns the recovery of the amplitudes of seismic images in clutter. There, the invariance of curvelets is used to approximately invert the Gram operator of seismic imaging. In the high-frequency limit, this Gram matrix corresponds to a pseudo-differential operator, which is near diagonal in the curvelet domain.}, keywords = {AIP, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/AIP/2007/herrmann07AIPssd/herrmann07AIPssd.pdf} } @CONFERENCE{herrmann2007AMScsi, author = {Felix J. Herrmann}, title = {Compressive seismic imaging}, booktitle = {AMS Von Neumann}, year = {2007}, abstract = {Seismic imaging involves the solution of an inverse-scattering problem during which the energy of (extremely) large data volumes is collapsed onto the Earth's reflectors. We show how the ideas from "compressive sampling" can alleviate this task by exploiting the curvelet transform's "wavefront-set detection" capability and "invariance" property under wave propagation. First, a wavelet-vaguelette technique is reviewed, where seismic amplitudes are recovered from complete data by diagonalizing the Gram matrix of the linearized scattering problem. Next, we show how the recovery of seismic wavefields from incomplete data can be cast into a compressive sampling problem, followed by a proposal to compress wavefield extrapolation operators via compressive sampling in the modal domain. During the latter approach, we explicitly exploit the mutual incoherence between the eigenfunctions of the Helmholtz operator and the curvelet frame elements that compress the extrapolated wavefield. This is joint work with Gilles Hennenfent, Peyman Moghaddam, Tim Lin, Chris Stolk and Deli Wang.}, keywords = {AMS Von Neumann, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/vonNeuman/2007/herrmann07AMScsi/herrmann07AMScsi_pres.pdf} } @CONFERENCE{herrmann2007COIPpti, author = {Felix J. Herrmann}, title = {Phase transitions in exploration seismology: statistical mechanics meets information theory}, booktitle = {COIP}, year = {2007}, abstract = {In this paper, two different applications of phase transitions to exploration seismology will be discussed.
The first application concerns a phase diagram ruling the recovery conditions for seismic data volumes from incomplete and noisy data while the second phase transition describes the behavior of bi-compositional mixtures as a function of the volume fraction. In both cases, the phase transitions are the result of randomness in large systems of equations in combination with nonlinearity. The seismic recovery problem from incomplete data involves the inversion of a rectangular matrix. Recent results from the field of "compressive sensing" provide the conditions for a successful recovery of functions that are sparse in some basis (wavelet) or frame (curvelet) representation, by means of a sparsity ($\ell_1$-norm) promoting nonlinear program. The conditions for a successful recovery depend on a certain randomness of the matrix and on two parameters that express the matrix{\textquoteright}s aspect ratio and the ratio of the number of nonzero entries in the coefficient vector for the sparse signal representation over the number of measurements. It appears that the ensemble average for the success rate for the recovery of the sparse transformed data vector by a nonlinear sparsity promoting program can be described by a phase transition, demarcating the regions for the two ratios for which recovery of the sparse entries is likely to be successful or likely to fail. Consistent with other phase transition phenomena, the larger the system the sharper the transition. The randomness in this example is related to the construction of the matrix, which for the recovery of spike trains corresponds to the randomly restricted Fourier matrix. It is shown that these ideas can be extended to the curvelet recovery by sparsity-promoting inversion (CRSI). The second application of phase transitions in exploration seismology concerns the upscaling problem. To counter the intrinsic smoothing of singularities by conventional equivalent medium upscaling theory, a percolation-based nonlinear switch model is proposed. In this model, the transport properties of bi-compositional mixture models for rocks undergo a sudden change in the macroscopic transport properties as soon as the volume fraction of the stronger material reaches a critical point. At this critical point, the stronger material forms a connected cluster, which leads to the creation of a cusp-like singularity in the elastic moduli, which in turn gives rise to specular reflections. In this model, the reflectivity is no longer explicitly due to singularities in the rocks{\textquoteright} composition. Instead, singularities are created whenever the volume fraction exceeds the critical point. We will show that this concept can be used for a singularity-preserved lithological upscaling.}, keywords = {Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/COIP/2007/herrmann07COIPpti/herrmann07COIPpti_pres.pdf} } @CONFERENCE{herrmann2007CYBERsmc, author = {Felix J. Herrmann}, title = {Seismology meets compressive sampling}, booktitle = {Cyber}, year = {2007}, abstract = {Presented at Cyber-Enabled Discovery and Innovation: Knowledge Extraction as a success story lecture. See https://www.ipam.ucla.edu/programs/cdi2007/ for more detail.}, keywords = {Cyber, Presentation, SLIM}, note = {Presented at the joint NSF-IPAM meeting. Los Angeles. October, 2007}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Cyber/2007/herrmann07CYBERsmc/herrmann07CYBERsmc.pdf} } @CONFERENCE{herrmann2007EAGErdi, author = {Felix J.
Herrmann}, title = {Recent developments in curvelet-based seismic processing}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {Combinations of parsimonious signal representations with nonlinear sparsity promoting programs hold the key to the next-generation of seismic data processing algorithms, since they allow for a formulation that is stable w.r.t. noise \& incomplete data and do not require prior information on the velocity or locations and dips of the events.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGErdi/herrmann07EAGErdi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGErdi/herrmann07EAGErdi.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7548} } @CONFERENCE{herrmann2007EAGEsrm, author = {Felix J. Herrmann}, title = {Surface related multiple prediction from incomplete data}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {Incomplete data, unknown source-receiver signatures and free-surface reflectivity represent challenges for a successful prediction and subsequent removal of multiples. In this paper, a new method will be presented that tackles these challenges by combining what we know about wavefield (de-)focussing, by weighted convolutions/correlations, and recently developed curvelet-based recovery by sparsity-promoting inversion (CRSI). With this combination, we are able to leverage recent insights from wave physics towards a nonlinear formulation for the multiple-prediction problem that works for incomplete data and without detailed knowledge on the surface effects.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsrm/herrmann07EAGEsrm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsrm/herrmann07EAGEsrm.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6496} } @CONFERENCE{herrmann2007PIMScsm, author = {Felix J. Herrmann}, title = {Compressive sampling meets seismic imaging}, booktitle = {PIMS}, year = {2007}, keywords = {PIMS, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/PIMS/2007/herrmann07PIMScsm/herrmann07PIMScsm_pres.pdf} } @CONFERENCE{herrmann2007SEGmpf, author = {Felix J. Herrmann}, title = {Multiple prediction from incomplete data with the focused curvelet transform}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2505-2600}, abstract = {Incomplete data represents a major challenge for a successful prediction and subsequent removal of multiples. In this paper, a new method will be presented that tackles this challenge in a two-step approach. During the first step, the recently developed curvelet-based recovery by sparsity-promoting inversion (CRSI) is applied to the data, followed by a prediction of the primaries. During the second high-resolution step, the estimated primaries are used to improve the frequency content of the recovered data by combining the focal transform, defined in terms of the estimated primaries, with the curvelet transform.
This focused curvelet transform leads to an improved recovery, which can subsequently be used as input for a second stage of multiple prediction and primary-multiple separation.}, keywords = {SEG, Presentation, SLIM}, doi = {10.1190/1.2792987}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/herrmann07SEGmpf/herrmann07SEGmpf_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/herrmann07SEGmpf/herrmann07SEGmpf.pdf} } @CONFERENCE{herrmann2007SINBADcwe, author = {Felix J. Herrmann}, title = {Compressed wavefield extrapolation}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {An explicit algorithm for the extrapolation of one-way wavefields is proposed which combines recent developments in information theory and theoretical signal processing with the physics of wave propagation. Because of excessive memory requirements, explicit formulations for wave propagation have proven to be a challenge in 3-D. By using ideas from {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright}, we are able to formulate the (inverse) wavefield extrapolation problem on small subsets of the data volume, thereby reducing the size of the operators. According to compressed sensing theory, signals can successfully be recovered from an incomplete set of measurements when the measurement basis is incoherent with the representation in which the wavefield is sparse. In this new approach, the eigenfunctions of the Helmholtz operator are recognized as a basis that is incoherent with curvelets that are known to compress seismic wavefields. By casting the wavefield extrapolation problem in this framework, wavefields can successfully be extrapolated in the modal domain via a computationally cheaper operation. A proof of principle for the {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright} method is given for wavefield extrapolation in 2-D. The results show that our method is stable and produces identical results compared to the direct application of the full extrapolation operator. This is joint work with Tim Lin.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2007/herrmann2007SINBADcwe/herrmann2007SINBADcwe.pdf} } @CONFERENCE{herrmann2007SINBADfrw, author = {Felix J. Herrmann}, title = {Focused recovery with the curvelet transform}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {Incomplete data represents a major challenge for a successful prediction and subsequent removal of multiples. In this paper, a new method will be presented that tackles this challenge in a two-step approach. During the first step, the recently developed curvelet-based recovery by sparsity-promoting inversion (CRSI) is applied to the data, followed by a prediction of the primaries. During the second high-resolution step, the estimated primaries are used to improve the frequency content of the recovered data by combining the focal transform, defined in terms of the estimated primaries, with the curvelet transform. This focused curvelet transform leads to an improved recovery, which can subsequently be used as input for a second stage of multiple prediction and primary-multiple separation. This is joint work with Deli Wang and Gilles Hennenfent.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/sites/data/Papers/Herrmann2007SINBADfoc.pdf} } @CONFERENCE{herrmann2007SINBADrdi2, author = {Felix J.
Herrmann}, title = {Recent developments in primary-multiple separation}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {In this talk, we present a novel primary-multiple separation scheme which makes use of the sparsity of both primaries and multiples in a transform domain, such as the curvelet transform, to provide estimates of each. The proposed algorithm utilizes seismic data as well as the output of a preliminary step that provides (possibly) erroneous predictions of the multiples. The algorithm separates the signal components, i.e., the primaries and multiples, by solving an optimization problem that assumes noisy input data and can be derived from a Bayesian perspective. More precisely, the optimization problem can be arrived at via an assumption of a weighted Laplacian distribution for the primary and multiple coefficients in the transform domain and of white Gaussian noise contaminating both the seismic data and the preliminary prediction of the multiples, which both serve as input to the algorithm. Time permitting, we will also briefly discuss a proposal for adaptive curvelet-domain matched filtering. This is joint work with Deli Wang, Rayan Saab, {\"O}zg{\"u}r Yilmaz and Eric Verschuur.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2007/herrmann07SINBADrdi2/herrmann07SINBADrdi2_pres.pdf} } @CONFERENCE{herrmann2007SINBADsia2, author = {Felix J. Herrmann}, title = {Seismic image amplitude recovery}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {In this talk, we recover the amplitude of a seismic image by approximating the normal (demigration-migration) operator. In this approximation, we make use of the property that curvelets remain invariant under the action of the normal operator. We propose a seismic amplitude recovery method that employs an eigenvalue-like decomposition for the normal operator using curvelets as eigenvectors. Subsequently, we propose an approximate nonlinear singularity-preserving solution to the least-squares seismic imaging problem with sparseness in the curvelet domain and spatial continuity constraints. Our method is tested with a reverse-time {\textquoteright}wave-equation{\textquoteright} migration code simulating the acoustic wave equation on the SEG-AA salt model. This is joint work with Peyman Moghaddam and Chris Stolk (University of Twente).}, keywords = {Presentation, SINBAD, SLIM} } @CONFERENCE{herrmann2007SLIMfsd, author = {Felix J. Herrmann}, title = {From seismic data to the composition of rocks: an interdisciplinary and multiscale approach to exploration seismology}, booktitle = {Berkhout{\textquoteright}s valedictory address: the conceptual approach of understanding}, year = {2007}, abstract = {In this essay, a nonlinear and multidisciplinary approach is presented that takes seismic data to the composition of rocks. The presented work has deep roots in the {\textquoteleft}gedachtengoed{\textquoteright} (philosophy) of Delphi spearheaded by Guus Berkhout. Central themes are multiscale, object-orientation and a multidisciplinary approach.}, keywords = {SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Misc/herrmann07SLIMfsd/herrmann07SLIMfsd.pdf} } @CONFERENCE{herrmann2006SINBADapo1, author = {Felix J.
Herrmann}, title = {A primer on sparsity transforms: curvelets and wave atoms}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {During this presentation an overview will be given on the different sparsity transforms that are used at SLIM. Emphasis will be on two directional and multiscale wavelet transforms, namely the curvelet and the recently introduced wave-atom transforms. The main properties of these transforms will be listed and their performance on seismic data will be discussed.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/hennenfent2006SINBADapo/hennenfent2006SINBADapo.pdf} } @CONFERENCE{herrmann2006SINBADapow, author = {Felix J. Herrmann}, title = {A primer on weak conditions for stable recovery}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {During this presentation an introduction will be given on the method of stable recovery from noisy and incomplete data. Weak recovery conditions that guarantee the recovery for typical acquisition geometries will be reviewed and numerical recovery examples will be presented. The advantage of these weak conditions is that they are less pessimistic and {\textquoteleft}verifiable{\textquoteright} for very large-scale acquisition geometries.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADapow/herrmann2006SINBADapow.pdf} } @CONFERENCE{herrmann2006SINBADmpf, author = {Felix J. Herrmann}, title = {Multiple prediction from incomplete data}, booktitle = {SINBAD 2006}, year = {2006}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADmpf/herrmann2006SINBADmpf.pdf} } @CONFERENCE{herrmann2006SINBADom, author = {Felix J. Herrmann}, title = {Opening meeting}, booktitle = {SINBAD 2006}, year = {2006}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADom/herrmann2006SINBADom.pdf} } @CONFERENCE{herrmann2006SINBADsac, author = {Felix J. Herrmann}, title = {Sparsity- and continuity-promoting seismic image recovery with curvelets}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {A nonlinear singularity-preserving solution to seismic image recovery with sparseness and continuity constraints is proposed. The method explicitly explores the curvelet transform as a directional frame expansion that, by virtue of its sparsity on seismic images and its invariance under the Hessian of the linearized imaging problem, allows for a stable recovery of the migration amplitudes from noisy data. The method corresponds to a preconditioning that corrects the amplitudes during a post-processing step. The solution is formulated as a nonlinear optimization problem where sparsity in the curvelet domain as well as continuity along the imaged reflectors are jointly promoted. To enhance sparsity, the l1-norm on the curvelet coefficients is minimized while continuity is promoted by minimizing an anisotropic diffusion norm on the image. The performance of the recovery scheme is evaluated with a {\textquoteright}wave-equation{\textquoteright} migration code on a synthetic dataset.
This is joint work with Peyman Moghaddam.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADsac/herrmann2006SINBADsac.pdf} } @CONFERENCE{herrmann2006SINBADsra, author = {Felix J. Herrmann}, title = {Stable recovery and separation of seismic data}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {During this presentation an overview will be given on how seismic data regularization and separation problems can be cast into the framework of stable signal recovery. It is shown that the successful solution of these two problems depends on the existence of signal expansions that are compressible. Preliminary examples will be shown.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADsra/herrmann2006SINBADsra.pdf} } @CONFERENCE{herrmann2004CSEGcia, author = {Felix J. Herrmann}, title = {Curvelet imaging and processing: an overview}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, organization = {CSEG}, abstract = {In this paper an overview is given on the application of directional basis functions, known under the name Curvelets/Contourlets, to various aspects of seismic processing and imaging. Key concepts in the approach are the use of (i) directional basis functions that localize in both domains (e.g. space and angle); (ii) non-linear estimation, which corresponds to localized muting on the coefficients, possibly supplemented by constrained optimization; (iii) invariance of the basis functions under the imaging operators. We will discuss applications that include multiple and ground roll removal; sparseness-constrained least-squares migration and the computation of 4-D difference cubes.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia/Herrmann04CSEGcia.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia/Herrmann04CSEGcia_paper.pdf} } @CONFERENCE{herrmann2003SEGoiw, author = {Felix J. Herrmann}, title = {"Optimal" imaging with curvelets}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2003}, volume = {22}, pages = {997-1000}, abstract = {In this paper we present a non-linear edge-preserving solution to linear inverse scattering problems based on optimal basis-function decompositions. Optimality of the basis functions allows us to (i) reduce the dimensionality of the inverse problem; (ii) devise non-linear thresholding operators that approximate minimax (minimize the maximal mean square error given the worst possible prior) and that significantly improve the signal-to-noise ratio on the image. We present a reformulation of the standard generalized least-squares formulation of the seismic inversion problem into a formulation based on thresholding, where the singular values, vectors and linear estimators are replaced by quasi-singular values, basis-functions and thresholding. To limit the computational burden we use a Monte-Carlo sampling method to compute the quasi-singular values. With the proposed method, we aim to significantly improve the signal-to-noise ratio (SNR) on the model space and hence the resolution of the seismic image. While classical Tikhonov-regularized methods only gain the square-root of the SNR on the data for the SNR on the model, our method scales almost linearly.
This significant improvement of the SNR allows us to discern events at high frequencies which would normally be in the noise.}, keywords = {Presentation, SEG, SLIM}, doi = {10.1190/1.1818117}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2003/Herrmann03SEGoiw/Herrmann03SEGoiw_pres.pdf} } @CONFERENCE{herrmann2001EAGEsas, author = {Felix J. Herrmann}, title = {Scaling and seismic reflectivity: implications of scaling on {AVO}}, booktitle = {EAGE Technical Program Expanded Abstracts}, year = {2001}, organization = {EAGE}, abstract = {AVO analysis of seismic data is based on the assumption that transitions in the earth consist of jump discontinuities only. Generalization of these transitions to more realistic transitions shows a drastic change in observed AVO behavior, especially for the large angles currently attained by increasing cable lengths. We propose a simple ities. After renormalization, the inverted fluctuations regain their relative magnitudes which, due to the scaling, may have been significantly distorted.}, keywords = {SLIM}, month = {06}, } @CONFERENCE{herrmann2011ICIAMconvexcompfwi, author = {Felix J. Herrmann and Aleksandr Y. Aravkin and Tristan van Leeuwen and Xiang Li}, title = {{FWI} with sparse recovery: a convex-composite approach}, booktitle = {ICIAM}, year = {2011}, organization = {ICIAM 2011}, abstract = {Iterative inversion algorithms require repeated simulation of 3D time-dependent acoustic, elastic, or electromagnetic wave fields, extending hundreds of wavelengths and hundreds of periods. Also, seismic data is rich in information at every representable scale. Thus simulation-driven optimization approaches to inversion impose great demands on simulator efficiency and accuracy. While computer hardware advances have been of critical importance in bringing inversion closer to practical application, algorithmic advances in simulator methodology have been equally important. Speakers in this two-part session will address a variety of numerical issues arising in the wave simulation, and in its application to inversion.}, date-added = {2011-07-20}, keywords = {ICIAM, full-waveform inversion, optimization}, month = {07}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/herrmann2011ICIAMconvexcompfwi/herrmann2011ICIAMconvexcompfwi.pdf} } @CONFERENCE{herrmann2011SLRAfwi, author = {Felix J. Herrmann and Aleksandr Y. Aravkin and Xiang Li and Tristan van Leeuwen}, title = {Full waveform inversion with compressive updates}, booktitle = {SLRA}, year = {2011}, organization = {Sparse and Low Rank Approximation 2011}, abstract = {Full-waveform inversion relies on large multi-experiment data volumes. While improvements in acquisition and inversion have been extremely successful, the current push for higher quality models reveals fundamental shortcomings in handling increasing problem sizes numerically. To address this fundamental issue, we propose a randomized dimensionality-reduction strategy motivated by recent developments in stochastic optimization and compressive sensing. In this formulation, conventional Gauss-Newton iterations are replaced by dimensionality-reduced sparse recovery problems with source encodings.}, keywords = {full-waveform inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SLRA/2011/herrmann2011SLRAfwi/herrmann2011SLRAfwi.pdf} } @CONFERENCE{herrmann2006SINBADpms, author = {Felix J. Herrmann and Urs Boeniger and D. J.
Verschuur}, title = {Primary-multiple separation by curvelet frames}, booktitle = {SINBAD 2006}, year = {2006}, volume = {170}, pages = {781-799}, organization = {Geophysical Journal International}, abstract = {Predictive multiple suppression methods consist of two main steps: a prediction step, during which multiples are predicted from seismic data, and a primary-multiple separation step, during which the predicted multiples are {\textquoteright}matched{\textquoteright} with the true multiples in the data and subsequently removed. The last step is crucial in practice: an incorrect separation will cause residual multiple energy in the result or may lead to a distortion of the primaries, or both. To reduce these adverse effects, a new transformed-domain method is proposed where primaries and multiples are separated rather than matched. This separation is carried out on the basis of differences in the multiscale and multidirectional characteristics of these two signal components. Our method uses the curvelet transform, which maps multidimensional data volumes into almost orthogonal localized multidimensional prototype waveforms that vary in directional and spatio-temporal content. Primaries-only and multiples-only signal components are recovered from the total data volume by a nonlinear optimization scheme that is stable under noisy input data. During the optimization, the two signal components are separated by enhancing sparseness (through weighted l1-norms) in the transformed domain subject to fitting the observed data as the sum of the separated components to within a user-defined tolerance level. Whenever the prediction for the two signal components in the transformed domain correlate, the recovery is suppressed while for regions where the correlation is small the method seeks the sparsest set of coefficients that represent each signal component. Our algorithm does not seek a matched filter and as such it differs fundamentally from traditional adaptive subtraction methods. The method derives its stability from the sparseness obtained by a non-parametric multiscale and multidirectional overcomplete signal representation. This sparsity serves as prior information and allows for a Bayesian interpretation of our method during which the log-likelihood function is minimized while the two signal components are assumed to be given by a superposition of prototype waveforms, drawn independently from a probability function that is weighted by the predicted primaries and multiples. In this paper, the predictions are based on the data-driven surface-related multiple elimination (SRME) method. Synthetic and field data examples show a clean separation leading to a considerable improvement in multiple suppression compared to the conventional method of adaptive matched filtering. This improved separation translates into an improved stack.}, keywords = {Presentation, SINBAD, SLIM}, doi = {10.1111/j.1365-246X.2007.03360.x}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/herrmann2006SINBADpms/herrmann2006SINBADpms.pdf} } @CONFERENCE{herrmann2009EAGEcsa, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. 
Lin}, title = {Compressive sensing applied to full-waveform inversion}, booktitle = {EAGE}, year = {2009}, month = {06}, abstract = {With the recent resurgence of full-waveform inversion, the computational cost of solving forward modeling problems has become{\textendash}-aside from issues with non-uniqueness{\textendash}-one of the major impediments withstanding successful application of this technology to industry-size data volumes. To overcome this impediment, we argue that further improvements in this area will depend on a problem formulation with a computational complexity that is no longer strictly determined by the size of the discretization but by transform-domain sparsity of its solution. In this new paradigm, we bring computational costs on par with our ability to compress seismic data and images. This premise is related to two recent developments. First, there is the new field of compressive sensing (CS in short throughout the paper, Cand{\textquoteleft}es et al., 2006; Donoho, 2006){\textendash}-where the argument is made, and rigorously proven, that compressible signals can be recovered from severely sub-Nyquist sampling by solving a sparsity promoting program. Second, there is in the seismic community the recent resurgence of simultaneous-source acquisition (Beasley, 2008; Krohn and Neelamani, 2008; Herrmann et al., 2009; Berkhout, 2008; Neelamani et al., 2008), and continuing efforts to reduce the cost of seismic modeling, imaging, and inversion through phase encoding of simultaneous sources (Morton and Ober, 1998; Romero et al., 2000; Krohn and Neelamani, 2008; Herrmann et al., 2009), removal of subsets of angular frequencies (Sirgue and Pratt, 2004; Mulder and Plessix, 2004; Lin et al., 2008) or plane waves (Vigh and Starr, 2008). By using CS principles, we remove sub-sampling interferences associated with these approaches through a combination of exploiting transform-domain sparsity, properties of certain sub-sampling schemes, and the existence of sparsity promoting solvers.}, keywords = {EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/Herrmann09EAGEcsa/Herrmann09EAGEcsa_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/Herrmann09EAGEcsa/Herrmann09EAGEcsa.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=23961} } @CONFERENCE{herrmann2009IAPcsisa, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin}, title = {Compressive seismic imaging with simultaneous acquisition}, booktitle = {IAP}, year = {2009}, abstract = {The sheer size of seismic data volumes forms one of the major impediments for the inversion of seismic data. Turning forward modeling and inversion into a compressive sensing (CS) problem - where simulated data are recovered from a relatively small number of independent sources - can effectively mitigate this high-cost impediment. Our key contribution lies in the design of a sub-sampling operator that commutes with the time-harmonic Helmholtz system. As in compressive sensing, this leads to a reduction of simulation cost. This reduction is commensurate with the transform-domain sparsity of the solution, implying that computational costs are no longer determined by the size of the discretization but by transform-domain sparsity of the solution of the CS problem that recovers the data.
The combination of this sub-sampling strategy with our recent work on preconditioned implicit solvers for the time-harmonic Helmholtz equation provides a viable alternative to full-waveform inversion schemes based on explicit time-domain finite-difference methods.}, keywords = {Presentation}, note = {Presented at the IAP meeting, Vienna}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/AIP/2009/herrmann2009IAPcsisa/Herrmann09AIP1.pdf} } @CONFERENCE{herrmann2009SAMPTAcws, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin}, title = {Compressive-wavefield simulations}, booktitle = {SAMPTA}, year = {2009}, organization = {SAMPTA}, abstract = {Full-waveform inversion{\textquoteright}s high demand on computational resources forms, along with the non-uniqueness problem, the major impediment withstanding its widespread use on industrial-size datasets. Turning modeling and inversion into a compressive sensing problem{\textendash}-where simulated data are recovered from a relatively small number of independent simultaneous sources{\textendash}-can effectively mitigate this high-cost impediment. The key is in showing that we can design a sub-sampling operator that commutes with the time-harmonic Helmholtz system. As in compressive sensing, this leads to a reduction in simulation cost. Moreover, this reduction is commensurate with the transform-domain sparsity of the solution, implying that computational costs are no longer determined by the size of the discretization but by transform-domain sparsity of the solution of the CS problem which forms our data. The combination of this sub-sampling strategy with our recent work on implicit solvers for the Helmholtz equation provides a viable alternative to full-waveform inversion schemes based on explicit finite-difference methods.}, keywords = {SAMPTA}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SAMPTA/2009/Herrmann09SAMPTAcws/Herrmann09SAMPTAcws.pdf} } @CONFERENCE{herrmann2008SIAMcsm, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin}, title = {Compressive sampling meets seismic imaging}, booktitle = {SIAM}, year = {2008}, abstract = {Compressive sensing has led to fundamental new insights in the recovery of compressible signals from sub-Nyquist samplings. It is shown how jittered subsampling can be used to create favorable recovery conditions. Applications include mitigation of incomplete acquisitions and wavefield computations. While the former is a direct adaptation of compressive sampling, the latter application represents a new way of compressing wavefield extrapolation operators. Operators are not diagonalized but are compressively sampled reducing the computational costs.}, keywords = {Presentation, SIAM, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SIAM/2008/herrmann2008SIAMcsm/herrmann2008SIAMcsm.pdf} } @CONFERENCE{herrmann2008SINBADitc, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin and Cody R. Brown}, title = {Introduction to compressive (wavefield) computation}, booktitle = {SINBAD 2008}, year = {2008}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/herrmann2008SINBADitc/herrmann2008SINBADitc.pdf} } @CONFERENCE{herrmann2005CSEGnld, author = {Felix J. 
Herrmann and Gilles Hennenfent}, title = {Non-linear data continuation with redundant frames}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2005}, organization = {CSEG}, abstract = {We propose an efficient iterative data interpolation method using continuity along reflectors in seismic images via curvelet and discrete cosine transforms. The curvelet transform is a new multiscale transform that provides sparse representations for images that comprise smooth objects separated by piece-wise smooth discontinuities (e.g. seismic images). The advantage of using curvelets is that these frames are sparse for high-frequency caustic-free solutions of the wave-equation. Since we are dealing with less than ideal data (e.g. bandwidth-limited), we complement the curvelet frames with the discrete cosine transform. The latter is motivated by the successful data continuation with the discrete Fourier transform. By choosing generic basis functions we circumvent the necessity to make parametric assumptions (e.g., through linear/parabolic Radon or demigration) regarding the shape of events in seismic data. Synthetic and real data examples demonstrate that our algorithm provides interpolated traces that accurately reproduce the wavelet shape as well as the AVO behavior along events in shot gathers.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2005/Herrmann05CSEGnld/Herrmann05CSEGnld_pres.pdf}, url = {http://www.cseg.ca/assets/files/resources/abstracts/2005/101S0201-Herrmann_F_Non_Linear_Data_Continuation.pdf} } @CONFERENCE{herrmann2005EAGErcd, author = {Felix J. Herrmann and Gilles Hennenfent}, title = {Robust curvelet-domain data continuation with sparseness constraints}, booktitle = {EAGE}, year = {2005}, month = {06}, abstract = {A robust data interpolation method using curvelet frames is presented. The advantage of this method is that curvelets arguably provide an optimal sparse representation for solutions of wave equations with smooth coefficients. As such curvelet frames circumvent {\textendash} besides the assumption of caustic-free data {\textendash} the necessity to make parametric assumptions (e.g. through linear/parabolic Radon or demigration) regarding the shape of events in seismic data. A brief sketch of the theory is provided as well as a number of examples on synthetic and real data.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd/Herrmann05EAGErcd.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd/Herrmann05EAGErcd.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1112} } @CONFERENCE{herrmann2007EAGEsia, author = {Felix J. Herrmann and Gilles Hennenfent and Peyman P. Moghaddam}, title = {Seismic imaging and processing with curvelets}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {In this paper, we present a nonlinear curvelet-based sparsity-promoting formulation for three problems in seismic processing and imaging, namely seismic data regularization from data with large percentages of traces missing; seismic amplitude recovery for sub-salt images obtained by reverse-time migration; and primary-multiple separation, given an inaccurate multiple prediction.
We argue why these nonlinear formulations are beneficial.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsia/herrmann07EAGEsia_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsia/herrmann07EAGEsia.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7075} } @CONFERENCE{Herrmann2011BG, author = {Felix J. Herrmann and Tristan van Leeuwen}, title = {{SINBAD's} research program}, year = {2011}, month = {11}, owner = {Shruti}, quality = {1}, timestamp = {2013.01.16}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Misc/Herrmann2011BG.pdf} } @CONFERENCE{herrmann2011EAGEefmsp, author = {Felix J. Herrmann and Xiang Li}, title = {Efficient least-squares migration with sparsity promotion}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {Seismic imaging relies on the collection of multi-experimental data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. While significant improvements have been made in linearized inversion, the current trend of incessantly pushing for higher quality models in increasingly complicated regions reveals fundamental shortcomings in handling increasing problem sizes numerically. The so-called ``curse of dimensionality" is the main culprit because it leads to an exponential growth in the number of sources and the corresponding number of wavefield simulations required by ``wave-equation" migration. We address this issue by reducing the number of sources by a randomized dimensionality reduction technique that combines recent developments in stochastic optimization and compressive sensing. As a result, we replace the current formulations of imaging that rely on all data by a sequence of smaller imaging problems that use the output of the previous inversion as input for the next. Empirically, we find speedups of at least one order-of-magnitude when each reduced experiment is considered theoretically as a separate compressive-sensing experiment.}, keywords = {Presentation, EAGE, imaging}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/herrmann11EAGEefmsp/herrmann11EAGEefmsp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/herrmann11EAGEefmsp/herrmann11EAGEefmsp.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50333} } @CONFERENCE{herrmann2010EAGErds, author = {Felix J. Herrmann and Xiang Li}, title = {Randomized dimensionality reduction for full-waveform inversion}, booktitle = {EAGE}, year = {2010}, month = {06}, abstract = {Full-waveform inversion relies on the collection of large multi-experiment data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. While improvements in acquisition and inversion have been extremely successful, the current trend of incessantly pushing for higher quality models in increasingly complicated regions of the Earth continues to reveal fundamental shortcomings in our ability to handle the ever increasing problem size numerically. Two causes can be identified as the main culprits responsible for this barrier.
First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution of our survey areas continue to increase. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this. In this paper, we address this situation by randomized dimensionality reduction, which we adapt from the field of compressive sensing. In this approach, we combine deliberate randomized subsampling with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information. With this reduction, we compute Newton-like updates at the cost of roughly one gradient update for the fully-sampled wavefield.}, keywords = {Presentation, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErds/herrmann10EAGErds_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErds/herrmann10EAGErds.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39352} } @CONFERENCE{herrmann2011SPIEmsp, author = {Felix J. Herrmann and Xiang Li and Aleksandr Y. Aravkin and Tristan van Leeuwen}, title = {A modified, sparsity promoting, {Gauss-Newton} algorithm for seismic waveform inversion}, booktitle = {Proc. SPIE}, year = {2011}, number = {81380V}, abstract = {Images obtained from seismic data are used by the oil and gas industry for geophysical exploration. Cutting-edge methods for transforming the data into interpretable images are moving away from linear approximations and high-frequency asymptotics towards Full Waveform Inversion (FWI), a nonlinear data-fitting procedure based on full data modeling using the wave-equation. The size of the problem, the nonlinearity of the forward model, and ill-posedness of the formulation all contribute to a pressing need for fast algorithms and novel regularization techniques to speed up and improve inversion results. In this paper, we design a modified Gauss-Newton algorithm to solve the PDE-constrained optimization problem using ideas from stochastic optimization and compressive sensing. More specifically, we replace the Gauss-Newton subproblems by randomly subsampled, $\ell_1$-regularized subproblems. This allows us to significantly reduce the computational cost of calculating the updates and exploit the compressibility of wavefields in Curvelets. We explain the relationships and connections between the new method and stochastic optimization and compressive sensing (CS), and demonstrate the efficacy of the new method on a large-scale synthetic seismic example.}, issn = {1}, keywords = {SLIM, compressive sensing, optimization, full-waveform inversion}, notes = {TR-2011-05}, doi = {10.1117/12.893861}, month = {08}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SPIE/2011/herrmann2011SPIEmsp/herrmann2011SPIEmsp.pdf} } @CONFERENCE{herrmann2007EAGEjda, author = {Felix J. Herrmann and Peyman P.
Moghaddam}, title = {Just diagonalize: a curvelet-based approach to seismic amplitude recovery}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, … in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {In this presentation we present a nonlinear curvelet-based sparsity-promoting formulation for the recovery of seismic amplitudes. We show that the curvelet's wavefront detection capability and invariance under wave propagation lead to a formulation of this recovery problem that is stable under noise and missing data. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2007/herrmann2007EAGEjda/herrmann2007EAGEjda_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2007/herrmann2007EAGEjda/herrmann2007EAGEjda_paper.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7555} } @CONFERENCE{herrmann2005CSEGnlr, author = {Felix J. Herrmann and Peyman P. Moghaddam}, title = {Non-linear regularization in seismic imaging}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2005}, organization = {CSEG}, abstract = {Two complementary solution strategies to the least-squares imaging problem with sparseness \& continuity constraints are proposed. The applied formalism explores the sparseness of curvelet coefficients of the reflectivity and their invariance under the demigration-migration operator. We achieve the solution by jointly minimizing a weighted l1-norm on the curvelet coefficients and an anisotropic diffusion or total variation norm on the imaged reflectivity model. The l1-norm exploits the sparseness of the reflectivity in the curvelet domain whereas the anisotropic norm enhances the continuity along the reflections while removing artifacts residing in between reflectors. While the two optimization methods (convex versus non-convex) share the same type of regularization, they differ in flexibility in how to handle additional constraints on the coefficients of the imaged reflectivity and in computational expense. A brief sketch of the theory is provided along with a number of synthetic examples.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2005/Herrmann05CSEGnlr/Herrmann05CSEGnlr_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2005/Herrmann05CSEGnlr/Herrmann05CSEGnlr.pdf} } @CONFERENCE{herrmann2004CSEGcia2, author = {Felix J. Herrmann and Peyman P. Moghaddam}, title = {Curvelet imaging and processing: sparseness-constrained least-squares migration}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, organization = {CSEG}, abstract = {A non-linear edge-preserving solution to the least-squares migration problem with sparseness constraints is introduced. The applied formalism explores Curvelets as basis functions that, by virtue of their sparseness and locality, not only allow for a reduction of the dimensionality of the imaging problem but which also naturally lead to a non-linear solution with significantly improved signal-to-noise ratio. Additional conditions on the image are imposed by solving a constrained optimization problem on the estimated Curvelet coefficients initialized by thresholding.
This optimization is designed to also restore the amplitudes by (approximately) inverting the normal operator, which, like the (de)-migration operators, is almost diagonalized by the Curvelet transform.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia2/Herrmann04CSEGcia2.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia2/Herrmann04CSEGcia2_paper.pdf} } @CONFERENCE{herrmann2004EAGEcdl, author = {Felix J. Herrmann and Peyman P. Moghaddam}, title = {Curvelet-domain least-squares migration with sparseness constraints}, booktitle = {EAGE}, year = {2004}, month = {06}, abstract = {A non-linear edge-preserving solution to the least-squares migration problem with sparseness constraints is introduced. The applied formalism explores Curvelets as basis functions that, by virtue of their sparseness and locality, not only allow for a reduction of the dimensionality of the imaging problem but which also naturally lead to a non-linear solution with significantly improved signal-to-noise ratio. Additional conditions on the image are imposed by solving a constrained optimization problem on the estimated Curvelet coefficients initialized by thresholding. This optimization is designed to also restore the amplitudes by (approximately) inverting the normal operator, which, like the (de)-migration operators, is almost diagonalized by the Curvelet transform.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2004/Herrmann04EAGEcdl/Herrmann04EAGEcdl_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2004/Herrmann04EAGEcdl/Herrmann04EAGEcdl.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=2073} } @CONFERENCE{herrmann2004EAGEcdp, author = {Felix J. Herrmann and Peyman P. Moghaddam}, title = {Curvelet-domain preconditioned 'wave-equation' depth-migration with sparseness and illumination constraints}, booktitle = {EAGE Technical Program Expanded Abstracts}, year = {2004}, organization = {EAGE}, abstract = {A non-linear edge-preserving solution to the least-squares migration problem with sparseness constraints is introduced. The applied formalism explores Curvelets as basis functions that, by virtue of their sparseness and locality, not only allow for a reduction of the dimensionality of the imaging problem but which also naturally lead to a non-linear solution with significantly improved signal-to-noise ratio. Additional conditions on the image are imposed by solving a constrained optimization problem on the estimated Curvelet coefficients initialized by thresholding. This optimization is designed to also restore the amplitudes by (approximately) inverting the normal operator, which, like the (de)-migration operators, is almost diagonalized by the Curvelet transform.}, keywords = {SLIM}, month = {06}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2004/herrmann2004EAGEcdp/herrmann2004EAGEcdp.pdf} } @CONFERENCE{herrmann2004SEGcbn, author = {Felix J. Herrmann and Peyman P.
Moghaddam}, title = {Curvelet-based non-linear adaptive subtraction with sparseness constraints}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2004}, volume = {23}, pages = {1977-1980}, organization = {SEG}, abstract = {In this paper an overview is given on the application of directional basis functions, known under the name Curvelets/Contourlets, to various aspects of seismic processing and imaging, which involve adaptive subtraction. Key concepts in the approach are the use of directional basis functions that localize in both domains (e.g. space and angle); non-linear estimation, which corresponds to localized muting on the coefficients, possibly supplemented by constrained optimization. We will discuss applications that include multiple and ground-roll removal, and migration denoising. {\copyright}2004 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.1851181}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Herrmann04SEGcbn/Herrmann04SEGcbn_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Herrmann04SEGcbn/Herrmann04SEGcbn.pdf } } @CONFERENCE{herrmann2005EAGEosf, author = {Felix J. Herrmann and Peyman P. Moghaddam and R. Kirlin}, title = {Optimization strategies for sparseness- and continuity-enhanced imaging: theory}, booktitle = {EAGE}, year = {2005}, month = {06}, abstract = {Two complementary solution strategies to the least-squares migration problem with sparseness and continuity constraints are proposed. The applied formalism explores the sparseness of curvelets on the reflectivity and their invariance under the demigration-migration operator. Sparseness is enhanced by (approximately) minimizing a (weighted) l1-norm on the curvelet coefficients. Continuity along imaged reflectors is brought out by minimizing the anisotropic diffusion or total variation norm which penalizes variations along and in between reflectors. A brief sketch of the theory is provided as well as a number of synthetic examples. Technical details on the implementation of the optimization strategies are deferred to an accompanying paper: implementation.}, keywords = {SLIM, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGEosf/Herrmann05EAGEosf.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1343} } @CONFERENCE{herrmann2008SEGcdm, author = {Felix J. Herrmann and Peyman P. Moghaddam and Deli Wang}, title = {Curvelet-domain matched filtering}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, number = {3643-3649}, organization = {SEG}, abstract = {Matching seismic wavefields and images lies at the heart of many pre-/post-processing steps that are part of seismic imaging{\textendash}- whether one is matching predicted wavefield components, such as multiples, to the actual to-be-separated wavefield components present in the data or whether one is aiming to restore migration amplitudes by scaling, using an image-to-remigrated-image matching procedure to calculate the scaling coefficients. The success of these wavefield matching procedures depends on our ability to (i) control possible overfitting, which may lead to accidental removal of energy or to inaccurate image-amplitude corrections, (ii) handle data or images with nonunique dips, and (iii) apply subsequent wavefield separations or migration amplitude corrections stably.
In this paper, we show that the curvelet transform allows us to address all these issues by imposing smoothness in phase space, by using their capability to handle conflicting dips, and by leveraging their ability to represent seismic data and images sparsely. This latter property renders curvelet-domain sparsity promotion an effective prior.}, keywords = {SLIM, Presentation, SEG}, month = {08}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/herrmann08SEGcdm/herrmann08SEGcdm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/herrmann08SEGcdm/herrmann08SEGcdm.pdf } } @CONFERENCE{herrmann09EAGEbnrs, author = {Felix J. Herrmann and Gang Tang and Reza Shahidi and Gilles Hennenfent and Tim T.Y. Lin}, title = {Beating Nyquist by randomized sampling}, booktitle = {EAGE Technical Program Expanded Abstracts}, year = {2009}, keywords = {Presentation}, note = {Presented at the EAGE (workshop), Amsterdam}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/herrmann09EAGEbnrs/herrmann09EAGEbnrs.pdf} } @CONFERENCE{herrmann2005EAGErcd1, author = {Felix J. Herrmann and D. J. Verschuur}, title = {Robust curvelet-domain primary-multiple separation with sparseness constraints}, booktitle = {EAGE}, year = {2005}, month = {06}, abstract = {A non-linear primary-multiple separation method using curvelet frames is presented. The advantage of this method is that curvelets arguably provide an optimal sparse representation for both primaries and multiples. As such curvelet frames are ideal candidates to separate primaries from multiples given inaccurate predictions for these two data components. The method derives its robustness with respect to the presence of noise, errors in the prediction, and missing data from the curvelet frame{\textquoteright}s ability (i) to represent both signal components with a limited number of multi-scale and directional basis functions; (ii) to separate the components on the basis of differences in location, orientation and scales; and (iii) to minimize correlations between the coefficients of the two components. A brief sketch of the theory is provided as well as a number of examples on synthetic and real data.}, keywords = {SLIM, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd1/Herrmann05EAGErcd1.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1384} } @CONFERENCE{herrmann2004CSEGcia1, author = {Felix J. Herrmann and D. J. Verschuur}, title = {Curvelet imaging and processing: adaptive multiple elimination}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, organization = {CSEG}, abstract = {Predictive multiple suppression methods consist of two main steps: a prediction step, in which multiples are predicted from the seismic data, and a subtraction step, in which the predicted multiples are matched with the true multiples in the data. The last step appears crucial in practice: an incorrect adaptive subtraction method will cause multiples to be sub-optimally subtracted or primaries being distorted, or both. Therefore, we propose a new domain for separation of primaries and multiples via the Curvelet transform. This transform maps the data into almost orthogonal localized events with a directional and spatial-temporal component. The multiples are suppressed by thresholding the input data at those Curvelet components where the predicted multiples have large amplitudes.
In this way the more traditional filtering of predicted multiples to fit the input data is avoided. An initial field data example shows a considerable improvement in multiple suppression.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia1/Herrmann04CSEGcia1.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Herrmann04CSEGcia1/Herrmann04CSEGcia1_paper.pdf} } @CONFERENCE{herrmann2004EAGEsop, author = {Felix J. Herrmann and D. J. Verschuur}, title = {Separation of primaries and multiples by non-linear estimation in the curvelet domain}, booktitle = {EAGE Technical Program Expanded Abstracts}, year = {2004}, organization = {EAGE}, abstract = {Predictive multiple suppression methods consist of two main steps: a prediction step, in which multiples are predicted from the seismic data, and a subtraction step, in which the predicted multiples are matched with the true multiples in the data. The last step appears crucial in practice: an incorrect adaptive subtraction method will cause multiples to be sub-optimally subtracted or primaries being distorted, or both. Therefore, we propose a new domain for separation of primaries and multiples via the Curvelet transform. This transform maps the data into almost orthogonal localized events with a directional and spatial-temporal component. The multiples are suppressed by thresholding the input data at those Curvelet components where the predicted multiples have large amplitudes. In this way the more traditional filtering of predicted multiples to fit the input data is avoided. An initial field data example shows a considerable improvement in multiple suppression.}, keywords = {SLIM}, month = {06}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2004/herrmann2004EAGEsop/herrmann2004EAGEsop.pdf} } @CONFERENCE{herrmann2004SEGcdm, author = {Felix J. Herrmann and D. J. Verschuur}, title = {Curvelet-domain multiple elimination with sparseness constraints}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2004}, volume = {23}, pages = {1333-1336}, organization = {SEG}, abstract = {Predictive multiple suppression methods consist of two main steps: a prediction step, in which multiples are predicted from the seismic data, and a subtraction step, in which the predicted multiples are matched with the true multiples in the data. The last step appears crucial in practice: an incorrect adaptive subtraction method will cause multiples to be sub-optimally subtracted or primaries being distorted, or both. Therefore, we propose a new domain for separation of primaries and multiples via the Curvelet transform. This transform maps the data into almost orthogonal localized events with a directional and spatial-temporal component. The multiples are suppressed by thresholding the input data at those Curvelet components where the predicted multiples have large amplitudes. In this way the more traditional filtering of predicted multiples to fit the input data is avoided. An initial field data example shows a considerable improvement in multiple suppression. 
{\copyright}2004 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.1851110}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Herrmann04SEGcdm/Herrmann04SEGcdm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Herrmann04SEGcdm/Herrmann04SEGcdm.pdf} } @CONFERENCE{herrmann2008SEGswi, author = {Felix J. Herrmann and Deli Wang}, title = {Seismic wavefield inversion with curvelet-domain sparsity promotion}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {2497-2501}, organization = {SEG}, abstract = {Inverting seismic wavefields lies at the heart of seismic data processing and imaging{\textendash}- whether one is applying {\textquoteleft}{\textquoteleft}a poor man{\textquoteright}s inverse{\textquoteright}{\textquoteright} by correlating wavefields during imaging or whether one inverts wavefields as part of a focal transform interferometric deconvolution or as part of computing the {\textquoteright}data inverse{\textquoteright}. The success of these wavefield inversions depends on the stability of the inverse with respect to data imperfections such as finite aperture, bandwidth limitation, and missing data. In this paper, we show how curvelet domain sparsity promotion can be used as a suitable prior to invert seismic wavefields. Examples include seismic data regularization with the focused curvelet-based recovery by sparsity-promoting inversion (fCRSI), which involves the inversion of the primary-wavefield operator, the prediction of multiples by inverting the adjoint of the primary operator, and finally the inversion of the data itself {\textendash}- the so-called {\textquoteright}data inverse{\textquoteright}. In all cases, curvelet-domain sparsity leads to a stable inversion.}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.3063862}, month = {11}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/herrmann08SEGswi/herrmann08SEGswi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/herrmann08SEGswi/herrmann08SEGswi.pdf } } @CONFERENCE{herrmann2007SEGsdp, author = {Felix J. Herrmann and Deli Wang and Gilles Hennenfent and Peyman P. Moghaddam}, title = {Seismic data processing with curvelets: a multiscale and nonlinear approach}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2220-2224}, organization = {SEG}, abstract = {In this abstract, we present a nonlinear curvelet-based sparsity-promoting formulation of a seismic processing flow, consisting of the following steps: seismic data regularization and the restoration of migration amplitudes. We show that the curvelet{\textquoteright}s wavefront detection capability and invariance under the migration-demigration operator lead to a formulation that is stable under noise and missing data. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792927}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/herrmann07SEGsdp/herrmann07SEGsdp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/herrmann07SEGsdp/herrmann07SEGsdp.pdf } } @CONFERENCE{herrmann2012EAGEcsm, author = {Felix J.
Herrmann and Haneet Wason}, title = {Compressive sensing in marine acquisition and beyond}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {Simultaneous-source marine acquisition is an example of compressive sensing where acquisition with a single vessel is replaced by simultaneous acquisition by multiple vessels with sources that fire at randomly dithered times. By identifying simultaneous acquisition as compressive sensing, we are able to design acquisitions that favour recovery by sparsity promotion. Compared to conventional processing that yields estimates for sequential data, sparse recovery leads to significantly improved results for simultaneous data volumes that are collected in shorter times. These improvements are the result of proper design of the acquisition, selection of the appropriate transform domain, and solution of the recovery problem by sparsity promotion. During this talk, we will show how these design principles can be applied to marine acquisition and to other problems in exploration seismology that can benefit from compressive sensing.}, keywords = {EAGE, workshop, acquisition, marine}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEcsm/herrmann2012EAGEcsm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEcsm/herrmann2012EAGEcsm.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59854} } @CONFERENCE{herrmann2007SEGsnt, author = {Felix J. Herrmann and D. Wilkinson}, title = {Seismic noise: the good, the bad and the ugly}, booktitle = {SEG Summer Research Workshop: Seismic Noise: Origins Preventions, Mitigation, Utilization}, year = {2007}, note = {Presented at SEG Summer Research Workshop: Seismic Noise: Origins, Prevention, Mitigation, Utilization}, abstract = {In this paper, we present a nonlinear curvelet-based sparsity-promoting formulation for three problems related to seismic noise, namely the {\textquoteright}good{\textquoteright}, corresponding to noise generated by random sampling; the {\textquoteright}bad{\textquoteright}, corresponding to coherent noise for which (inaccurate) predictions exist and the {\textquoteright}ugly{\textquoteright} for which no predictions exist. We will show that the compressive capabilities of curvelets on seismic data and images can be used to tackle these three categories of noise-related problems.}, keywords = {SLIM, SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/herrmann07SEGsnt/herrmann07SEGsnt.pdf } } @CONFERENCE{johnson2008SINBADsdi, author = {James Johnson and Gilles Hennenfent}, title = {Seismic data interpolation with symmetry}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {Due to the physics of reciprocity seismic data sets are symmetric in the source and receiver coordinates. Often seismic data sets are incomplete and the missing data must be interpolated. Typically, missing traces do not occur symmetrically. The purpose of this project is to extend the current formulation for solving the seismic interpolation problems in such a way that they enforce reciprocity. The method decomposes the seismic data volume into symmetric and antisymmetric parts. This decomposition leads to an augmented system of equations for the L1-solver that promotes sparsity in the curvelet domain. 
Interpolation is carried out on the entire system during which the asymmetric component of the volume is forced to zero, while the symmetric part of the data volume is matched to the measured data.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/johnson2008SINBADsdi/johnson2008SINBADsdi.pdf} } @CONFERENCE{johnson2010EAGEeop, author = {James Johnson and Tim T.Y. Lin and Felix J. Herrmann}, title = {Estimation of primaries via sparse inversion with reciprocity}, booktitle = {EAGE}, year = {2010}, abstract = {Accurate removal of surface related multiples is a key step in seismic data processing. The industry standard for removing multiples is SRME, which involves convolving the data with itself to predict the multiples, followed by an adaptive subtraction procedure to recover the primaries (Verschuur and Berkhout, 1997). Other methods involve multidimensional division of the up-going and down-going wavefields (Amundsen, 2001). However, this approach may suffer from stability problems. With the introduction of the {\textquoteleft}{\textquoteleft}estimation of primaries by sparse inversion{\textquoteright}{\textquoteright} (EPSI), van Groenestijn and Verschuur (2009) recently reformulated SRME to jointly estimate the surface-free impulse response and the source signature directly from the data. The advantage of EPSI is that it recovers the primary response directly, and does not require a second processing step for the subtraction of estimated multiples from the original data. However, because it estimates both the primary impulse response and source signature from the data, EPSI must be regularized. Motivated by recent successful application of the curvelet transform in seismic data processing (Herrmann et al., 2007), we formulate EPSI as a bi-convex optimization problem that seeks sparsity on the surface-free Green{\textquoteright}s function and Fourier-domain smoothness on the source wavelet. Our main contribution compared to previous work (Lin and Herrmann, 2009), and the contribution of that author to the proceedings of this meeting (Lin and Herrmann, 2010), is that we employ the physical principle of source-receiver reciprocity to improve the inversion.}, keywords = {EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/johnson10EAGEeop/johnson10EAGEeop.pdf} } @CONFERENCE{jumah2011SEGdrepsi, author = {Bander Jumah and Felix J. Herrmann}, title = {Dimensionality-reduced estimation of primaries by sparse inversion}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {3520-3525}, organization = {SEG}, abstract = {Data-driven methods---such as the estimation of primaries by sparse inversion---suffer from the "curse of dimensionality", which leads to disproportional growth in computational and storage demands when moving to realistic 3-D field data. To remove this fundamental impediment, we propose a dimensionality reduction technique where the "data matrix" is approximated adaptively by a randomized low-rank approximation. Compared to conventional methods, our approach has the advantage that the cost of the low-rank approximation is reduced significantly, which may lead to considerable reductions in storage and computational costs of the sparse inversion.
Application of the proposed formalism to synthetic data shows that significant improvements are achievable at low computational overhead required to compute the low-rank approximations.}, keywords = {Presentation, SEG, processing}, doi = {10.1190/1.3627931}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/Jumah11SEGdrepsi/Jumah11SEGdrepsi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/Jumah11SEGdrepsi/Jumah11SEGdrepsi.pdf} } @CONFERENCE{kumar2008SINBADcd, author = {Vishal Kumar}, title = {Curvelet denoising}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {The separation of signal and noise is an important issue in seismic data processing. By noise we refer to the incoherent noise which is present in the data. In our case, we showed curvelets concentrate seismic signal energy in few significant coefficients unlike noise energy that is spread all over the coefficients. The sparsity of seismic data in the curvelet domain makes curvelets an ideal choice for separating the noise from the seismic data. In our approach the denoising problem is framed as curvelet-regularized inversion problem. After initial processing, we applied the algorithm to the poststack data and compared our results with conventional wavelet denoising.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/kumar2008SINBADcd/kumar2008SINBADcd.pdf} } @CONFERENCE{kumar2008SINBADcrd, author = {Vishal Kumar}, title = {Curvelet-regularized deconvolution}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {The removal of source signature from seismic data is an important step in seismic data processing. The Curvelet transform provides sparse representations for images that comprise smooth objects separated by piece-wise smooth discontinuities (e.g. seismic reflectivity). In this approach the sparseness of reflectivity in Curvelet domain is used as a prior to stabilize the inversion process. Our Curvelet-regularized deconvolution algorithm uses recently developed SPGL1 solver which does adaptive sampling of the trade-off curve. We applied the algorithm on a synthetic example and compared our results with that of Spiky deconvolution approach.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/kumar08SINBADcrd/kumar08SINBADcrd_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/kumar2008SINBADcrd/kumar2008SINBADcrd.pdf} } @CONFERENCE{kumar2009SEGins, author = {Vishal Kumar and Felix J. Herrmann}, title = {Incoherent noise suppression with curvelet-domain sparsity}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3356-3360}, organization = {SEG}, abstract = {The separation of signal and noise is a key issue in seismic data processing. By noise we refer to the incoherent noise that is present in the data. We use the recently introduced multiscale and multidirectional curvelet transform for suppression of random noise. The curvelet transform decomposes data into directional plane waves that are local in nature. The coherent features of the data occupy the large coefficients in the curvelet domain, whereas the incoherent noise lives in the small coefficients. In other words, signal and noise have minimal overlap in the curvelet domain. 
This gives us a chance to use curvelets to suppress noise present in data.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255557}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/kumar09SEGins/kumar09SEGins_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/kumar09SEGins/kumar09SEGins.pdf} } @CONFERENCE{kumar2008CSEGcrs, author = {Vishal Kumar and Felix J. Herrmann}, title = {Curvelet-regularized seismic deconvolution}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2008}, organization = {CSEG}, abstract = {There is an inherent continuity along reflectors of a seismic image. We use the recently introduced multiscale and multidirectional curvelet transform to exploit this continuity along reflectors for cases in which the assumption of spiky reflectivity may not hold. We show that such type of seismic reflectivity can be represented in the curvelet-domain by a vector whose entries decay rapidly. This curvelet-domain compression of reflectivity opens new perspectives towards solving classical problems in seismic processing including the deconvolution problem. In this paper, we present a formulation that seeks curvelet-domain sparsity for non-spiky reflectivity and we compare our results with those of spiky deconvolution.}, keywords = {Presentation, SLIM}, month = {05}, presentation = {http://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2008/kumar2008CSEGcrs/kumar2008CSEGcrs_pres.pdf}, url = {http://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2008/kumar2008CSEGcrs/kumar2008CSEGcrs.pdf} } @CONFERENCE{kumar2008SEGdwc, author = {Vishal Kumar and Felix J. Herrmann}, title = {Deconvolution with curvelet-domain sparsity}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {1996-2000}, organization = {SEG}, abstract = {There is an inherent continuity along reflectors of a seismic image. We use the recently introduced multiscale and multidirectional curvelet transform to exploit this continuity along reflectors for cases in which the assumption of spiky reflectivity may not hold. We show that such type of seismic reflectivity can be represented in the curvelet-domain by a vector whose entries decay rapidly. This curvelet-domain compression of reflectivity opens new perspectives towards solving classical problems in seismic processing including the deconvolution problem. In this paper, we present a formulation that seeks curvelet-domain sparsity for non-spiky reflectivity and we compare our results with those of spiky deconvolution.}, keywords = {SLIM, Presentation, SEG}, doi = {10.1190/1.3059287}, month = {11}, presentation = { https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/kumar08SEGdwc/kumar08SEGdwc_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/kumar08SEGdwc/kumar08SEGdwc.pdf } } @CONFERENCE{lebed2008SINBADaoc, author = {Evgeniy Lebed}, title = {Curvelet / {Surfacelet} comparison}, booktitle = {SINBAD}, year = {2008}, abstract = {Curvelets and Surfacelets are two transforms that aim to achieve a multiscale and a multidirectional decomposition of arbitrary N-dimensional ($N>=2$) signals. While both transforms are Fourier-based, their construction is intrinsically different. 
In this talk we will give an overview of the construction of the two transforms, and explore their properties such as frequency domain / spatial domain coherence, sparsity, redundancy and computational complexity.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/lebed2008SINBADaoc/lebed2008SINBADaoc.pdf} } @CONFERENCE{lebed2008SINBADaoc1, author = {Evgeniy Lebed}, title = {Applications of curvelets/surfacelets to seismic data processing}, booktitle = {SINBAD}, year = {2008}, abstract = {In this talk we explore several applications of the curvelet and surfacelet transforms to seismic data processing. The first application is stable signal recovery in the physical domain - seismic data acquisition is often limited by physical and economic constraints, and the goal is to interpolate the data from a given subset of seismic traces. The second application is signal recovery in a transform domain - we assume that our data comes in the form of a random subset of temporal frequencies and the goal is to recover the missing frequencies from this data. Since seismic signals are generally not bandwidth limited, this in fact becomes an anti-aliasing problem. In both these problems the recovery is resolved via a robust $\ell_1$ solver that exploits the sparsity of the signals in curvelet/surfacelet domains. In the last application we explore the problem of primary-multiple separation by simple thresholding.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/lebed2008SINBADaoc1/lebed2008SINBADaoc1.pdf} } @CONFERENCE{lebed2008SEGhggt, author = {Evgeniy Lebed and Felix J. Herrmann}, title = {A hitchhiker{\textquoteright}s guide to the galaxy of transform-domain sparsification}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, organization = {SEG}, abstract = {The ability to efficiently and sparsely represent seismic data is becoming an increasingly important problem in geophysics. Over the last decade many transforms such as wavelets, curvelets, contourlets, surfacelets, shearlets, and many other types of {\textquoteright}x-lets{\textquoteright} have been developed to try to resolve this issue. In this abstract we compare the properties of four of these commonly used transforms, namely the shift-invariant wavelets, complex wavelets, curvelets and surfacelets. We also briefly explore the performance of these transforms for the problem of recovering seismic wavefields from incomplete measurements.}, keywords = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/lebed08SEGhggt/lebed08SEGhggt.pdf} } @CONFERENCE{vanleeuwen2011ICIAMcbmcwe, author = {Tristan van Leeuwen}, title = {A correlation-based misfit criterion for wave-equation traveltime tomography}, booktitle = {ICIAM}, year = {2011}, organization = {ICIAM 2011}, abstract = {The inference of subsurface medium parameters from seismic data can be posed as a PDE-constrained data-fitting procedure. This approach is successful in reconstructing medium perturbations that are on the order of the wavelength. In practice, the data lack low frequency content and this means that one needs a good initial guess of the slowly varying component of the medium. For a wrong starting model an iterative reconstruction procedure is likely to end up in a local minimum.
We propose to use a different measure of the misfit that makes the optimization problem well-posed in terms of the slowly varying velocity structures. This procedure can be seen as a generalization of ray-based traveltime tomography. We discuss the theoretical underpinnings of the method and give some numerical examples.}, date-added = {2011-07-19}, keywords = {Presentation, ICIAM, Imaging}, month = {07}, note = {Presented at ICIAM 2011, Vancouver BC}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/vanleeuwen2011ICIAMcbmcwe/vanleeuwen2011ICIAMcbmcwe.pdf} } @CONFERENCE{vanleeuwen2011SEGext, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {Probing the extended image volume}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {4045-4050}, organization = {SEG}, abstract = {The prestack image volume can be defined as a cross-correlation of the source and receiver wavefields for non-zero space and time lags. If the background velocity is kinematically acceptable, this image volume will have its main contributions at zero lag, even for complex models. Thus, it is an ideal tool for wave-equation migration velocity analysis in the presence of strong lateral heterogeneity. In particular, it allows us to pose migration velocity analysis as a PDE-constrained optimization problem, where the goal is to minimize the energy in the image volume at non-zero lag subject to fitting the data approximately. However, it is computationally infeasible to explicitly form the whole image volume. In this paper, we discuss several ways to reduce the computational costs involved in computing the image volume and evaluating the focusing criterion. We reduce the costs for calculating the data by randomized source synthesis. We also present an efficient way to subsample the image volume. Finally, we propose an alternative optimization criterion and suggest a multiscale inversion strategy for wave-equation MVA.}, keywords = {SEG, imaging}, doi = {10.1190/1.3628051}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/vanleeuwen11SEGext/vanleeuwen11SEGext.pdf} } @CONFERENCE{vanleeuwen2011WAVESpeiv, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {Probing the extended image volume for seismic velocity inversion}, booktitle = {WAVES}, year = {2011}, organization = {Waves 2011}, abstract = {In seismic velocity inversion one aims to reconstruct a kinematically correct subsurface velocity model that can be used as input for further processing and inversion of the data. An important tool in velocity inversion is the prestack image volume. This image volume can be defined as a cross-correlation of the source and receiver wavefields for non-zero space and time lags. If the background velocity is kinematically acceptable, this image volume will have its main contributions at zero lag, even for complex models. Thus, it is an ideal tool for wave-equation migration velocity analysis in the presence of strong lateral heterogeneity. In particular, it allows us to pose migration velocity analysis as a PDE-constrained optimization problem, where the goal is to minimize the energy in the image volume at non-zero lag subject to fitting the data approximately. However, it is computationally infeasible to explicitly form the whole image volume. In this paper, we discuss several ways to reduce the computational costs involved in computing the image volume and evaluating the focusing criterion.
We reduce the costs for calculating the data by randomized source synthesis. We also present an efficient way to subsample the image volume. Finally, we propose an alternative optimization criterion and suggest a multiscale inversion strategy for wave-equation MVA.}, date-added = {2011-07-29}, keywords = {Presentation}, month = {07}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/vanleeuwen2011WAVESpeiv/vanleeuwen2011WAVESpeiv.pdf} } @CONFERENCE{vanleeuwen2011EAGEhsdomwi, author = {Tristan van Leeuwen and Felix J. Herrmann and Mark Schmidt and Michael P. Friedlander}, title = {A hybrid stochastic-deterministic optimization method for waveform inversion}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {Present-day high quality 3D acquisition can give us lower frequencies and longer offsets with which to invert. However, the computational costs involved in handling this data explosion are tremendous. Therefore, recent developments in full-waveform inversion have been geared towards reducing the computational costs involved. A key aspect of several approaches that have been proposed is a dramatic reduction in the number of sources used in each iteration. A reduction in the number of sources directly translates into fewer PDE-solves and hence a lower computational cost. Recent attention has been drawn towards reducing the sources by randomly combining the sources into a few supershots, but other strategies are also possible. In all cases, the full data misfit, which involves all the sequential sources, is replaced by a reduced misfit that is much cheaper to evaluate because it involves only a small number of sources (batchsize). The batchsize controls the accuracy with which the reduced misfit approximates the full misfit. The optimization of such an inaccurate, or noisy, misfit is the topic of stochastic optimization. In this paper, we propose an optimization strategy that borrows ideas from the field of stochastic optimization. The main idea is that in the early stage of the optimization, far from the true model, we do not need a very accurate misfit. The strategy consists of gradually increasing the batchsize as the iterations proceed. We test the proposed strategy on a synthetic dataset. We achieve a very reasonable inversion result at the cost of roughly 13 evaluations of the full misfit. We observe a speed-up of roughly a factor of 20.}, keywords = {Presentation, EAGE, full-waveform inversion, optimization}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/vanleeuwen11EAGEhsdomwi/vanleeuwen11EAGEhsdomwi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/vanleeuwen11EAGEhsdomwi/vanleeuwen11EAGEhsdomwi.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50341} } @CONFERENCE{vanleeuwen2011SIAMGEOmawt, author = {Tristan van Leeuwen and Wim Mulder}, title = {Multiscale aspects of waveform tomography}, booktitle = {SIAMGEO}, year = {2011}, organization = {SIAM GeoSciences 2011}, abstract = {We consider the inference of medium velocity from transmitted acoustic waves. Typically, the measurements are done in a narrow frequency band. As a result the sensitivity of the data with respect to velocity perturbations varies dramatically with the scale of the perturbation. {\textquoteleft}Smooth{\textquoteright} perturbations will cause a phase shift, whereas perturbations that vary on the wavelength-scale cause amplitude variations.
We investigate how to incorporate this scale dependent behavior in the formulation of the inverse problem.}, keywords = {Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/vanleeuwen2011SIAMGEOmawt/vanleeuwen2011SIAMGEOmawt.pdf} } @CONFERENCE{vanleeuwen2011AMPhsdmwi, author = {Tristan van Leeuwen and Mark Schmidt and Michael P. Friedlander and Felix J. Herrmann}, title = {A hybrid stochastic-deterministic method for waveform inversion}, booktitle = {AMP}, year = {2011}, organization = {WAVES 2011}, abstract = {A lot of seismic and medical imaging problems can be written as a least-squares data-fitting problem. In particular, we consider the case of multi-experiment data, where the data consists of a large number of "independent" measurements. Solving the inverse problem then involves repeatedly forward modeling the data for each of these experiments. In case the number of experiments is large and the modeling kernel expensive to apply, such an approach may be prohibitively expensive. We review techniques from stochastic optimization which aim at dramatically reducing the number of experiments that need to be modeled at each iteration. This reduction is typically achieved by randomly subsampling the data. Special care needs to be taken in the optimization to deal with the stochasticity that is introduced in this way.}, date-added = {2011-07-15}, keywords = {Presentation}, month = {07}, note = {Presented at AMP Medical and Seismic Imaging, 2011, Vancouver BC}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICIAM/2011/vanleeuwen2011AMPhsdmwi/vanleeuwen2011AMPhsdmwi.pdf} } @CONFERENCE{li2011EAGEfwirr, author = {Xiang Li and Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann}, title = {Full-waveform inversion with randomized {L1} recovery for the model updates}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {Full-waveform inversion (FWI) is a data fitting procedure that relies on the collection of seismic data volumes and sophisticated computing to create high-resolution results. With the advent of FWI, the improvements in acquisition and inversion have been substantial, but these improvements come at a high cost because FWI involves extremely large multi-experiment data volumes. The main obstacle is the "curse of dimensionality" exemplified by Nyquist{\textquoteright}s sampling criterion, which puts a disproportionate strain on current acquisition and processing systems as the size and desired resolution increases. In this paper, we address the "curse of dimensionality" by randomized dimensionality reduction of the FWI problem adapted from the field of CS. We invert for model updates by replacing the Gauss-Newton linearized subproblem for subsampled FWI with a sparsity promoting formulation, and solve this formulation using the SPGl1 algorithm. We speed up the algorithm and avoid overfitting the data by solving for the linearized updates only approximately. Our approach is successful because it reduces the size of seismic data volumes without loss of information.
With this reduction, we can compute a Newton-like update with the reduced data volume at the cost of roughly one gradient update for the fully sampled wavefield.}, keywords = {Presentation, EAGE, full-waveform inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/li11EAGEfwirr/li11EAGEfwirr_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/li11EAGEfwirr/li11EAGEfwirr.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50345} } @CONFERENCE{li2011CSEGefimag, author = {Xiang Li and Felix J. Herrmann}, title = {Efficient full-waveform inversion with marine acquisition geometry}, booktitle = {CSEG technical program}, year = {2012}, abstract = {Full-waveform inversion (FWI) is a nonlinear data fitting procedure based on seismic data to derive an accurate velocity model. With the increasing demand for high resolution images in complex geological settings, the importance of improvements in acquisition and inversion becomes more and more critical. However, these improvements will be obtained at high computational cost, as a typical marine survey contains thousands of shot and receiver positions, and FWI needs several passes through massive seismic data. The computational cost of FWI will grow exponentially as the size of seismic data and desired resolution increase. In this paper we present a modified Gauss-Newton (GN) method that borrows ideas from compressive sensing, where we compute the GN updates from a few randomly selected sequential shots. Each subproblem is solved by using a sparsity promoting algorithm. With this approach, we dramatically reduce the size and hence the computational costs of the problem, whilst we control information loss by redrawing a different set of sequential shots for each subproblem.}, keywords = {CSEG}, month = {02}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2012/li2011CSEGefimag/li2011CSEGefimag.pdf} } @CONFERENCE{li2012SEGspmamp, author = {Xiang Li and Felix J. Herrmann}, title = {Sparsity-promoting migration accelerated by message passing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2012}, month = {11}, volume = {31}, pages = {1-6}, organization = {SEG}, abstract = {Seismic imaging via linearized inversion requires multiple iterations to minimize the least-squares misfit as a function of the medium perturbation. Unfortunately, the cost of these iterations is prohibitive because each iteration requires many wave-equation simulations, which without direct solvers require an expensive separate solve for each source. To overcome this problem, we use dimensionality reduction to decrease the size of the seismic imaging problem by turning the large number of sequential shots into a much smaller number of simultaneous shots. In our approach, we take advantage of sparsifying transforms to remove source crosstalk resulting from randomly weighting and stacking sequential shots into a few super shots. We also take advantage of the fact that the convergence of large-scale sparsity-promoting solvers can be improved significantly by borrowing ideas from message passing, which are designed to break correlation built up between the linear system and the model iterate. In this way, we arrive at a formulation where we run the sparsity-promoting solver for a relatively large number of iterations.
Aside from leading to a significant speed up, our approach has the advantage of greatly reducing the memory imprint and IO requirements. We demonstrate this feature by solving a sparsity-promoting imaging problem with operators of reverse-time migration, which is computationally infeasible without the dimensionality reduction.}, keywords = {SEG, imaging, inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/li2012SEGspmamp/li2012SEGspmamp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/li2012SEGspmamp/li2012SEGspmamp.pdf}, doi = {10.1190/segam2012-1500.1} } @CONFERENCE{li2010SEGfwi, author = {Xiang Li and Felix J. Herrmann}, title = {Full-waveform inversion from compressively recovered model updates}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2010}, month = {10}, volume = {29}, pages = {1029-1033}, organization = {SEG}, abstract = {Full-waveform inversion relies on the collection of large multi-experiment data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. While improvements in acquisition and inversion have been extremely successful, the current trend of incessantly pushing for higher quality models in increasingly complicated regions of the Earth reveals fundamental shortcomings in our ability to handle increasing problem size numerically. Two main culprits can be identified. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution increases. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this. In this paper, we address this situation by randomized dimensionality reduction, which we adapt from the field of compressive sensing. In this approach, we combine deliberate randomized subsampling with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information. With this reduction, we compute Newton-like updates at the cost of roughly one gradient update for the fully-sampled wavefield.}, keywords = {Presentation, SEG, full-waveform inversion}, doi = {10.1190/1.3513022}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/li10SEGfwi/li10SEGfwi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/li10SEGfwi/li10SEGfwi.pdf} } @CONFERENCE{li2011SBGFmgnsu, author = {Xiang Li and Felix J. Herrmann and Tristan van Leeuwen and Aleksandr Y. Aravkin}, title = {Modified {Gauss-Newton} with sparse updates}, booktitle = {SBGF}, year = {2011}, organization = {SBGF}, abstract = {Full-waveform inversion (FWI) is a data fitting procedure that relies on the collection of seismic data volumes and sophisticated computing to create high-resolution models. With the advent of FWI, the improvements in acquisition and inversion have been substantial, but these improvements come at a high cost because FWI involves extremely large multi-experiment data volumes.
The main obstacle is the {\textquoteleft}curse of dimensionality{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts a disproportionate strain on current acquisition and processing systems as the size and desired resolution increases. In this paper, we address the {\textquoteleft}curse of dimensionality{\textquoteright} by using randomized dimensionality reduction of the FWI problem, coupled with a modified Gauss-Newton (GN) method designed to promote curvelet-domain sparsity of model updates. We solve for these updates using the spectral projected gradient method, implemented in the SPGL1 software package. Our approach is successful because it reduces the size of seismic data volumes without loss of information. With this reduction, we can compute Gauss-Newton updates with the reduced data volume at the cost of roughly one gradient update for the fully sampled wavefield.}, keywords = {SBGF, full-waveform inversion}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SBGF/2011/li11SBGFmgnsu/li11SBGFmgnsu.pdf} } @CONFERENCE{lin2006SINBADci, author = {Tim T.Y. Lin}, title = {Compressed imaging}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {In 1998 Grimbergen et al. introduced a new method for computing wavefield propagation which improved on the previously employed local explicit operator method in that it exhibited no dip limitation, accurately handled laterally varying background velocity models, and is unconditionally stable. These desirable properties are mainly attributed to bringing the propagation problem into an eigenvector basis that diagonalizes the propagation operators. This modal-transform method, however, requires at each depth-level the solution of a large-scale sparse eigenvalue problem to compute the square-root of the Helmholtz operator. By using recent results from compressed sensing, we hope to reduce these computational costs that typically involve the synthesis of the imaging operators and the cost of matrix-vector products. To reduce these costs, we compress the extrapolation operators by using only a fraction of the positive eigenvalues and temporal frequencies. This reduction not only leads to smaller matrices but also to reduced synthesis costs. These reductions go at the expense of solving a recovery problem from incomplete data. During the presentation, we show that wavefields can accurately be extrapolated with compressed operators at competitive costs.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/lin2006SINBADci/lin2006SINBADci.pdf} } @CONFERENCE{lin2009SEGcsf, author = {Tim T.Y. Lin and Yogi A. Erlangga and Felix J. Herrmann}, title = {Compressive simultaneous full-waveform simulation}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {2577-2581}, organization = {SEG}, abstract = {The fact that the computational complexity of wavefield simulation is proportional to the size of the discretized model and acquisition geometry, and not to the complexity of the simulated wavefield, is a major impediment within seismic imaging.
By turning simulation into a compressive sensing problem{\textendash}where simulated data is recovered from a relatively small number of independent simultaneous sources{\textendash}we remove this impediment by showing that compressively sampling a simulation is equivalent to compressively sampling the sources, followed by solving a reduced system. As in compressive sensing, this allows for a reduction in sampling rate and hence in simulation costs. We demonstrate this principle for the time-harmonic Helmholtz solver. The solution is computed by inverting the reduced system, followed by a recovery of the full wavefield with a sparsity promoting program. Depending on the wavefield{\textquoteright}s sparsity, this approach can lead to a significant cost reduction, in particular when combined with the implicit preconditioned Helmholtz solver, which is known to converge even for decreasing mesh sizes and increasing angular frequencies. These properties make our scheme a viable alternative to explicit time-domain finite-difference.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255381}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/Lin09SEGcsf/Lin09SEGcsf_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/Lin09SEGcsf/Lin09SEGcsf.pdf} } @CONFERENCE{lin2011EAGEepsic, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Estimating primaries by sparse inversion in a curvelet-like representation domain}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {We present an uplift in the fidelity and wavefront continuity of results obtained from the Estimation of Primaries by Sparse Inversion (EPSI) program by reconstructing the primary events in a hybrid wavelet-curvelet representation domain. EPSI is a multiple removal technique that belongs to the class of wavefield inversion methods, as an alternative to the traditional adaptive-subtraction process. The main assumption is that the correct primary events should be as sparsely-populated in time as possible. A convex reformulation of the original EPSI algorithm allows its convergence property to be preserved even when the solution wavefield is not formed in the physical domain. Since wavefronts and edge-type singularities are sparsely represented in the curvelet domain, sparse solutions formed in this domain will exhibit vastly improved continuity when compared to those formed in the physical domain, especially for the low-energy events at later arrival times. Furthermore, a wavelet-type representation domain will preserve sparsity in the reflected events even if they originate from non-zero-order discontinuities in the subsurface, providing an additional level of robustness. This method does not require any changes in the underlying computational algorithm and does not explicitly impose continuity constraints on each update.}, keywords = {Presentation, EAGE, processing}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/lin11EAGEepsic/lin11EAGEepsic_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/lin11EAGEepsic/lin11EAGEepsic.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50427} } @CONFERENCE{lin2011SEGrssde, author = {Tim T.Y. Lin and Felix J.
Herrmann}, title = {Robust source signature deconvolution and the estimation of primaries by sparse inversion}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {4354-4359}, organization = {Dept. of Earth and Ocean Sciences, University of British Columbia}, abstract = {The past few years have seen some concentrated interest on a particular wavefield-inversion approach to the popular SRME multiple removal technique called Estimation of Primaries by Sparse Inversion (EPSI). EPSI promises greatly improved tolerance to noise, missing data, edge effects, and other physical phenomena generally not described by the SRME relation (van Groenestijn and Verschuur, 2009a,b). It is based on the premise that it is possible to stably invert for both the primary impulse response and the source signature despite beforehand having no (or very limited) explicit knowledge of the latter. The key to successful applications of EPSI, as shown in very recent works (Savels et al., 2010), is a robust way to reconstruct very sparse primary impulse response events as part of the inversion process. Based on the various successful demonstrations in literature, there is a very strong sense that EPSI will also play an important role in future developments of source signature deconvolution and the general recovery of the wavefield spectrum.}, keywords = {Presentation, deconvolution, SEG, sparse inversion, processing}, doi = {10.1190/1.3628116}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/lin11SEGrssde/lin11SEGrssde_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/lin11SEGrssde/lin11SEGrssde.pdf} } @CONFERENCE{lin2010EAGEseo, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Stabilized estimation of primaries via sparse inversion}, booktitle = {EAGE}, year = {2010}, month = {06}, abstract = {Estimation of Primaries by Sparse Inversion (EPSI) is a recent method for surface-related multiple removal using a direct estimation method closely related to Amundsen inversion, where under a sparsity assumption the primary impulse response is determined directly from a data-driven wavefield inversion process. One of the major difficulties in its practical adoption is that one must have precise knowledge of a time-window that contains multiple-free primaries during each update. Moreover, due to the nuances involved in regularizing the model impulse response in the inverse problem, the EPSI approach has an additional number of inversion parameters where it may be difficult to choose a reasonable value. We show that the specific sparsity constraint on the EPSI updates leads to an inherently intractable problem, and that the time-window and other inversion variables arise in the context of additional regularizations that attempt to drive towards a meaningful solution. We furthermore suggest a way to remove almost all of these parameters via convexification, which stabilizes the inversion while preserving the crucial sparsity assumption in the primary impulse response model.}, keywords = {Presentation, EAGE, processing}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/lin10EAGEseo/lin10EAGEseo_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2010/lin10EAGEseo/lin10EAGEseo.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39122} } @CONFERENCE{lin2009EAGEdsa, author = {Tim T.Y. Lin and Felix J.
Herrmann}, title = {Designing simultaneous acquisitions with compressive sensing}, booktitle = {EAGE}, year = {2009}, month = {06}, abstract = {The goal of this paper is to design a functional simultaneous acquisition scheme by applying the principles of compressive sensing. By framing the acquisition in a compressive sensing setting we immediately gain insight into not only how to choose the source signature and shot patterns, but also into how well we can hope to demultiplex the data when given a set amount of reduction in the number of sweeps. The principles of compressive sensing dictate that the quality of the demultiplexed data is closely related to the transform-domain sparsity of the solution. This means that, given an estimate of the complexity of the expected data wavefield, it is possible to controllably reduce the number of shots that need to be recorded in the field. We show a proof of concept by introducing an acquisition compatible with compressive sensing based on randomly phase-encoded vibroseis sweeps.}, keywords = {Presentation, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/lin09EAGEdsa/lin2009EAGEdsa_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2009/lin09EAGEdsa/lin09EAGEdsa.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=23951} } @CONFERENCE{lin2009SEGucs, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Unified compressive sensing framework for simultaneous acquisition with primary estimation}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3113-3117}, organization = {SEG}, abstract = {The central promise of simultaneous acquisition is a vastly improved crew efficiency during acquisition at the cost of additional post-processing to obtain conventional source-separated data volumes. Using recent theories from the field of compressive sensing, we present a way to systematically model the effects of simultaneous acquisition. Our formulation forms a new framework in the study of acquisition design and naturally leads to an inversion-based approach for the separation of shot records. Furthermore, we show how other inversion-based methods, such as a recently proposed method from van Groenestijn and Verschuur (2009) for primary estimation, can be processed together with the demultiplexing problem to achieve a better result compared to a separate treatment of these problems.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255502}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/lin09SEGucs/lin09SEGucs_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/lin09SEGucs/lin09SEGucs.pdf} } @CONFERENCE{lin2008SINBADcwe, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Compressed wavefield extrapolation}, booktitle = {SINBAD}, year = {2008}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/sites/data/Papers/lin08cwe.pdf} } @CONFERENCE{lin2007SEGcwe1, author = {Tim T.Y. Lin and Felix J.
Herrmann}, title = {Compressed wavefield extrapolation with curvelets}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {1997-2001}, organization = {SEG}, abstract = {An explicit algorithm for the extrapolation of one-way wavefields is proposed which combines recent developments in information theory and theoretical signal processing with the physics of wave propagation. Because of excessive memory requirements, explicit formulations for wave propagation have proven to be a challenge in 3-D. By using ideas from {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright}, we are able to formulate the (inverse) wavefield extrapolation problem on small subsets of the data volume, thereby reducing the size of the operators. According to compressed sensing theory, signals can successfully be recovered from an incomplete set of measurements when the measurement basis is incoherent with the representation in which the wavefield is sparse. In this new approach, the eigenfunctions of the Helmholtz operator are recognized as a basis that is incoherent with curvelets that are known to compress seismic wavefields. By casting the wavefield extrapolation problem in this framework, wavefields can successfully be extrapolated in the modal domain via a computationally cheaper operation. A proof of principle for the {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright} method is given for wavefield extrapolation in 2-D. The results show that our method is stable and produces identical results compared to the direct application of the full extrapolation operator. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792882}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/lin07SEGcwe1/lin07SEGcwe1_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/lin07SEGcwe1/lin07SEGcwe1.pdf } } @CONFERENCE{lin2009DELPHIrwi, author = {Tim T.Y. Lin and Felix J. Herrmann and Yogi A. Erlangga}, title = {Randomized wavefield inversion}, booktitle = {DELPHI}, year = {2009}, keywords = {Presentation}, note = {Presented at the DELPHI meeting. The Hague}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/Delphi/2009/lin2009DELPHIrwi/lin2009DELPHIrwi.pdf} } @CONFERENCE{lin2008SEGiso, author = {Tim T.Y. Lin and Evgeniy Lebed and Yogi A. Erlangga and Felix J. Herrmann}, title = {Interpolating solutions of the {Helmholtz} equation with compressed sensing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {2122-2126}, organization = {SEG}, abstract = {We present an algorithm which allows us to model wavefields with frequency-domain methods using a much smaller number of frequencies than that typically required by the classical sampling theory in order to obtain an alias-free result. The foundation of the algorithm is the recent results on compressed sensing, which state that data can be successfully recovered from an incomplete measurement if the data is sufficiently sparse.
Results from a numerical experiment show that only 30\% of the total frequency spectrum is needed to capture the full wavefield information when working in the hard 2D synthetic Marmousi model.}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.3059307}, month = {01}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/lin08SEGiso/lin08SEGiso.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/lin08SEGiso/lin08SEGiso.pdf } } @CONFERENCE{lin2010SEGspm, author = {Tim T.Y. Lin and Ning Tu and Felix J. Herrmann}, title = {Sparsity-promoting migration from surface-related multiples}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2010}, month = {10}, volume = {29}, pages = {3333-3337}, organization = {SEG}, abstract = {Seismic imaging typically begins with the removal of multiple energy in the data, out of fear that it may introduce erroneous structure. However, seismic multiples have effectively seen more of the earth{\textquoteright}s structure, and if treated correctly can potentially supply more information to a seismic image compared to primaries. Past approaches to accomplish this leave ample room for improvement; they either require extensive modification to standard migration techniques, rely too much on prior information, require extensive pre-processing, or resort to full-waveform inversion. We take some valuable lessons from these efforts and present a new approach balanced in terms of ease of implementation, robustness, efficiency and well-posedness, involving a sparsity-promoting inversion procedure using standard Born migration and a data-driven multiple modeling approach based on the focal transform.}, keywords = {Presentation, SEG, processing}, doi = {10.1190/1.3513540}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/lin10SEGspm/lin10SEGspm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/lin10SEGspm/lin10SEGspm.pdf} } @CONFERENCE{mansour2012SSPwspgl1, author = {Hassan Mansour}, title = {Beyond $\ell_1$ norm minimization for sparse signal recovery}, booktitle = {2012 IEEE Statistical Signal Processing Workshop (SSP) (SSP'12)}, year = {2012}, address = {Ann Arbor, Michigan, USA}, organization = {IEEE}, abstract = {Sparse signal recovery has been dominated by the basis pursuit denoise (BPDN) problem formulation for over a decade. In this paper, we propose an algorithm that outperforms BPDN in finding sparse solutions to underdetermined linear systems of equations at no additional computational cost. Our algorithm, called WSPGL1, is a modification of the spectral projected gradient for $\ell_1$ minimization (SPGL1) algorithm in which the sequence of LASSO subproblems is replaced by a sequence of weighted LASSO subproblems with constant weights applied to a support estimate. The support estimate is derived from the data and is updated at every iteration. The algorithm also modifies the Pareto curve at every iteration to reflect the new weighted $\ell_1$ minimization problem that is being solved. We demonstrate through extensive simulations that the sparse recovery performance of our algorithm is superior to that of $\ell_1$ minimization and approaches the recovery performance of iterative re-weighted $\ell_1$ (IRWL1) minimization of Cand{\`e}s, Wakin, and Boyd.
Moreover, our algorithm has the computational cost of a single BPDN problem.}, keywords = {sparse recovery, compressed sensing, iterative algorithms, weighted $\ell_1$ minimization, partial support recovery}, month = {03}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SSP/2012/mansour2012SSPwspgl1/mansour2012SSPwspgl1_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SSP/2012/mansour2012SSPwspgl1/mansour2012SSPwspgl1.pdf} } @CONFERENCE{mansour2011SBGFcspsma, author = {Hassan Mansour and Haneet Wason and Tim T.Y. Lin and Felix J. Herrmann}, title = {A compressive sensing perspective on simultaneous marine acquisition}, booktitle = {SBGF}, year = {2011}, organization = {SBGF}, abstract = {The high cost of acquiring seismic data in marine environments compels the adoption of simultaneous-source acquisition - an emerging technology that is stimulating both geophysical research and commercial efforts. In this paper, we discuss the properties of randomized simultaneous acquisition matrices and demonstrate that sparsity-promoting recovery improves the quality of the reconstructed seismic data volumes. Simultaneous marine acquisition calls for the development of a new set of design principles and post-processing tools. Leveraging established findings from the field of compressed sensing, the recovery from simultaneous sources depends on a sparsifying transform that compresses seismic data, is fast, and reasonably incoherent with the compressive sampling matrix. To achieve this incoherence, we use random time dithering where sequential acquisition with a single airgun is replaced by continuous acquisition with multiple airguns firing at random times and at random locations. We demonstrate our results with simulations of simultaneous marine acquisition using periodic and randomized time dithering.}, keywords = {Presentation, SBGF, acquisition, compressive sensing}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SBGF/2011/Mansour11SBGFcspsma/Mansour11SBGFcspsma.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SBGF/2011/Mansour11SBGFcspsma/Mansour11SBGFcspsma.pdf} } @CONFERENCE{mansour2012ICASSadapt, author = {Hassan Mansour and Ozgur Yilmaz}, title = {Adaptive compressed sensing for video acquisition}, booktitle = {ICASSP}, year = {2012}, organization = {ICASSP}, abstract = {In this paper, we propose an adaptive compressed sensing scheme that utilizes a support estimate to focus the measurements on the large valued coefficients of a compressible signal. We embed a "sparse-filtering" stage into the measurement matrix by weighting down the contribution of signal coefficients that are outside the support estimate. We present an application which can benefit from the proposed sampling scheme, namely, video compressive acquisition.
We demonstrate that our proposed adaptive CS scheme results in a significant improvement in reconstruction quality compared with standard CS as well as adaptive recovery using weighted $\ell_1$ minimization.}, keywords = {ICASSP}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2012/MansourYilmazICASSPaCS/MansourYilmazICASSPaCS.pdf} } @CONFERENCE{mansour2012ICASSsupport, author = {Hassan Mansour and Ozgur Yilmaz}, title = {Support driven reweighted $\ell_1$ minimization}, booktitle = {ICASSP}, year = {2012}, organization = {ICASSP}, abstract = {In this paper, we propose a support driven reweighted $\ell_1$ minimization algorithm (SDRL1) that solves a sequence of weighted $\ell_1$ problems and relies on the support estimate accuracy. Our SDRL1 algorithm is related to the IRL1 algorithm proposed by Cand{\`e}s, Wakin, and Boyd. We demonstrate that it is sufficient to find support estimates with good accuracy and apply constant weights instead of using the inverse coefficient magnitudes to achieve gains similar to those of IRL1. We then prove that given a support estimate with sufficient accuracy, if the signal decays according to a specific rate, the solution to the weighted $\ell_1$ minimization problem results in a support estimate with higher accuracy than the initial estimate. We also show that under certain conditions, it is possible to achieve higher estimate accuracy when the intersection of support estimates is considered. We demonstrate the performance of SDRL1 through numerical simulations and compare it with that of IRL1 and standard $\ell_1$ minimization.}, keywords = {ICASSP}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2012/MansourYilmazICASSPwL1/MansourYilmazICASSPwL1.pdf} } @CONFERENCE{maysami2006SINBADrro, author = {Mohammad Maysami}, title = {Recent results on seismic deconvolution}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {One of the important steps in seismic imaging is to provide suitable information about boundaries. Sharp variations of physical properties at a layer boundary cause reflection of the wavefield. In previous work done by C. M. Dupuis, seismic signal characterization is divided into two steps: detection and estimation. In the detection phase, the goal is to find all singularities in a seismic section regardless of their order and then to categorize the data into different events by windowing each singularity. In the estimation step, we determine the order of the singularity more precisely by using a rough estimate based on the detection phase. Traditionally, a redundant dictionary method is employed for the detection part. However, we attempt to instead use a new L1-solver developed by D.L. Donoho: the Stagewise Orthogonal Matching Pursuit (StOMP). It approximates the solution to inverse problems while promoting the sparsity in the solution vector. This algorithm will allow us to experimentally confirm the recent analysis by S. Mallat on spiky deconvolution limits, which imposes a required minimum distance between spikes. This required minimum distance between different spikes is dependent on the number of spikes as well as the width of the chosen source wavelet used in convolution with the spike train.
These results allow for the design of more robust and accurate detection schemes for seismic signal characterization.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/maysami06SINBADrro/maysami06SINBADrro_pres.pdf} } @CONFERENCE{maysami2008SEGlcf, author = {Mohammad Maysami and Felix J. Herrmann}, title = {Lithological constraints from seismic waveforms: application to opal-{A} to opal-{CT} transition}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {2011-2015}, organization = {SEG}, abstract = {In this paper, we present a new method for seismic waveform characterization whose aim is threefold, namely (i) extraction of detailed information on the sharpness of transitions in the subsurface from seismic waveforms, (ii) reflector modeling, based on binary-mixture and percolation theory, and (iii) establishment of well-seismic ties, through parameterizations of our waveform and critical reflector model. We test this methodology on the opal-A (Amorphous) to opal-CT (Cristobalite/Tridymite) transition imaged in a migrated section of North Sea field data West of the Shetlands.}, keywords = {SEG, SLIM}, doi = {10.1190/1.3059400}, month = {11}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/maysami08SEGlcf/maysami08SEGlcf.pdf} } @CONFERENCE{maysami2007EAGEsrc, author = {Mohammad Maysami and Felix J. Herrmann}, title = {Seismic reflector characterization by a multiscale detection-estimation method}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {Seismic transitions of the subsurface are typically considered as zero-order singularities (step functions). According to this model, the conventional deconvolution problem aims at recovering the seismic reflectivity as a sparse spike train. However, recent multiscale analysis on sedimentary records revealed the existence of accumulations of varying order singularities in the subsurface, which give rise to fractional-order discontinuities. This observation not only calls for a richer class of seismic reflection waveforms, but it also requires a different methodology to detect and characterize these reflection events. For instance, the assumptions underlying conventional deconvolution no longer hold. Because of the bandwidth limitation of seismic data, multiscale analysis methods based on the decay rate of wavelet coefficients may yield ambiguous results. We avoid this problem by formulating the estimation of the singularity orders by a parametric nonlinear inversion method.}, keywords = {Presentation, SLIM, EAGE}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/maysami07EAGEsrc/maysami07EAGEsrc_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/maysami07EAGEsrc/maysami07EAGEsrc.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7081} } @CONFERENCE{modzelewski2008SINBADdas, author = {Henryk Modzelewski}, title = {Design and specifications for {SLIM{\textquoteright}s} software framework}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {The SLIM group is actively developing software for seismic imaging. This talk will give a general overview of the software development during SINBAD project with focus on the final release in February 2008. 
The covered topics will include: 1) adopting Python for object-oriented programming, 2) including parallelism into the algorithms used in seismic imaging/modeling, 3) in-house algorithms for seismic imaging, and 4) contributions to Madagascar (RSF). The talk will serve as an introduction to the other presentations in the session "SINBAD Software releases".}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/modzelewski2008SINBADdas/modzelewski2008SINBADdas.pdf} } @CONFERENCE{modzelewski2006SINBADdas, author = {Henryk Modzelewski}, title = {Design and specifications for {SLIM{\textquoteright}s} software framework}, booktitle = {SINBAD 2006}, year = {2006}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/modzelewski2006SINBADdas/modzelewski2006SINBADdas.pdf} } @CONFERENCE{moghaddam2008SINBADrtm, author = {Peyman P. Moghaddam}, title = {Reverse-time migration amplitude recovery with curvelets}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We recover the amplitude of a seismic image by approximating the normal (demigration-migration) operator. In this approximation, we make use of the property that curvelets remain invariant under the action of the normal operator. We propose a seismic amplitude recovery method that employs an eigenvalue like decomposition for the normal operator using curvelets as eigenvectors. Subsequently, we propose an approximate nonlinear singularity-preserving solution to the least-squares seismic imaging problem with sparseness in the curvelet domain and spatial continuity constraints. Our method is tested with a reverse-time {\textquoteright}wave-equation{\textquoteright} migration code simulating the acoustic wave equation.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/moghaddam2008SINBADrtm/moghaddam2008SINBADrtm.pdf} } @CONFERENCE{moghaddam2006SINBADioa, author = {Peyman P. Moghaddam}, title = {Imaging operator approximation using curvelets}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {In this presentation, the normal (demigration-migration) operator is studied in terms of a pseudo-differential operator. The invariance of curvelets under this operator and their sparsity on the seismic images is used to precondition the migration operator. A brief overview will be given on some of the theory from micro-local analysis which proves that curvelets remain approximately invariant under the operator. The proper setting for which a diagonal approximation in the curvelet domain is accurate is discussed together with different methods that estimate this diagonal from off-the-shelf migration operators. This is joint work with Chris Stolk.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/moghaddam2006SINBADioa/moghaddam2006SINBADioa.pdf} } @CONFERENCE{moghaddam2006SINBADsac, author = {Peyman P. Moghaddam}, title = {Sparsity- and continuity-promoting norms for seismic images}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {During this presentation, the importance of sparsity and continuity enhancing energy norms is emphasized for seismic imaging and inversion. The continuity promoting energy norm is justified by the apparent smoothness of reflectors in the direction along and the oscillatory behavior across the interfaces.
This energy norm is called anisotropic diffusion and will be defined mathematically. Denoising examples will be given during which seismic images are recovered from the noise by a joint norm-one and continuity promoting minimization.}, keywords = {Presentation, SINBAD, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/moghaddam2006SINBADsac/moghaddam2006SINBADsac.pdf} } @CONFERENCE{moghaddam2008SEGcbm, author = {Peyman P. Moghaddam and Cody R. Brown and Felix J. Herrmann}, title = {Curvelet-based migration preconditioning}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {2211-2215}, organization = {SEG}, abstract = {In this paper, we introduce a preconditioner for seismic imaging{\textendash}-i.e., the inversion of the linearized Born scattering operator. This preconditioner approximately corrects for the {\textquoteleft}{\textquoteleft}square root{\textquoteright}{\textquoteright} of the normal{\textendash}-i.e., the demigration-migration operator. This approach consists of three parts, namely (i) a left preconditioner, defined by a fractional time integration designed to make the migration operator zero order, and two right preconditioners that apply (ii) a scaling in the physical domain accounting for a spherical spreading, and (iii) a curvelet-domain scaling that corrects for spatial and reflector-dip dependent amplitude errors. We show that a combination of these preconditioners leads to a significant improvement of the convergence for iterative least-squares solutions to the seismic imaging problem based on reverse-time migration operators.}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.3059325}, month = {11}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/moghaddam08SEGcbm/moghaddam08SEGcbm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/moghaddam08SEGcbm/moghaddam08SEGcbm.pdf } } @CONFERENCE{moghaddam2010SEGrfw, author = {Peyman P. Moghaddam and Felix J. Herrmann}, title = {Randomized full-waveform inversion: a dimensionality-reduction approach}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2010}, month = {10}, volume = {29}, pages = {977-982}, organization = {SEG}, abstract = {Full-waveform inversion relies on the collection of large multi-experiment data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. While improvements in acquisition and inversion have been extremely successful, the current trend of incessantly pushing for higher quality models in increasingly complicated regions of the Earth reveals fundamental shortcomings in our ability to handle increasing problem sizes numerically. Two main culprits can be identified. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution increases. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to develop algorithms that are amenable to parallelization.
In this paper, we discuss different strategies that address these issues via randomized dimensionality reduction.}, keywords = {Presentation, SEG, full-waveform inversion, optimization}, doi = {10.1190/1.3513940}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/moghaddam10SEGrfw/moghaddam10SEGrfw_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/moghaddam10SEGrfw/moghaddam10SEGrfw.pdf} } @CONFERENCE{moghaddam2004SEGmpw, author = {Peyman P. Moghaddam and Felix J. Herrmann}, title = {Migration preconditioning with curvelets}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2004}, volume = {23}, pages = {2204-2207}, organization = {SEG}, abstract = {In this paper, the property of curvelet transforms for preconditioning the migration and normal operators is investigated. These operators belong to the class of Fourier integral operators and pseudo-differential operators, respectively. The effect of this pre-conditioner is shown in terms of improvement of sparsity, convergence rate, number of iterations for the Krylov-subspace solver, and clustering of singular (eigen) values. The migration operator, which we employed in this work, is the common-offset Kirchhoff-Born migration. {\copyright}2004 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.1845213}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Moghaddam04SEGmpw/Moghaddam04SEGmpw_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2004/Moghaddam04SEGmpw/Moghaddam04SEGmpw_paper.pdf} } @CONFERENCE{moghaddam2007CSEGmar, author = {Peyman P. Moghaddam and Felix J. Herrmann and C. C. Stolk}, title = {Migration amplitude recovery using curvelets}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2007}, organization = {CSEG}, abstract = {In this paper, we recover the amplitude of a seismic image by approximating the normal operator and subsequently inverting it. The normal operator (migration followed by modeling) is an example of a pseudo-differential operator. Curvelets are proven to be invariant under the action of pseudo-differential operators under certain conditions. Subsequently, curvelets form eigenvectors for such an operator. We propose a seismic amplitude recovery method that employs an eigenvalue decomposition for the normal operator using curvelets as eigenvectors and eigenvalues to be estimated. A post-stack reverse-time, wave-equation migration is used for evaluation of the proposed method.}, file = {http://cseg.ca/assets/files/resources/abstracts/2007/168S0131.pdf}, keywords = {SLIM}, month = {05}, url = {http://cseg.ca/assets/files/resources/abstracts/2007/168S0131.pdf} } @CONFERENCE{moghaddam2007CSEGsac, author = {Peyman P. Moghaddam and Felix J. Herrmann and C. C. Stolk}, title = {Sparsity and continuity enhancing seismic imaging}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2007}, organization = {CSEG}, abstract = {A non-linear singularity-preserving solution to the least-squares seismic imaging problem with sparseness and continuity constraints is proposed. The applied formalism explores curvelets as a directional frame that, by their sparsity on the image, and their invariance under the imaging operators, allows for a stable recovery of the amplitudes.
Our method is based on the estimation of the normal operator in the form of an {\textquoteright}eigenvalue{\textquoteright} decomposition with curvelets as the {\textquoteright}eigenvectors{\textquoteright}. Subsequently, we propose an inversion method that derives from estimation of the normal operator and is formulated as a convex optimization problem. Sparsity in the curvelet domain as well as continuity along the reflectors in the image domain are promoted as part of this optimization. Our method is tested with a reverse-time {\textquoteright}wave-equation{\textquoteright} migration code simulating the acoustic wave equation.}, file = {http://cseg.ca/assets/files/resources/abstracts/2007/091S0130.pdf}, keywords = {SLIM}, month = {05}, url = {http://cseg.ca/assets/files/resources/abstracts/2007/091S0130.pdf} } @CONFERENCE{moghaddam2007EAGEsar, author = {Peyman P. Moghaddam and Felix J. Herrmann and C. C. Stolk}, title = {Seismic amplitude recovery with curvelets}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {A non-linear singularity-preserving solution to the least-squares seismic imaging problem with sparseness and continuity constraints is proposed. The applied formalism explores curvelets as a directional frame that, by their sparsity on the image, and their invariance under the imaging operators, allows for a stable recovery of the amplitudes. Our method is based on the estimation of the normal operator in the form of an {\textquoteright}eigenvalue{\textquoteright} decomposition with curvelets as the {\textquoteright}eigenvectors{\textquoteright}. Subsequently, we propose an inversion method that derives from estimation of the normal operator and is formulated as a convex optimization problem. Sparsity in the curvelet domain as well as continuity along the reflectors in the image domain are promoted as part of this optimization. Our method is tested with a reverse-time {\textquoteright}wave-equation{\textquoteright} migration code simulating the acoustic wave equation.}, keywords = {SLIM, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/moghaddam07EAGEsar/moghaddam07EAGEsar.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6935} } @CONFERENCE{moghaddam2007SEGrsi, author = {Peyman P. Moghaddam and Felix J. Herrmann and C. C. Stolk}, title = {Robust seismic-images amplitude recovery using curvelets}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2225-2229}, organization = {SEG}, abstract = {In this paper, we recover the amplitude of a seismic image by approximating the normal (demigration-migration) operator. In this approximation, we make use of the property that curvelets remain invariant under the action of the normal operator. We propose a seismic amplitude recovery method that employs an eigenvalue like decomposition for the normal operator using curvelets as eigen-vectors. Subsequently, we propose an approximate non-linear singularity-preserving solution to the least-squares seismic imaging problem with sparseness in the curvelet domain and spatial continuity constraints. Our method is tested with a reverse-time {\textquoteleft}wave-equation{\textquoteright} migration code simulating the acoustic wave equation on the SEG-AA salt model.
{\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792928}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/moghaddam07SEGrsi/moghaddam07SEGrsi_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/moghaddam07SEGrsi/moghaddam07SEGrsi.pdf } } @CONFERENCE{min2012CSEGrgfe, author = {Ju-Won Oh and Dong-Joo Min and Felix J. Herrmann}, title = {Re-establishment of gradient in frequency-domain elastic waveform inversion}, booktitle = {CSEG technical program}, year = {2012}, abstract = {To obtain solutions close to the global minimum in waveform inversion, the gradients computed at each frequency need to be weighted to appropriately describe the residuals between modeled and field data. While the low-frequency components of the gradients should be weighted to recover the long-wavelength structures, the high-frequency components of the gradients need to be weighted when the short-wavelength structures are restored. However, the conventional elastic waveform inversion algorithms cannot properly weight the gradients computed at each frequency. When gradients are scaled using the pseudo-Hessian matrix inside the frequency loop, gradients obtained at high frequencies are over-emphasized. When the gradients are scaled outside the frequency loop, gradients are weighted by the source spectra. In this study, we propose applying weighting factors to the gradients obtained at each frequency so that gradients can properly reflect the differences between the true and assumed models satisfying the general inverse theory. The weighting factors are composed of the backpropagated residuals. Numerical examples for the simple rectangular-shaped model and the modified version of the Marmousi-2 model show that the weighting method enhances gradient images and inversion results compared to the conventional inversion algorithms.}, keywords = {elastic, waveform inversion, frequency-domain, weighting factors}, month = {02}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2012/min2012CSEGrgfe/min2012CSEGrgfe.pdf} } @CONFERENCE{min2012EAGEefwi, author = {Ju-Won Oh and Dong-Joo Min and Felix J. Herrmann}, title = {Frequency-domain elastic waveform inversion using weighting factors related to source-deconvolved residuals}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {One of the limitations in seismic waveform inversion is that inversion results are very sensitive to initial guesses, which may be because the gradients computed at each frequency are not properly weighted depending on given models. Analyzing the conventional waveform inversion algorithms using the pseudo-Hessian matrix as a pre-conditioner shows that the gradients do not properly describe the features of the given models, or high- and low-end frequencies do not contribute to the model parameter updates due to the banded spectra of the source wavelet. For a better waveform inversion algorithm, we propose applying weighting factors to gradients computed at each frequency. The weighting factors are designed using the source-deconvolved back-propagated wavefields.
Numerical results for the SEG/EAGE salt model show that the weighting method improves gradient images, and the inversion results are compatible with the true velocities even with poorly estimated initial guesses.}, keywords = {EAGE, elastic, waveform inversion, frequency-domain, weighting factors}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/min2012EAGEefwi/min2012EAGEefwi.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59623} } @CONFERENCE{ross2008SINBADsit, author = {Sean Ross-Ross}, title = {Seismic inversion through operator overloading}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {Geophysical processing is dominated by many different out-of-core memory software environments (OOCE). Such environments include Madagascar and SU and are designed to handle data that cannot be operated on in memory. Each base operation is created as a main program that reads data from disk and writes the result to disk. The main programs can also be chained together on stdin/out pipes using a shell, only writing data to disk at the end. To be efficient, the algorithm using an OOCE must chain together the longest pipe to avoid disk I/O; as a result, it is very difficult to use iterative techniques. The algorithms, written in shell scripts, can be difficult to read and understand. SLIMpy is a software library that contains definitions of coordinate-free vectors and linear operators. It allows the user to design and run algorithms with any out-of-core package, in a Matlab-style interface, while maintaining optimal efficiency and speed. SLIMpy looks at each main program of each OOCE as a matrix-vector operation or vector reduction/transformation operation. It uses operator overloading to generate an abstract syntax tree (AST) which can be optimized in many ways before executing its commands. The AST also provides a pathway for embarrassingly parallel applications by splitting the tree over different nodes and processors. SLIMpy provides an interface to these OOCE that allows for optimal construction of commands and allows for iterative techniques. It smoothes the transition from other languages such as Matlab and allows the algorithm designer to write readable and reusable code. SLIMpy also adds to OOCE by allowing for easy parallelization.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/ross2008SINBADsit/ross2008SINBADsit.pdf} } @CONFERENCE{saab2008SINBADcps, author = {Rayan Saab}, title = {Curvelet-based primary-multiple separation from a {Bayesian} perspective}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We present a novel primary-multiple separation scheme which makes use of the sparsity of both primaries and multiples in a transform domain, such as the curvelet transform, to provide estimates of each. The proposed algorithm utilizes seismic data as well as the output of a preliminary step that provides (possibly) erroneous predictions of the multiples. The algorithm separates the signal components, i.e., the primaries and multiples, by solving an optimization problem that assumes noisy input data and can be derived from a Bayesian perspective. 
More precisely, the optimization problem can be arrived at via an assumption of a weighted Laplacian distribution for the primary and multiple coefficients in the transform domain and of white Gaussian noise contaminating both the seismic data and the preliminary prediction of the multiples, which both serve as input to the algorithm.}, date-modified = {2008-08-22 12:45:53 -0700}, keywords = {SLIM, SINBAD, Presentation}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/saab2008SINBADcps/saab2008SINBADcp.pdf} } @CONFERENCE{saab2008ICASSPssa, author = {Rayan Saab and Rick Chartrand and Ozgur Yilmaz}, title = {Stable sparse approximations via nonconvex optimization}, booktitle = {ICASSP}, year = {2008}, organization = {ICASSP}, abstract = {We present theoretical results pertaining to the ability of lp minimization to recover sparse and compressible signals from incomplete and noisy measurements. In particular, we extend the results of Cande`s, Romberg and Tao [1] to the p < 1 case. Our results indicate that depending on the restricted isometry constants (see, e.g.,[2] and [3]) and the noise level, lp minimization with certain values of p < 1 provides better theoretical guarantees in terms of stability and robustness than l1 minimization does. This is especially true when the restricted isometry constants are relatively large.}, keywords = {ICASSP}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/ICASSP/2008/saab08ICASSPssa/saab08ICASSPssa.pdf } } @CONFERENCE{saab2007SEGcbp, author = {Rayan Saab and Deli Wang and Ozgur Yilmaz and Felix J. Herrmann}, title = {Curvelet-based primary-multiple separation from a {Bayesian} perspective}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2510-2514}, organization = {SEG}, abstract = {In this abstract, we present a novel primary-multiple separation scheme which makes use of the sparsity of both primaries and multiples in a transform domain, such as the curvelet transform, to provide estimates of each. The proposed algorithm utilizes seismic data as well as the output of a preliminary step that provides (possibly) erroneous predictions of the multiples. The algorithm separates the signal components, i.e., the primaries and multiples, by solving an optimization problem that assumes noisy input data and can be derived from a Bayesian perspective. More precisely, the optimization problem can be arrived at via an assumption of a weighted Laplacian distribution for the primary and multiple coefficients in the transform domain and of white Gaussian noise contaminating both the seismic data and the preliminary prediction of the multiples, which both serve as input to the algorithm. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792988}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/saab07SEGcbp/saab07SEGcbp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/saab07SEGcbp/saab07SEGcbp.pdf } } @CONFERENCE{saab2009SAMPTAnccs, author = {Rayan Saab and Ozgur Yilmaz}, title = {A short note on non-convex compressed sensing}, booktitle = {SAMPTA technical program}, year = {2009}, organization = {SAMPTA}, abstract = {In this note, we summarize the results we recently proved in\cite{SY08} on the theoretical performance guarantees of the decoders $\Delta_p$. 
These decoders rely on $\ell^p$ minimization with $p \in (0,1)$ to recover estimates of sparse and compressible signals from incomplete and inaccurate measurements. Our guarantees generalize the results of \cite{CRT05} and \cite{Wojtaszczyk08} about decoding by $\ell_p$ minimization with $p = 1$, to the setting where $p \in (0,1)$ and are obtained under weaker sufficient conditions. We also present novel extensions of our results in \cite{SY08} that follow from the recent work of DeVore et al. in \cite{DPW08}. Finally, we show some insightful numerical experiments displaying the trade-off in the choice of $p \in (0,1]$ depending on certain properties of the input signal.}, keywords = {Presentation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SAMPTA/2009/saab09SAMPTAnccs/saab09SAMPTAnccs.pdf} } @CONFERENCE{sastry2007SINBADnor, author = {Challa S. Sastry}, title = {Norm-one recovery from irregular sampled data}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {Seismic traces are sampled irregularly and insufficiently due to practical and economical limitations. The use of such data in seismic imaging results in image artifacts and poor spatial resolution. Therefore, before being used, the measurements are to be interpolated onto a regular grid. One of the methods achieving this objective is based on the Fourier reconstruction, which deals with the under-determined system of equations. The recent pursuit techniques (namely, basis pursuit, matching pursuit, etc.) admit certain promising features such as faster and simpler implementation even in large scale settings. The presentation discusses the application of the pursuit algorithms to the Fourier-based interpolation problem for the signals that have sparse Fourier spectra. In particular, the objective of the presentation includes: 1) studying the performance of the algorithm if, and how far, the measurement coordinates can be shifted from uniform distribution on the continuous interval; 2) studying what could be the allowable misplacement in the measurement coordinates that does not alter the quality of the reconstruction process.}, keywords = {SLIM, SINBAD, Presentation} } @CONFERENCE{challa2007EAGEsrf, author = {Challa S. Sastry and Gilles Hennenfent and Felix J. Herrmann}, title = {Signal reconstruction from incomplete and misplaced measurements}, booktitle = {EAGE}, year = {2007}, month = {06}, abstract = {Constrained by practical and economical considerations, one often uses seismic data with missing traces. The use of such data results in image artifacts and poor spatial resolution. Sometimes due to practical limitations, measurements may be available on a perturbed grid, instead of on the designated grid. Due to algorithmic requirements, when such measurements are viewed as those on the designated grid, the recovery procedures may result in additional artifacts. This paper interpolates incomplete data onto a regular grid via the Fourier domain, using a recently developed greedy algorithm. The basic objective is to study experimentally how large the perturbation in the measurement coordinates can be while still allowing the measurements on the perturbed grid to be treated as measurements on the designated grid for faithful recovery. 
Our experimental work shows that for compressible signals, a uniformly distributed perturbation can be offset with slightly more number of measurements.}, keywords = {SLIM, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/challa07EAGEsrf/challa07EAGEsrf.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6917} } @CONFERENCE{sastry2007SINBADrfu, author = {Challa S. Sastry and Gilles Hennenfent and Felix J. Herrmann}, title = {Recovery from unstructured data}, booktitle = {SINBAD 2006}, year = {2006}, keywords = {SLIM, SINBAD, Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/sastry2007SINBADrfu/sastry2007SINBADrfu.pdf} } @CONFERENCE{shahidi2009SEGcmf, author = {Reza Shahidi and Felix J. Herrmann}, title = {Curvelet-domain matched filtering with frequency-domain regularization}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3645-3649}, organization = {SEG}, abstract = {In Herrmann et al. (2008), it is shown that zero-order pseudodifferential operators, which model the migration-demigration operator and the operator mapping the predicted multiples to the true multiples, can be represented by a diagonal weighting in the curvelet domain. In that paper, a smoothness constraint was introduced in the phase space of the operator in order to regularize the solution to make it unique. In this paper, we use recent results in Demanet and Ying (2008) on the discrete symbol calculus to impose a further smoothness constraint, this time in the frequency domain. It is found that with this additional constraint, faster convergence is realized. Results on a synthetic pseudodifferential operator as well as on an example of primary-multiple separation in seismic data are included, comparing the model with and without the new smoothness constraint, from which it is found that results of improved quality are also obtained.}, keywords = {Presentation,SEG}, doi = {10.1190/1.3255624}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/shahidi09SEGcmf/shahidi09segcmf.pdf} } @CONFERENCE{tang2009SEGhdb, author = {Gang Tang and Reza Shahidi and Felix J. Herrmann and Jianwei Ma}, title = {Higher dimensional blue-noise sampling schemes for curvelet-based seismic data recovery}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {191-195}, organization = {SEG}, abstract = {In combination with compressive sensing, a successful reconstruction scheme called Curvelet-based Recovery by Sparsity-promoting Inversion (CRSI) has been developed, and has proven to be useful for seismic data processing. One of the most important issues for CRSI is the sampling scheme, which can greatly affect the quality of reconstruction. Unlike usual regular undersampling, stochastic sampling can convert aliases to easy-to-eliminate noise. Some stochastic sampling methods have been developed for CRSI, e.g. jittered sampling, however most have only been applied to 1D sampling along a line. Seismic datasets are usually higher dimensional and very large, thus it is desirable and often necessary to develop higher dimensional sampling methods to deal with these data. For dimensions higher than one, few results have been reported, except uniform random sampling, which does not perform well. 
In the present paper, we explore 2D sampling methodologies for curvelet-based reconstruction, possessing sampling spectra with blue noise characteristics, such as Poisson Disk sampling, Farthest Point Sampling, and the 2D extension of jittered sampling. These sampling methods are shown to lead to better recovery, and results are compared to other, more traditional sampling protocols.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255230}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/tang09SEGhdb/tang09SEGhdb.pdf} } @CONFERENCE{thomson2006SINBADlss, author = {Darren Thomson}, title = {Large-scale seismic data recovery by the parallel windowed curvelet transform}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {We propose using overlapping, tapered windows to process seismic data in parallel. This method consists of numerically tight linear operators and adjoints that are suitable for use in iterative algorithms. This method is also highly scalable and makes parallel processing of large seismic data sets feasible. We use this scheme to define the Parallel Windowed Fast Discrete Curvelet Transform (PWFDCT), which we have applied to a seismic data interpolation algorithm. Some preliminary results will be shown. Henryk Modzelewski: Design and specifications for SLIMPy's software framework The SLIM group is actively developing software for seismic imaging. This talk will give a general overview of the software development philosophy adopted by SLIM. The covered topics will include: 1) adopting Python for object-oriented programming, 2) including parallelism into the algorithms used in seismic imaging/modeling, 3) in-house algorithms for seismic imaging, and 4) contributions to Madagascar (RSF). The talk will serve as an introduction to the other presentations in the session "SINBAD Software releases".}, keywords = {SLIM, SINBAD, Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/thomson2006SINBADlss/thomson2006SINBADlss.pdf} } @CONFERENCE{thomson2006SINBADppe, author = {Darren Thomson}, title = {{(P)SLIMPy}: parallel extension}, booktitle = {SINBAD 2006}, year = {2006}, abstract = {The parallel extensions to the SLIMpy environment enable pipe-based processing of large data sets in an MPI-based parallel environment. Parallel processing can be done by straightforward slicing of data, or by using an overlapping domain decomposition that requires communication between different processors. The principal aim of the parallel extensions is to leave abstract numerical algorithms (ANA's) and applications programmed for use in SLIMpy untouched when moving to parallel processing. The object-oriented functionality of Python makes this possible.}, keywords = {SLIM, SINBAD, Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2006/thomson2006SINBADppe/thomson2006SINBADppe.pdf} } @CONFERENCE{thomson2006SEGpwfd, author = {Darren Thomson and Gilles Hennenfent and Henryk Modzelewski and Felix J. Herrmann}, title = {A parallel windowed fast discrete curvelet transform applied to seismic processing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2006}, volume = {25}, pages = {2767-2771}, organization = {SEG}, abstract = {We propose using overlapping, tapered windows to process seismic data in parallel. This method consists of numerically tight linear operators and adjoints that are suitable for use in iterative algorithms. 
This method is also highly scalable and makes parallel processing of large seismic data sets feasible. We use this scheme to define the Parallel Windowed Fast Discrete Curvelet Transform (PWFDCT), which we apply to a seismic data interpolation algorithm. The successful performance of our parallel processing scheme and algorithm on a two-dimensional synthetic data is shown.}, keywords = {SEG}, doi = {10.1190/1.2370099}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2006/thomson06SEGpwfd/thomson06SEGpwfd.pdf } } @CONFERENCE{tu2012EAGElsm, author = {Ning Tu and Felix J. Herrmann}, title = {Least-squares migration of full wavefield with source encoding}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {Multiples can provide valuable information that is missing in primaries, and there is a growing interest in using them for seismic imaging. In our earlier work, we proposed to combine primary estimation and migration to image from the total up-going wavefield. The method proves to be effective but computationally expensive. In this abstract, we propose to reduce the computational cost by removing the multi-dimensional convolution required by primary estimation, and reducing the number of PDE solves in migration by introducing simultaneous sources with source renewal. We gain great performance boost without compromising the quality of the image.}, keywords = {EAGE, depth migration, surface-related multiples}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/tu2012EAGElsm/tu2012EAGElsm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/tu2012EAGElsm/tu2012EAGElsm.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59688} } @CONFERENCE{tu2012SEGima, author = {Ning Tu and Felix J. Herrmann}, title = {Imaging with multiples accelerated by message passing}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2012}, month = {11}, volume = {31}, pages = {1-6}, organization = {SEG}, abstract = {With the growing realization that multiples can provide valuable information, there is a paradigm shift from removing them to using them. For instance, primary estimation by sparse inversion has demonstrated its superiority over surface-related multiple removal in many aspects. Inspired by this shift, we propose a method to image directly from the total up-going wavefield, including surface-related multiples, by sparse inversion. To address the high computational cost associated with this method, we propose to speed up the inversion by having the wave-equation solver carry out the multi-dimensional convolutions implicitly and cheaply by randomized subsampling. We improve the overall performance of this algorithm by selecting new independent copies of the randomized modeling operator, which leads to a cancellation of correlations that hamper the speed of convergence of the solver. We show the merits of our approach on a number of examples.}, keywords = {SEG, imaging, multiples}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/tu2012SEGima/tu2012SEGima_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/tu2012SEGima/tu2012SEGima.pdf}, doi = {10.1190/segam2012-1552.1} } @CONFERENCE{tu2011EAGEspmsrm, author = {Ning Tu and Tim T.Y. Lin and Felix J. 
Herrmann}, title = {Sparsity-promoting migration with surface-related multiples}, booktitle = {EAGE}, year = {2011}, month = {05}, abstract = {Multiples, especially the surface-related multiples, form a significant part of the total up-going wavefield. If not properly dealt with, they can lead to false reflectors in the final image. So, conventionally, practitioners remove them prior to migration. Recently, research has revealed that multiples can actually provide extra illumination, so different methods have been proposed to address how to use multiples in seismic imaging, but with various kinds of limitations. In this abstract, we combine primary estimation and sparsity-promoting migration into one convex-optimization process to include information from multiples. Synthetic examples show that multiples do make active contributions to seismic migration. Also, by this combination, we can benefit from better recoveries of the Green's function by using sparsity-promoting algorithms since reflectivity is sparser than the Green's function.}, keywords = {Presentation, EAGE, imaging, processing}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/tu11EAGEspmsrm/tu11EAGEspmsrm_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2011/tu11EAGEspmsrm/tu11EAGEspmsrm.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50369} } @CONFERENCE{tu2011SEGmult, author = {Ning Tu and Tim T.Y. Lin and Felix J. Herrmann}, title = {Migration with surface-related multiples from incomplete seismic data}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {3222-3227}, organization = {SEG}, abstract = {Seismic acquisition is confined by limited aperture that leads to finite illumination, which, together with other factors, hinders imaging of subsurface objects in complex geological settings such as salt structures. Conventional processing, including surface-related multiple elimination, further reduces the amount of information we can get from seismic data. With the growing consensus that multiples carry valuable information that is missing from primaries, we are motivated to exploit the extra illumination provided by multiples to image the subsurface. In earlier research, we proposed such a method by combining primary estimation and sparsity-promoting migration to invert for model perturbations directly from the total up-going wavefield. In this abstract, we focus on a particular case. 
By exploiting the extra illumination from surface-related multiples, we mitigate the effects caused by migrating from incomplete data with missing sources and missing near-offsets.}, keywords = {Presentation, SEG, imaging, processing}, doi = {10.1190/1.3627865}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/tu11SEGmult/tu11SEGmult_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/tu11SEGmult/tu11SEGmult.pdf} } @CONFERENCE{vandenberg2008IAMesr, author = {Ewout {van den Berg}}, title = {Exact sparse reconstruction and neighbourly polytopes}, booktitle = {IAM}, year = {2008}, bdsk-url-1 = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/IAM/2008/vandenberg2008IAMesr/vandenberg2008IAMesr.pdf}, date-added = {2008-08-26 15:44:44 -0700}, date-modified = {2008-08-26 15:45:58 -0700}, keywords = {SLIM, IAM, Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/IAM/2008/vandenberg2008IAMesr/vandenberg2008IAMesr.pdf} } @CONFERENCE{vandenberg2008SINBADsat, author = {Ewout {van den Berg}}, title = {Sparco: A testing framework for sparse reconstruction}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {Sparco is a framework for testing and benchmarking algorithms for sparse reconstruction. It includes a large collection of sparse reconstruction problems drawn from the imaging, compressed sensing, and geophysics literature. Sparco is also a framework for implementing new test problems and can be used as a tool for reproducible research. We describe the software environment, and demonstrate its usefulness for testing and comparing solvers for sparse reconstruction.}, date-modified = {2008-08-22 12:54:25 -0700}, keywords = {SLIM, SINBAD, Presentation}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/vandenberg2008SINBADsat/vandenberg2008SINBADsat.pdf} } @CONFERENCE{friedlander2009SCAIMspot, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {Spot: A linear-operator toolbox for Matlab}, booktitle = {SCAIM}, year = {2009}, address = {University of British Columbia}, organization = {SCAIM Seminar}, keywords = {minimization, Presentation, SLIM}, presentation = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2009/VandenBerg-Mon-1130.pdf} } @CONFERENCE{vandenberg2007SINBADipo1, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {In pursuit of a root}, booktitle = {2007 Von Neumann Symposium}, year = {2007}, keywords = {minimization, Presentation, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2007/vandenberg2007SINBADipo1/vandenberg2007SINBADipo1.pdf} } @CONFERENCE{vandenberg2009SLIMocf, author = {Ewout {van den Berg} and Mark Schmidt and Michael P. Friedlander and K. Murphy}, title = {Optimizing costly functions with simple constraints: a limited-memory projected {Quasi-Newton} algorithm}, booktitle = {SLIM}, year = {2009}, volume = {12}, series = {Twelfth International Conference on Artificial Intelligence and Statistics}, abstract = {An optimization algorithm for minimizing a smooth function over a convex set is described. Each iteration of the method computes a descent direction by minimizing, over the original constraints, a diagonal-plus low-rank quadratic approximation to the function. The quadratic approximation is constructed using a limited-memory quasi-Newton update. 
The method is suitable for large-scale problems where evaluation of the function is substantially more expensive than projection onto the constraint set. Numerical experiments on one-norm regularized test problems indicate that the proposed method is competitive with state-of-the-art methods such as bound-constrained L-BFGS and orthant-wise descent. We further show that the method generalizes to a wide class of problems, and substantially improves on state-of-the-art methods for problems such as learning the structure of Gaussian graphical models (involving positive-definite matrix constraints) and Markov random fields (involving second-order cone constraints).}, date-added = {2009-01-29 17:16:34 -0800}, date-modified = {2009-01-29 17:16:34 -0800}, keywords = {SLIM}, month = {04}, url = {http://www.cs.ubc.ca/~mpf/papers/SchmidtBergFriedMurph09.pdf} } @CONFERENCE{vanderneut2012EAGEdecomp, author = {Joost {van der Neut} and Felix J. Herrmann}, title = {Up / down wavefield decomposition by sparse inversion}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {Expressions have been derived for the decomposition of multi-component seismic recordings into up- and down-going constituents. However, these expressions contain singularities at critical angles and can be sensitive to noise. By interpreting wavefield decomposition as an inverse problem and imposing constraints on the sparseness of the solution, we arrive at a robust formalism that can be applied to noisy data. The method is demonstrated on synthetic data with multi-component receivers in a horizontal borehole, but can also be applied to different configurations, including OBC and dual-sensor streamers.}, keywords = {EAGE, wavefield decomposition, sparse inversion}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanderneut2012EAGEdecomp/vanderneut2012EAGEdecomp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanderneut2012EAGEdecomp/vanderneut2012EAGEdecomp.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58907} } @CONFERENCE{vanderneut2012SEGirs, author = {Joost {van der Neut} and Felix J. Herrmann and Kees Wapenaar}, title = {Interferometric redatuming with simultaneous and missing sources using sparsity promotion in the curvelet domain}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2012}, month = {11}, volume = {31}, pages = {1-7}, organization = {SEG}, abstract = {Interferometric redatuming is a velocity-independent method to turn downhole receivers into virtual sources. Accurate redatuming involves solving an inverse problem, which can be highly ill-posed, especially in the presence of noise, incomplete data and limited aperture. We address these issues by combining interferometric redatuming with transform-domain sparsity promotion, leading to a formulation that deals with data imperfections. We show that sparsity promotion improves the retrieval of virtual shot records under a salt flank. To reduce acquisition costs, it can be beneficial to reduce the number of sources or shoot them simultaneously. 
It is shown that sparse inversion can still provide a stable solution in such cases.}, keywords = {processing, imaging, optimization, interferometry, SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2012/vanderneut2012SEGirs/vanderneut2012SEGirs.pdf}, doi = {10.1190/segam2012-0566.1} } @CONFERENCE{vanleeuwen2012EAGEcarpcg, author = {Tristan van Leeuwen and Dan Gordon and Rachel Gordon and Felix J. Herrmann}, title = {Preconditioning the {Helmholtz} equation via row-projections}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {3D frequency-domain full waveform inversion relies on being able to efficiently solve the 3D Helmholtz equation. Iterative methods require sophisticated preconditioners because the Helmholtz matrix is typically indefinite. We review a preconditioning technique that is based on row-projections. Notable advantages of this preconditioner over existing ones are that it has low algorithmic complexity, is easily parallelizable and extendable to time-harmonic vector equations.}, keywords = {EAGE, Helmholtz equation, precondition}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEcarpcg/vanleeuwen2012EAGEcarpcg_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEcarpcg/vanleeuwen2012EAGEcarpcg.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58891} } @CONFERENCE{vanleeuwen2012EAGEext, author = {Tristan {van Leeuwen} and Felix J. Herrmann}, title = {Wave-equation extended images: computation and velocity continuation}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {An extended image is a multi-dimensional correlation of source and receiver wavefields. For a kinematically correct velocity, most of the energy will be concentrated at zero offset. Because of the computational cost involved in correlating the wavefields for all offsets, such extended images are computed for a subsurface offset that is aligned with the local dip. In this paper, we present an efficient way to compute extended images for all subsurface offsets without explicitly calculating the receiver wavefields, thus making it computationally feasible to compute such extended images. We show how more conventional image gathers, where the offset is aligned with the dip, can be extracted from this extended image. We also present a velocity continuation procedure that allows us to compute the extended image for a given velocity without recomputing all the source wavefields.}, keywords = {EAGE, extended image, velocity continuation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEext/vanleeuwen2012EAGEext_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEext/vanleeuwen2012EAGEext.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59616} } @CONFERENCE{verschuur2007SEGmmp, author = {D. J. Verschuur and Deli Wang and Felix J. Herrmann}, title = {Multiterm multiple prediction using separated reflections and diffractions combined with curvelet-based subtraction}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2535-2539}, organization = {SEG}, abstract = {The surface-related multiple elimination (SRME) method has proven to be successful on a large number of data cases. 
Most of the applications are still 2D, as the full 3D implementation is still expensive and under development. However, the earth is a 3D medium, such that 3D effects are difficult to avoid. Most of the 3D effects come from diffractive structures, whereas the specular reflections normally have less of a 3D behavior. By separating the seismic data into a specular reflecting and a diffractive part, multiple prediction can be carried out with these different subsets of the input data, resulting in several categories of predicted multiples. Because each category of predicted multiples can be subtracted from the input data with different adaptation filters, a more flexible SRME procedure is obtained. Based on some initial results from a Gulf of Mexico dataset, the potential of this approach is investigated. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792993}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/verschuur07SEGmmp/verschuur07SEGmmp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/verschuur07SEGmmp/verschuur07SEGmmp.pdf } } @CONFERENCE{wang2008SINBADrri, author = {Deli Wang and Rayan Saab and Ozgur Yilmaz and Felix J. Herrmann}, title = {Recent results in curvelet-based primary-multiple separation}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We present a nonlinear curvelet-based sparsity-promoting formulation for the primary-multiple separation problem. We show that these coherent signal components can be separated robustly by explicitly exploiting the locality of curvelets in phase space (space-spatial frequency plane) and their ability to compress data volumes that contain wavefronts. This work is an extension of earlier results and the presented algorithms are shown to be stable under noise and moderately erroneous multiple predictions.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/wang2008SINBADrri/wang2008SINBADrri.pdf} } @CONFERENCE{wang2007SEGrri, author = {Deli Wang and Rayan Saab and Ozgur Yilmaz and Felix J. Herrmann}, title = {Recent results in curvelet-based primary-multiple separation: application to real data}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2007}, volume = {26}, pages = {2500-2504}, organization = {SEG}, abstract = {In this abstract, we present a nonlinear curvelet-based sparsity-promoting formulation for the primary-multiple separation problem. We show that these coherent signal components can be separated robustly by explicitly exploiting the locality of curvelets in phase space (space-spatial frequency plane) and their ability to compress data volumes that contain wavefronts. This work is an extension of earlier results and the presented algorithms are shown to be stable under noise and moderately erroneous multiple predictions. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.2792986}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/wang07SEGrri/wang07SEGrri_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2007/wang07SEGrri/wang07SEGrri.pdf } } @CONFERENCE{wason2012CSEGode, author = {Haneet Wason and Felix J. 
Herrmann}, title = {Only dither: efficient simultaneous marine acquisition}, booktitle = {CSEG technical program}, year = {2012}, abstract = {Simultaneous-source acquisition is an emerging technology that is stimulating both geophysical research and commercial efforts. Simultaneous marine acquisition calls for the development of a new set of design principles and post-processing tools. The focus here is on simultaneous-source marine acquisition design and sparsity-promoting sequential-source data recovery. We propose a pragmatic simultaneous-source, randomized marine acquisition scheme where multiple vessels sail across an ocean-bottom array firing airguns at --- sequential locations and randomly time-dithered instances. By leveraging established findings from the field of compressive sensing, where the choice of the sparsifying transform needs to be incoherent with the compressive sampling matrix, we can significantly impact the reconstruction quality, and demonstrate that the compressive sampling matrix resulting from the proposed sampling scheme is sufficiently incoherent with the curvelet transform to yield successful recovery by sparsity promotion. Results are illustrated with simulations of “purely” random marine acquisition, which requires an airgun to be located at each source location, and random time-dithering marine acquisition with one and two source vessels. Size of the collected data volumes in all cases is the same. Compared to the recovery from the former acquisition scheme (SNR = 10.5dB), we get good results by dithering with only one source vessel (SNR = 8.06dB) in the latter scheme, which improve at the cost of having an additional source vessel (SNR = 9.85dB).}, keywords = {CSEG, acquisition, marine, simultaneous}, month = {02}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2012/wason2012CSEGode/wason2012CSEGode.pdf} } @CONFERENCE{wason2012EAGEode, author = {Haneet Wason and Felix J. Herrmann}, title = {Only dither: efficient simultaneous marine acquisition}, booktitle = {EAGE}, year = {2012}, month = {06}, abstract = {Simultaneous-source acquisition is an emerging technology that is stimulating both geophysical research and commercial efforts. The focus here is on simultaneous-source marine acquisition design and sparsity-promoting sequential-source data recovery. We propose a pragmatic simultaneous-source, randomized marine acquisition scheme where multiple vessels sail across an ocean-bottom array firing airguns at --- sequential locations and randomly time-dithered instances. Within the context of compressive sensing, where the choice of the sparsifying transform needs to be incoherent with the compressive sampling matrix, we can significantly impact the reconstruction quality, and demonstrate that the compressive sampling matrix resulting from the proposed sampling scheme is sufficiently incoherent with the curvelet transform to yield successful recovery by sparsity promotion. Results are illustrated with simulations of ``purely" random marine acquisition, which requires an airgun to be located at each source location, and random time-dithering marine acquisition with one and two source vessels. Size of the collected data volumes in all cases is the same. 
Compared to the recovery from the former acquisition scheme (SNR = 10.5dB), we get good results by dithering with only one source vessel (SNR = 8.06dB) in the latter scheme, which improve at the cost of having an additional source vessel (SNR = 9.44dB).}, keywords = {EAGE, acquisition, marine, simultaneous}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/wason2012EAGEode/wason2012EAGEode_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2012/wason2012EAGEode/wason2012EAGEode.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58915} } @CONFERENCE{wason2011SEGsprsd, author = {Haneet Wason and Felix J. Herrmann and Tim T.Y. Lin}, title = {Sparsity-promoting recovery from simultaneous data: a compressive sensing approach}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2011}, month = {09}, volume = {30}, pages = {6-10}, organization = {SEG}, abstract = {Seismic data acquisition forms one of the main bottlenecks in seismic imaging and inversion. The high cost of acquisition work and collection of massive data volumes compel the adoption of simultaneous-source seismic data acquisition - an emerging technology that is developing rapidly, stimulating both geophysical research and commercial efforts. Aimed at improving the performance of marine- and land-acquisition crews, simultaneous acquisition calls for development of a new set of design principles and post-processing tools. Leveraging developments from the field of compressive sensing the focus here is on simultaneous-acquisition design and sequential-source data recovery. Apart from proper compressive sensing sampling schemes, the recovery from simultaneous simulations depends on a sparsifying transform that compresses seismic data, is fast, and reasonably incoherent with the compressive-sampling matrix. Using the curvelet transform, in which seismic data can be represented parsimoniously, the recovery of the sequential-source data volumes is achieved using the sparsity-promoting program {\textemdash} SPGL1, a solver based on projected spectral gradients. The main outcome of this approach is a new technology where acquisition related costs are no longer determined by the stringent Nyquist sampling criteria.}, keywords = {Presentation, SEG, acquisition, compressive sensing}, doi = {10.1190/1.3628174}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/wason11SEGsprsd/wason11SEGsprsd_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2011/wason11SEGsprsd/wason11SEGsprsd.pdf} } @CONFERENCE{yan2008SINBADwru, author = {Jiupeng Yan}, title = {Wavefield reconstruction using simultaneous denoising interpolation vs. denoising after interpolation}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {This report represents and compares two methods of wavefield reconstruction from noisy seismic data with missing traces. The two methods are (i) First interpolate incomplete noisy data to get complete noisy data and then denoise, and (ii) Interpolate and denoise the incomplete noisy data simultaneously. A sample test of synthetic data will be presented. 
The results of tests show that denoising after interpolation is better than simultaneous denoising and interpolation if the parameter of the denoising problem is chosen appropriately.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/yan2008SINBADwru/yan2008SINBADwru.pdf} } @CONFERENCE{yan2009SEGgpb, author = {Jiupeng Yan and Felix J. Herrmann}, title = {Groundroll prediction by interferometry and separation by curvelet-domain matched filtering}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, month = {10}, volume = {28}, pages = {3297-3301}, organization = {SEG}, abstract = {The removal of groundroll in land-based seismic data is a critical step for seismic imaging. In this paper, we introduce a workflow to predict the groundroll by interferometry and then separate the groundroll in the curvelet domain. This workflow is similar to the workflow of surface-related multiple elimination (SRME). By exploiting the adaptability and sparsity of curvelets, we are able to significantly improve the separation of groundroll in comparison to results yielded by frequency-domain adaptive subtraction methods. We provide a synthetic data example to illustrate our claim.}, keywords = {Presentation, SEG}, doi = {10.1190/1.3255544}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/yan09SEGgpb/yan09SEGgpb_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/yan09SEGgpb/yan09SEGgpb.pdf} } @CONFERENCE{yan2009SEGgpb2, author = {Jiupeng Yan and Felix J. Herrmann}, title = {Groundroll prediction by interferometry and separation by curvelet-domain filtering}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2009}, abstract = {The removal of groundroll in land-based seismic data is a critical step for seismic imaging. In this paper, we introduce a workflow to predict the groundroll by interferometry and then separate the groundroll in the curvelet domain. This workflow is similar to the workflow of surface-related multiple elimination (SRME). By exploiting the adaptability and sparsity of curvelets, we are able to significantly improve the separation of groundroll in comparison to results yielded by frequency-domain adaptive subtraction methods. We provide a synthetic data example to illustrate our claim.}, keywords = {Presentation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2009/yan2009SEGgpb2/yan2009SEGgpb2.pdf} } @CONFERENCE{yarham2008SINBADbss, author = {Carson Yarham}, title = {Bayesian signal separation applied to ground-roll removal}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {Accurate and adaptive noise removal is a critical part of seismic processing. Recent developments in signal separation methods have allowed a more flexible and accurate framework in which to perform ground roll and reflector separation. A new Bayesian separation scheme developed at the SLIM group, which contains control parameters to adjust for the uniqueness of specific problems, is used. 
The sensitivity and variation of the control parameters are examined, and the method is applied to synthetic and real data; the results are compared to previous methods.}, date-modified = {2008-08-22 12:42:58 -0700}, keywords = {Presentation, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/yarham2008SINBADbss/yarham2008SINBADbss.pdf} } @CONFERENCE{yarham2007SINBADnsw, author = {Carson Yarham}, title = {Nonlinear surface wave prediction and separation}, booktitle = {SINBAD 2007}, year = {2007}, abstract = {Removal of surface waves is an integral step in seismic processing. There are many standard techniques for removal of this type of coherent noise, such as f-k filtering, but these methods are not always effective. One of the common problems with removal of surface waves is that they tend to be aliased in the frequency domain. This can make removal difficult and affect the frequency content of the reflector signals, as these signals will not be completely separated. As seen in (Hennenfent, G. and F. Herrmann, 2006, Application of stable signal recovery to seismic interpolation), interpolation can be used effectively to resample the seismic record, thus dealiasing the surface waves. This separates the signals in the frequency domain, allowing for a more precise and complete removal. This technique, combined with curvelet-based surface-wave predictions and an iterative L1 separation scheme, can be used to remove surface waves from shot records more completely than with standard techniques.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/EAGE/2007/yarham2007SINBADnsw/yarham2007SINBADnsw.pdf} } @CONFERENCE{yarham2006SEGcgrr, author = {Carson Yarham and Urs Boeniger and Felix J. Herrmann}, title = {Curvelet-based ground roll removal}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2006}, volume = {25}, pages = {2777-2782}, organization = {SEG}, abstract = {We have effectively identified and removed ground roll through a two-step process. The first step is to identify the major components of the ground roll through various methods including multiscale separation, directional or frequency filtering or by any other method that identifies the ground roll. Given this estimate for ground roll, the recorded signal is separated during the second step through a block-coordinate relaxation method that seeks the sparsest set for weighted curvelet coefficients of the ground roll and the sought-after reflectivity. The combination of these two methods allows us to separate out the ground roll signal while preserving the reflector information. Since our method is iterative, we have control of the separation process. We successfully tested our algorithm on a real data set with a complex ground roll and reflector structure.}, keywords = {SEG}, doi = {10.1190/1.2370101}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2006/yarham06SEGcgrr/yarham06SEGcgrr.pdf } } @CONFERENCE{yarham2007EAGEcai, author = {Carson Yarham and Gilles Hennenfent and Felix J. Herrmann}, title = {Curvelet applications in surface wave removal}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, … in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {Ground roll removal of seismic signals can be a challenging prospect. 
Dealing with undersampling that causes aliased waves with amplitudes orders of magnitude higher than the reflector signals, and with low-frequency loss of information due to band ...}, keywords = {SLIM, EAGE}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/EAGE/2007/yarham07EAGEcai/yarham07EAGEcai.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7590} } @CONFERENCE{yarham2008SEGbgr, author = {Carson Yarham and Felix J. Herrmann}, title = {Bayesian ground-roll separation by curvelet-domain sparsity promotion}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {2008}, volume = {27}, pages = {2576-2580}, organization = {SEG}, abstract = {The removal of coherent noise generated by surface waves in land-based seismic data is a prerequisite to imaging the subsurface. These surface waves, termed ground roll, overlay important reflector information in both the t-x and f-k domains. Standard techniques of ground-roll removal commonly alter reflector information. We propose the use of the curvelet domain as a sparsifying transform in which to perform signal-separation techniques that preserve reflector information while increasing ground-roll removal. We look at how this method performs on synthetic data, for which we can build quantitative results, and on a real field data set.}, keywords = {Presentation, SLIM, SEG}, doi = {10.1190/1.3063878}, month = {11}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/yarham08SEGbgr/yarham08SEGbgr_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2008/yarham08SEGbgr/yarham08SEGbgr.pdf } } @CONFERENCE{yarham2004CSEGgrr, author = {Carson Yarham and Felix J. Herrmann and Daniel Trad}, title = {Ground roll removal using non-separable wavelet transforms}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, keywords = {Presentation}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Yarham04CSEGgrr/Yarham04CSEGgrr.pdf} } @CONFERENCE{yarham2004CSEGcpa, author = {Carson Yarham and Daniel Trad and Felix J. Herrmann}, title = {Curvelet processing and imaging: adaptive ground roll removal}, booktitle = {CSEG Technical Program Expanded Abstracts}, year = {2004}, organization = {CSEG}, abstract = {In this paper we present examples of ground roll attenuation for synthetic and real data gathers by using Contourlet and Curvelet transforms. These non-separable wavelet transforms are localized in both the (x,t)- and (k,f)-domains and allow for adaptive separation of signal and ground roll. Both linear and non-linear filtering are discussed using the unique properties of these bases that allow for simultaneous localization in both domains. Even though the linear filtering techniques are encouraging, the true added value of these basis-function techniques becomes apparent when we use these decompositions to adaptively subtract modeled ground roll from data using a non-linear thresholding procedure. 
We show real and synthetic examples, and the results suggest that these directionally selective basis functions provide a useful tool for the removal of coherent noise such as ground roll.}, keywords = {Presentation, SLIM, CSEG}, month = {05}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Yarham04CSEGcpa/Yarham04CSEGcpa_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/CSEG/2004/Yarham04CSEGcpa/yarham04csegcpa.pdf} } @CONFERENCE{yilmaz2008SINBADsse, author = {Ozgur Yilmaz}, title = {Stable sparse expansions via non-convex optimization}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We present theoretical results pertaining to the ability of p-(quasi)norm minimization to recover sparse and compressible signals from incomplete and noisy measurements. In particular, we extend the results of Candes, Romberg and Tao for the 1-norm to the $p < 1$ case. Our results indicate that depending on the restricted isometry constants and the noise level, p-norm minimization with certain values of $p < 1$ provides better theoretical guarantees in terms of stability and robustness compared to 1-norm minimization. This is especially true when the restricted isometry constants are relatively large, or equivalently, when the data is significantly undersampled.}, keywords = {Presentation, SINBAD, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2008/yilmaz2008SINBADsse/yilmaz2008SINBADsse.pdf} } @MASTERSTHESIS{lin08THccl, author = {Tim T.Y. Lin}, title = {Compressed computation of large-scale wavefield extrapolation in inhomogeneous medium}, school = {University of British Columbia}, year = {2008}, type = {masters}, abstract = {In this work an explicit algorithm for the extrapolation of one-way wavefields is proposed which combines recent developments in information theory and theoretical signal processing with the physics of wave propagation. Because of excessive memory requirements, explicit formulations for wave propagation have proven to be a challenge in 3-D. By using ideas from {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright}, we are able to formulate the (inverse) wavefield extrapolation problem on small subsets of the data volume, thereby reducing the size of the operators. Compressed sensing entails a new paradigm for signal recovery that provides conditions under which signals can be recovered from incomplete samplings by \emph{nonlinear} recovery methods that promote sparsity of the to-be-recovered signal. According to this theory, signals can successfully be recovered when the measurement basis is \emph{incoherent} with the representation in which the wavefield is sparse. In this new approach, the eigenfunctions of the Helmholtz operator are recognized as a basis that is incoherent with sparsity transforms that are known to compress seismic wavefields. By casting the wavefield extrapolation problem in this framework, wavefields can successfully be extrapolated in the modal domain, despite evanescent wave modes. The degree to which the wavefield can be recovered depends on the number of missing (evanescent) wave modes and on the complexity of the wavefield. A proof of principle for the {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright} method is given for inverse wavefield extrapolation in 2-D. 
The results show that our method is stable, has reduced dip limitations and handles evanescent waves in inverse extrapolation.}, keywords = {BSc, SLIM}, month = {04}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2008/lin08THccl.pdf} } %----- 2014 (SPRING) -----% @PRESENTATION{dasilva2014SINBADlrp, title = {Low-rank promoting transformations and tensor interpolation - applications to seismic data denoising}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {In this presentation, we extend our previous work in Hierarchical Tucker (HT) tensor completion, which uses an extremely efficient representation for representing high-dimensional tensors exhibiting low-rank structure, to handle subsampled tensors with noisy entries. We consider a ’low-noise’ case, so that the energies of the noise and the signal are nearly indistinguishable, and a ’high-noise’ case, in which the noise energy is now scaled to the amplitude of the entire data volume. For the low-noise case in particular, standard trace-by-trace energy comparisons cannot distinguish noise from signal. We examine the behaviour of noise in terms of the singular values along different matricizations of the data, i.e. reshaping of the tensor along different dimensions. By interpreting this effect in the context of tensor completion, we demonstrate the inefficacy of denoising by this method in the source-receiver domain. In light of this observation, we transform the decimated, noisy data in to the midpoint-offset domain, which promotes low-rank behaviour in the signal and high-rank behaviour in the noise. This distinction between signal and noise allows low-rank interpolation to effectively denoise the signal, without knowledge of the noise location, with only a marginal increase in computational cost. We demonstrate the effectiveness of this approach on a 4D frequency slice.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/dasilva2014SINBADlrp.pdf}, author = {Curt da Silva} } @PRESENTATION{esser2014SINBADsgp, title = {A scaled gradient projection method for total variation regularized full waveform inversion}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {We propose a modification to the quadratic penalty formulation for seismic full waveform inversion proposed by van Leeuwen and Herrmann that includes convex constraints on the model. In particular, we show how to simultaneously constrain the total variation of the slowness squared while enforcing bound constraints to keep it within a physically realistic range. Synthetic experiments show that including total variation regularization can improve the recovery of a high velocity perturbation to a smooth background model.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/esser2014SINBADsgp.pdf}, author = {Ernie Esser} } @PRESENTATION{fang2014SINBADsqn, title = {A stochastic quasi-{Newton} {McMC} method for uncertainty quantification of full-waveform inversion}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {In this work, we present a fast McMC method using the stochastic l-BFGS Hessian to quantify the uncertainty of full-waveform inversion. 
Using the stochastic l-BFGS Hessian, we do not need the assumption that the Hessian of the data misfit is low rank, and we also reduce the computational cost of estimating the Hessian. Numerical results show the capability of this fast McMC method.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/fang2014SINBADsqn.pdf}, author = {Zhilong Fang} } @PRESENTATION{herrmann2014SINBADdns, title = {{DNOISE III} – the next step}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {During this presentation I will discuss our plans for the NSERC Collaborative Research and Development Grant DNOISE III. With this grant, we aim to match the industry contributions of SINBAD dollar-for-dollar.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/herrmann2014SINBADdns.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2014SINBADldm, title = {Latest developments in marine ({4D}) acquisition}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {During this talk, we will show the advantages of randomly dithered marine acquisition with ocean bottom nodes and how randomization techniques from compressive sensing affect the way we think about time-lapse surveys. This is joint work with Felix and Haneet.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/herrmann2014SINBADldm.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2014SINBADrpe, title = {Relax the physics & expand the search space – {FWI} via {Wavefield} {Reconstruction} {Inversion}}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {During this talk, we will present a new formulation of full-waveform inversion that combines the best of full-space constrained methods and reduced-space unconstrained methods. Instead of eliminating the constraint, which leads to the reduced adjoint-state method that underpins most formulations of full-waveform inversion, our method relaxes the PDE constraint by replacing it by an additive (least-squares) penalty term. By using the method of variable projection, we arrive at a formulation that alternates between solving for the wavefield, given the velocity model & data, and solving for velocity-model updates, given the wavefields. We named this method Wavefield Reconstruction Inversion (WRI) because it inverts for the model updates by reconstructing the wavefield everywhere given data observed at the receivers and the physics of the wave equation. During the talk, we present this new method and discuss how the increased search space, now consisting of the wavefields and model, mitigates the effects of local minima. We will also discuss recent extensions including multi-parameter inversion and regularization. This is joint work with Tristan van Leeuwen, Bas Peters, and Ernie Esser.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/herrmann2014SINBADrpe.pdf}, author = {Felix J.
Herrmann} } @PRESENTATION{kumar2014SINBADsfm, title = {{SVD}-free matrix completion for seismic data reconstruction}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {Seismic data interpolation via rank-minimization techniques has recently been introduced in the seismic community. All the existing rank-minimization techniques assume the underlying grid to be regular. Irregularity is one of the common impediments in acquisition. In this work, we study the effect of irregularity on structured-grid techniques and show how we can modify the existing techniques to handle it. Other than irregularity, we often have missing data. We also show that we can tackle both the regularization and interpolation issues simultaneously. The objective of this work is to extend our existing method of interpolation on a structured grid to an unstructured grid. We illustrate the advantages of the modification to the existing methodology using a seismic line from the Gulf of Suez to obtain high quality results for regularization and interpolation, a key application in exploration geophysics.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/kumar2014SINBADsfm.pdf}, author = {Rajiv Kumar} } @PRESENTATION{lago2014SINBADhfw, title = {Heuristics in full-waveform inversion}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {For many full-waveform inversion techniques, the most computationally intensive step is the computation of a numerical solution for the wave equation on every iteration. In the frequency domain approach, this requires the solution of very large, complex, sparse, ill-conditioned linear systems. In this abstract we turn our attention specifically to the CGMN method for solving PDEs, known for being flexible (i.e. it is equally able to treat acoustic data as well as visco-elastic or more complex scenarios), efficient with respect to both memory and computation time, and offering controllable accuracy of the final approximation. We propose an improvement to the known CGMN method by imposing a minimal residual condition, which incurs the storage of one extra model vector. The resulting algorithm, called CRMN, enjoys several interesting properties, such as a monotonically nonincreasing residual norm and a minimal residual, guaranteeing optimal convergence for the relative-residual criterion. We discuss numerical experiments both in an isolated PDE solve and also within the inversion procedure, showing that in a realistic scenario we can expect a speedup of around 25\% when using CRMN rather than CGMN. Joint work with Art Petrenko, Zhilong Fang, Felix J. Herrmann.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/lago2014SINBADhfw.pdf}, author = {Rafael Lago} } @PRESENTATION{lin2014SINBADiit, title = {Implicit interpolation of trace gaps in {REPSI} using auto-convolution terms}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {It is possible to solve the Estimation of Primaries by Sparse Inversion problem from a seismic record with large holes without any explicit data reconstruction, by instead simulating the missing multiple contributions with terms involving auto-convolutions of the primary wavefield.
Exclusion of the unknown data as an inversion variable from the REPSI process is desirable, since it eliminates a significant source of local minima that arises from attempting to invert for the unobserved traces using primary and multiple models that may be far away from the true solution. In this talk we investigate the necessary modifications to the REPSI algorithm to account for the resulting non-linear modeling operator, and demonstrate that just a few auto-convolution terms are enough to satisfactorily mitigate the effects of data gaps during the inversion process.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/lin2014SINBADiit.pdf}, author = {Tim T. Y. Lin} } @PRESENTATION{lin2014SINBADmas, title = {Multilevel acceleration strategy for {REPSI}}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {This talk discusses a multilevel inversion strategy that aims to substantially reduce the computational costs of the Robust Estimation of Primaries by Sparse Inversion algorithm. The proposed method solves early iterations of REPSI at very coarse spatial sampling grids while gradually ramping up the spatial sampling when more accuracy is desired. No changes to the core implementation of the original algorithm are necessary; in addition, only trace decimation, low-pass filtering, and rudimentary interpolation techniques are required.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/lin2014SINBADmas.pdf}, author = {Tim T. Y. Lin} } @PRESENTATION{oghenekohwo2014SINBADrsw, title = {Randomized sampling without repetition in time-lapse seismic surveys}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {In this talk, we will show a method for acquiring time-lapse data, where we do not have to repeat the survey geometry. Our method works provided our acquisition is randomized, where sources and receivers are at random locations on a computational grid, and provided we know the spatial locations of the shots and receivers. In addition, we show the implications of either (sub)sampling more for the baseline and less for the monitor or vice-versa, and how this sampling scheme affects the 4-D signal.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/oghenekohwo2014SINBADrsw.pdf}, author = {Felix Oghenekohwo} } @PRESENTATION{peters2014SINBADmpw, title = {Multi-parameter waveform inversion; exploiting the structure of penalty-methods}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {In this talk I consider the problem of inverting waveforms for multiple medium parameters. The governing PDE is chosen to be the Helmholtz equation with compressibility and buoyancy as the unknowns. Both unknowns occur in the same equation and practice has shown it is very hard to estimate both equally accurately; the buoyancy estimate (or density if a slightly different parametrization is used) is typically much smoother than the compressibility (or velocity). Here I introduce a new waveform inversion algorithm: a full Newton-type method based on a penalty method which adds the PDE constraint as a quadratic penalty term.
This method updates both the 'wavefields' and medium parameters, without explicitly solving PDEs. One of the main advantages is the availability of a sparse Hessian and exact gradient, which are not the result of any PDE solves. We assess whether the availability of the Hessian, which includes information about the coupling between the two medium parameters, can help reconstruct both compressibility and buoyancy.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/peters2014SINBADmpw.pdf}, author = {Bas Peters} } @PRESENTATION{petrenko2014SINBADaih, title = {Accelerating an iterative {Helmholtz} solver with {FPGAs}}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {An implementation of seismic wave simulation on a platform consisting of a conventional host processor and a reconfigurable hardware accelerator is presented. This research is important in the field of exploration for oil and gas resources, where a 3D model of the subsurface is frequently required. By comparing seismic data collected in a real-world survey with synthetic data generated by simulated waves, it is possible to deduce such a model. However, this requires many time-consuming simulations with different Earth models to find the one that best fits the measured data. Speeding up the wave simulations would allow more models to be tried, yielding a more accurate estimate of the subsurface. The reconfigurable hardware accelerator employed in this work is a field programmable gate array (FPGA). FPGAs are computer chips that consist of electronic building blocks that the user can configure and reconfigure to represent their algorithm in hardware. Whereas a traditional processor can be viewed as a pipeline for processing instructions, an FPGA is a pipeline for processing data. The chief advantage of the FPGA is that all the instructions in the algorithm are already hardwired onto the chip. This means that execution time depends only on the amount of data to be processed, and not on the complexity of the algorithm. The main contribution is an implementation of the well-known Kaczmarz row projection algorithm on the FPGA, using techniques of dataflow programming. This kernel is used as the preconditioning step of CGMN, a modified version of the conjugate gradients method that is used to solve the time-harmonic acoustic isotropic constant density wave equation. Using one FPGA accelerator, the current implementation allows seismic wave simulations to be performed over twice as fast, compared to running on one Intel Xeon E5-2670 core. I also discuss the effect of modifications of the algorithm necessitated by the hardware on the convergence properties of CGMN. Finally, a specific plan for future work is set out in order to fully exploit the accelerator platform, and my work is set in its larger context.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/petrenko2014SINBADaih.pdf}, author = {Art Petrenko} } @PRESENTATION{warner2014SINBADawi, title = {Adaptive waveform inversion - {FWI} without cycle skipping}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {Conventional FWI minimises the direct differences between observed and predicted seismic datasets.
Because seismic data are oscillatory, this approach will suffer from the detrimental effects of cycle skipping if the starting model is inaccurate. We reformulate FWI so that it instead adapts the predicted data to the observed data using Wiener filters, and then iterates to improve the model by forcing the Wiener filters towards zero-lag delta functions. This adaptive FWI scheme is demonstrated on synthetic data where it is shown to be immune to cycle skipping, and is able to successfully invert data for which conventional FWI fails entirely. The new method does not require low frequencies or a highly accurate starting model to be successful. Adaptive FWI has some features in common with wave-equation migration velocity analysis, but it works for all types of arrivals including multiples and refractions, and it does not have the high computational costs of WEMVA in 3D.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/warner2014SINBADawi.pdf}, author = {Mike Warner} } @PRESENTATION{zheglova2014SINBADead, title = {Exploring applications of depth stepping in seismic inverse problems}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2014}, abstract = {We are exploring applications of stable depth extrapolation with the full wave equation to imaging and inversion. Depth stepping with the full wave equation can be advantageous compared to time- and frequency-domain modelling if special care is taken to stabilize the depth extrapolator efficiently, since it reduces the higher-dimensional modelling problem to a number of lower-dimensional subproblems. We are interested in exploring applications in inversion, modelling and imaging. For example, just as reverse time migration can be shown to be the gradient of the reduced formulation of the full waveform inversion problem, it is interesting to explore whether a formulation of the inversion problem can be achieved whose gradient can be computed using depth stepping techniques. We are also interested in such applications as preconditioning of iterative methods for the Helmholtz equation and imaging.}, keywords = {presentation, SINBAD, SINBADSPRING2014, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2014/Spring/zheglova2014SINBADead.pdf}, author = {Polina Zheglova} } %----- 2013 (FALL) -----% @PRESENTATION{fang2013SINBADsfwi, title = {Swift {FWI}}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {In the 3D case, there is much more data and modeling is much more expensive. As a result, parallel computing is very important for 3D full-waveform inversion. Both domain decomposition and data decomposition need a large number of parallel computing resources. However, programs based on parallel MATLAB suffer from license limitations. In order to obtain a MATLAB-license-free solution, we use SWIFT, which is a fast and easy parallel scripting language. Once the original MATLAB file is compiled to an executable file, SWIFT can run the code inside the executable file in parallel without using parallel MATLAB. We use SWIFT to compute the objective functions and gradients of different shots in parallel, and test the parallel 3D FWI code with the Overthrust data.
This is joint work with Thomas Lai, Harsh Juneja, and Bas Peters.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/fang2013SINBADsfwi.pdf}, author = {Zhilong Fang} } @PRESENTATION{vanleeuwen2013SINBADsda, title = {Solving the data-augmented wave equation}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {The recently proposed penalty method promises to mitigate some of the non-linearity inherent in full-waveform inversion by relaxing the requirement that the wave-equation needs to be solved exactly. The basic workflow of this new method is as follows: i) solve an overdetermined wave-equation (the data-augmented wave-equation), where the data serves as additional constraints for the wavefields, ii) compute the wavefield-residual by substituting this wavefield in the wave-equation, and iii) correlate the wavefield with the wavefield-residual to obtain a model-update. As opposed to the conventional workflow, no explicit adjoint solve is needed to compute the model-update. However, instead of solving a wave-equation, we need to solve a data-augmented wave-equation. In this talk we explore some of the challenges of solving this data-augmented wave-equation and review some possible solution strategies for both time and frequency-domain applications.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/vanleeuwen2013SINBADsda.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{petrenko2013SINBADaih, title = {Accelerating an iterative {Helmholtz} solver with {FPGAs}}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Solution of the Helmholtz equation is the main computational burden of full-waveform inversion in the frequency domain. For this task we employ the CARP-CG algorithm (Gordon & Gordon 2010), an iterative solver that preconditions the original Helmholtz system into an equivalent symmetric positive definite system and then applies the method of conjugate gradients. Forming the matrix for the new system is not necessary as its multiplicative action on a vector is implemented using a series of projections onto the rows of the original system. Our contribution is implementing CARP-CG for a host + accelerator (FPGA) computing environment. The computational paradigm is one of dataflow: vector and matrix elements are streamed from memory through the accelerator which applies the row projections. The advantage of using an FPGA to process streams of data is that, unless the algorithm is memory-bandwidth limited, computation time is directly proportional to the amount of data. The complexity of the algorithm implemented on the FPGA is irrelevant since all the operations programmed onto the FPGA happen in the same clock tick. In contrast, on a CPU, more complex algorithms require more clock ticks as the instructions are executed sequentially, or with only a small amount of parallelism.
Ongoing work porting the CARP-CG algorithm to the accelerator is presented.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/petrenko2013SINBADaih.pdf}, author = {Art Petrenko} } @PRESENTATION{lago2013SINBADksf, title = {Krylov solvers in frequency domain {FWI}}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {We briefly discuss here several aspects arising from the use of Krylov solvers for frequency domain FWI. Although several powerful preconditioners are in constant development by the linear algebra community targeting this application, some issues, such as the multishot and multifrequency scenarios, as well as advanced Krylov method techniques in combination with these powerful preconditioners, are rarely addressed. We provide an overview of some of the recent research in this regard and discuss the possibility of using some of these techniques in the context of an inversion.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/lago2013SINBADksf.pdf}, author = {Rafael Lago} } @PRESENTATION{yeung2013SINBADcsr, title = {Compressed sensing, recovery of signals using random {Turbo} matrices}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Compressed sensing is an emerging technique that allows us to recover an image using far fewer measurements than classical sampling techniques. Designing measurement matrices with certain properties is critical to this task. Gaussian matrices are most commonly used. We discover a new class of random matrices that can outperform the Gaussian matrices when we take an outrageously small number of samples.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/yeung2013SINBADcsr.pdf}, author = {Enrico Au-Yeung} } @PRESENTATION{wang2013SINBADnru, title = {Noise reduction by using interferometric measurements}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {The interferometric formulation of the linear wave-based inversion problem was proposed by Demanet and Jugnon recently. Instead of directly fitting the data, they proposed to fit a subset of the data's cross-correlation. It can be verified that if the full cross-correlation is used, then the problem is equivalent to the usual least-squares problem. The subsampling, which is usually considered to cause instability to the solution, is surprisingly useful in this setting. Numerical experiments for the inverse source problem and the inverse scattering problem have both suggested that a 'good' sampling strategy can actually increase the stability under modeling error caused by the uncertainty of a kinetic nature.
We will study the mathematical mechanism behind this phenomenon, and try to see whether or not there exists a universally 'good' sampling strategy independent of the types of forward operators we use.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/wang2013SINBADnru.pdf}, author = {Rongrong Wang} } @PRESENTATION{esser2013SINBADasr, title = {Applications of phase retrieval methods to blind seismic deconvolution}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Phase retrieval is the non-convex optimization problem of recovering a signal from magnitudes of complex linear measurements. Solving convex semi-definite program (SDP) relaxations has been shown to be a robust approach, but it remains too expensive to apply to large problems. We will discuss methods for accelerating computations and explore applications to seismic deconvolution.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/esser2013SINBADasr.pdf}, author = {Ernie Esser} } @PRESENTATION{hargreaves2013SINBADbfo, title = {The bridge from orthogonal to redundant transforms and weighted $\ell_1$ optimization}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Traditional arguments in synthesis $\ell_1$-optimization require our forward operator to be orthogonal, though we use redundant transforms in practice. These traditional arguments do not translate to redundant transforms, and other arguments require impractical conditions on our effective measurement matrix. Recent theory in one-norm analysis, namely the optimal dual $\ell_1$ analysis of Shidong et al., has provided point-wise reconstruction error estimates for synthesis using an equivalence relationship where we can use weaker assumptions. This exposes an important model assumption indicating why analysis might outperform synthesis, for which careful consideration in seismic is necessary, and the need for models such as the cosparse model. In this talk we will discuss these ideas, provide evidence which indicates this theory should generalize to uniform error estimates (and thus not signal-dependent), and discuss how redundancy, support information, and weighting play important roles.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/hargreaves2013SINBADbfo.pdf}, author = {Brock Hargreaves} } @PRESENTATION{goh2013SINBADttt, title = {Taming time through tangents}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Given two vectors of (possibly) different lengths, the edit distance considers all possible alignments between the two and picks the one that minimizes the number of operations needed to turn one into the other.
Though highly non-smooth and riddled with local minima, we show a way to compute the convex envelope of this function, which opens the door to using the approximate edit distance as a surrogate for the L2 distance and comparing vectors of different lengths.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/goh2013SINBADttt.pdf}, author = {Gabriel Goh} } @PRESENTATION{macedo2013SINBADdap, title = {A dual approach to {PhaseLift} via gauge programming and bundle methods}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {A feature common to many sparse optimization problems is that the number of variables may be significantly larger than the number of constraints; e.g., the matrix-lifting approach taken by PhaseLift for phase retrieval results in a problem where the number of variables is quadratic in the number of constraints. We consider a duality framework and numerical methods to leverage the relatively small number of constraints. Preliminary numerical results illustrate our approach and its flexibility.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/macedo2013SINBADdap.pdf}, author = {Ives Macedo} } @PRESENTATION{nutini2013SINBADpcb, title = {Putting the curvature back into sparse solvers}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {For many problems in signal and image processing, we seek a sparse solution that approximately solves the problem Ax $\approx$ b, where A is an m-by-n matrix and b is an m-vector. Many of the most used approaches to this problem, such as iterative soft thresholding and SPGL1, are first-order methods. As a result, these methods can sometimes be slow to converge. In this talk, we present an approach that takes advantage of the easily-obtainable second-order information. By exploiting this available second-order information, we are able to put the curvature back into sparse solvers and improve upon the convergence rates of existing solvers.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/nutini2013SINBADpcb.pdf}, author = {Julie Nutini} } @PRESENTATION{pong2013SINBADppg, title = {The proximal-proximal gradient algorithm}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {In many applications, one has to minimize the sum of a smooth loss function modeling misfit and a regularization term inducing structures. In this talk, we consider the case when the regularization is a composition of a convex function, whose proximal mapping is easy to compute, and a nonzero linear map. Such instances arise in system identification and realization problems. In this talk, we present a new algorithm, the proximal-proximal gradient algorithm, which admits easy subproblems. Our algorithm reduces to the proximal gradient algorithm if the linear map is just the identity map, and can be viewed as a "very inexact" version of the inexact proximal gradient algorithm.
We show that the whole sequence generated from the algorithm converges to an optimal solution, and establish an upper bound on the iteration complexity.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/pong2013SINBADppg.pdf}, author = {Ting Kei Pong} } @PRESENTATION{akalin2013SINBADmtc, title = {Matrix and tensor completion for large-scale seismic interpolation: a comparative study}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Owing to their high dimensionality, interpolating 3D seismic data volumes remains a computationally daunting task. In this work, we outline a comprehensive framework for sampling and interpolating such volumes based on the well-understood theory of Matrix and Tensor completion. This interpolation theory consists of three major components: signal structure, structure-destroying sampling, and structure-restoring optimization. By viewing interpolation in the context of this theory, we are able to specify exactly when these approaches are expected to perform well. We also introduce structure-revealing transformations that promote the inherent low-rank structure in seismic data as well as a factorization approach that scales to large problem sizes. Our methods are able to handle large-scale data volumes more accurately and more quickly compared to other more ad-hoc approaches, as we will demonstrate. This is joint work with Curt Da Silva, Rajiv Kumar, Ben Recht, and Felix J. Herrmann.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/akalin2013SINBADmtc.pdf}, author = {Okan Akalin} } @PRESENTATION{dasilva2013SINBADstf, title = {Structured tensor formats for missing-trace interpolation and beyond}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {High-dimensional data, alternatively known as tensors, occurs in a variety of seismic problems. By exploiting the fact that seismic data can be well represented as a structured tensor, we design algorithms that operate on much lower dimensional parameters. In this talk, we will review some recent developments in interpolating seismic data volumes in the so-called Hierarchical Tucker format as well as demonstrate the need for such formats when tackling high-dimensional problems such as Uncertainty Quantification.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/dasilva2013SINBADstf.pdf}, author = {Curt da Silva} } @PRESENTATION{kumar2013SINBADwrs, title = {Wavefield reconstruction with {SVD}-free low-rank matrix factorization}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {As shown in the past, we can leverage ideas from the field of compressed sensing to cast problems like seismic data interpolation or sequential shot data recovery from simultaneous data as compressed sensing problems. In this work we will show how we can borrow the same ideas of compressed sensing and cast these problems as matrix completion problems. Instead of sparsity, we will show that we can exploit the low-rank structure of seismic data to solve these problems. One of the impediments in the rank-minimization problem is the computation of singular values.
We will also show how we can solve the rank-minimization problems in an SVD-free manner. The practical application is divided into three parts: 1. In the case of sequential seismic data acquisition, how jittered subsampling helps to recover better-quality data compared to random subsampling. 2. How the incorporation of reciprocity principles helps to enhance the quality of the recovered fully sampled data. 3. How we can recover the sequential source data from simultaneous source data.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/kumar2013SINBADwrs.pdf}, author = {Rajiv Kumar} } @PRESENTATION{ghadermarzy2013SINBADups, title = {Using prior support information in approximate message passing algorithms}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Consider the standard compressed sensing problem. We want to recover a sparse or compressible signal from a few linear measurements. In this talk we investigate recovery performance when we have prior information about the support, i.e., the indices of the non-zero entries, of the signal to be recovered. First we briefly review the results of "weighted $\ell_p$ minimization algorithm with p = 1 and 0 < p < 1". Then we derive a weighted approximate message passing (AMP) algorithm which incorporates prior support information into the AMP algorithm. We empirically show that this algorithm recovers sparse signals significantly faster than weighted $\ell_1$ minimization. We also introduce a reweighting scheme for AMP and weighted AMP which, we observe, substantially improves the recovery conditions of these algorithms. We illustrate our results with extensive numerical experiments on synthetic data and seismic data reconstruction.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/ghadermarzy2013SINBADups.pdf}, author = {Navid Ghadermarzy} } @PRESENTATION{lin2013SINBADbre, title = {Bootstrapping robust {EPSI} with coarsely sampled data}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {The EPSI method of surface multiple removal directly inverts for the free-surface operator, i.e., the multiple-free Green's function of the subsurface seismic response. One peculiar feature of this approach is the theoretical independence of the spectrum of the free-surface operator from the spectrum of the observed data. The SRME approach requires coarsely sampled data to be low-pass filtered sufficiently in order to avoid aliasing in multiple contribution gathers, which in turn limits the temporal resolution of the demultipled result. Conversely, such limitations in temporal resolution do not directly apply to the inversion solution of EPSI. This property can in turn be exploited both to significantly lower the cost of EPSI and to mitigate the effect of undersampled data in a controlled way.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/lin2013SINBADbre.pdf}, author = {Tim T. Y.
Lin} } @PRESENTATION{oghenekohwo2013SINBADedt, title = {Estimating {4D} differences in time-lapse using randomized sampling techniques}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Repeatability in the seismic survey and processing has been cited as the main reason why 4D seismic technology works. In the last decade, concerted efforts have been spent to make the 4D seismic process highly repeatable, without significant success. On the contrary, Compressed Sensing, a relatively new sampling paradigm, proposes that one can recover an estimate of a fully sampled signal from noisy, under-sampled measurements provided the acquisition architecture satisfies some properties. By observing different under-sampled and random measurements from each vintage, corresponding to different acquisition geometries, we show that one can still detect the 4D change in time using recent ideas from Compressed Sensing. Using a realistic synthetic model, we show two methods of estimating the 4D difference and compare their relative performance to each other.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/oghenekohwo2013SINBADedt.pdf}, author = {Felix Oghenekohwo} } @PRESENTATION{wason2013SINBADtjm, title = {Time-jittered marine sources}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Current efforts towards dense shot (and/or receiver) sampling and full azimuthal coverage to produce higher-resolution images have led to the deployment of multiple source vessels across the survey area. A step ahead from multi-source seismic acquisition is simultaneous or blended acquisition where different source arrays/vessels fire shots at near-simultaneous or slightly random times. Seismic data acquisition with simultaneous (or blended) sources has helped improve acquisition efficiency and mitigate acquisition related costs. Deblending then aims to recover unblended data, as acquired during conventional acquisition, from blended data since many processing techniques rely on full, regular sampling. We present a simultaneous/blended marine acquisition setup where shots fire at significantly jittered instances in time, resulting in jittered shot locations for a given speed of the source vessel. The conventional, unblended data is recovered from the blended, jittered/irregular data by sparsity-promoting inversion using the non-equispaced fast discrete curvelet transform. The optimization scheme aims to deblend the blended data along with regularization and interpolation to a (finer) regular grid.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/wason2013SINBADtjm.pdf}, author = {Haneet Wason} } @PRESENTATION{kumar2013SINBADava, title = {Extended images in action: efficient {AVA} via probing}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Common image gathers (CIG) are an important tool to perform AVA analysis in areas of complex geology. Unfortunately, it is prohibitively expensive to compute these CIG for all the subsurface points. In this work, we present an efficient way to compute CIG for all subsurface offsets without explicitly calculating the source and receiver wavefields for all the sources.
Because the CIG contain all possible subsurface offsets, we compute the angle-domain image gathers by selecting the subsurface offset that is aligned with the local geologic dip. We propose a method to compute the local dip information directly from common-image-point gathers. To assess the quality of the angle-domain common-image-point gathers, we compute the angle-dependent reflectivity coefficients and compare them with theoretical reflectivity coefficients yielded by the (linearized) Zoeppritz equations for a few synthetic models.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/kumar2013SINBADava.pdf}, author = {Rajiv Kumar} } @PRESENTATION{miao2013SINBADfid, title = {Fast imaging via depth stepping with the two-way wave equation}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {In this presentation we propose a fast imaging algorithm via depth stepping with the two-way wave equation. Within the framework of survey sinking, a stabilized depth extrapolation operator is computed using a spectral projector which can efficiently split evanescent wave components. The computation of the spectral projector features a Hierarchically Semi-Separable (HSS) matrix representation that speeds up the polynomial recursion, resulting in an acceleration from cubic to linear numerical complexity.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/miao2013SINBADfid.pdf}, author = {Lina Miao} } @PRESENTATION{zheglova2013SINBADihss, title = {Imaging with hierarchical semi separable matrices}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Hierarchically Semi Separable (HSS) matrices are (in general) dense matrices that have low-rank off-diagonal blocks that can be represented economically. Exploiting a specific structure of the HSS representation, fast algorithms have been devised for matrix-matrix multiplication, addition and computing a matrix inverse. We are interested in developing fast algorithms for seismic imaging using ideas from this approach. An overview of the HSS representation and some methods using HSS representations of operators will be shown in this talk.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/zheglova2013SINBADihss.pdf}, author = {Polina Zheglova} } @PRESENTATION{tu2013SINBADfim, title = {Fast imaging with multiples and source estimation}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {During this talk, we present a computationally efficient (cost of 1-2 RTMs with all data) iterative sparsity-promoting inversion framework where surface-related multiples are jointly imaged with primaries and where the source signature is estimated on the fly. Our imaging algorithm is computationally efficient because it works during each iteration with small independent randomized subsets of data. The multiples are handled by introducing an areal source term that includes the upgoing wavefield. We update the source signature for each iteration using a variable projection method.
The resulting algorithm removes imaging artifacts from surface-related multiples, estimates and removes the imprint of the source, recovers true amplitudes, is fast, and is robust to linearization errors by virtue of the statistical independence of the subsets of data we are working with at each iteration.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/tu2013SINBADfim.pdf}, author = {Ning Tu} } @PRESENTATION{fang2013SINBADuafwi, title = {Uncertainty analysis for {FWI}}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Uncertainty analysis is important for seismic interpretation. Based on the Bayesian framework, we can analyse different statistical parameters of our FWI result. However, directly sampling the posterior probability density function (pdf) is computationally intractable. In order to make this problem computationally tractable, in this work, we use a Gaussian distribution approximation and a low-rank approximation to generate the posterior pdf. Simultaneous shots are also used to reduce the computational costs.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/fang2013SINBADuafwi.pdf}, author = {Zhilong Fang} } @PRESENTATION{li2013SINBADmsd, title = {Model-space versus data-space {FWI} with the acoustic wave equation}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Inverting data with elastic phases using an acoustic wave equation can lead to erroneous results, especially when the number of iterations is too high, which may lead to overfitting the data. Several approaches have been proposed to address this issue. Most commonly, people use data-filtering operations that are aimed at deemphasizing the elastic phases in the data in favor of the acoustic phases. Examples of this approach are nested loops over offset range and Laplace parameters. In this presentation, we discuss two complementary optimization-driven methods where the minimization process decides adaptively which of the data or model components are consistent with the objective. Specifically, we compare the Student's t misfit function as the data-space alternative and curvelet-domain sparsity promotion as the model-space alternative. Application of these two methods to a realistic synthetic leads to comparable results that we believe can be improved by combining these two methods.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/li2013SINBADmsd.pdf}, author = {Xiang Li} } @PRESENTATION{tu2013SINBADmachar, title = {{SLIM's} findings on the {Machar} dataset}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {By courtesy of BP, we are able to get some hands-on experience imaging seismic data from the Machar field in the North Sea. We performed reverse-time migration and sparsity-promoting migration with source-estimation on this dataset (or a cropped section of the dataset in many cases due to computational constraints), and had some interesting findings.
In this presentation, we will show some conclusive results we have obtained, and explain the key techniques in imaging this dataset.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/tu2013SINBADmachar.pdf}, author = {Ning Tu} } @PRESENTATION{li2013SINBADgom, title = {Lessons learned from {Chevron} {Gulf} of {Mexico} data set}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {The Chevron Gulf of Mexico data set is very challenging for FWI because of elastic phases, limited offset, a lack of low frequencies and salt structure. To overcome these issues, we first use ray-based tomography on the hand-picked first breaks to generate an initial model for FWI, and then we apply curvelet-denoising techniques to improve the poor signal-to-noise ratio of the observed data at low frequencies. Finally, curvelet-domain sparsity-promoting Gauss-Newton FWI helps to suppress model-space artifacts caused by elastic phases. This is joint work with Andrew J. Calvert, Ian Hanlon, Mostafa Javanmehri, Rajiv Kumar, Tristan van Leeuwen, Brendan Smithyman, Haneet Wason and Felix J. Herrmann}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/li2013SINBADgom.pdf}, author = {Xiang Li} } @PRESENTATION{warner2013SINBADrfwi, title = {Reflection {FWI} with a poor starting model}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/warner2013SINBADrfwi.pdf}, author = {Mike Warner} } @PRESENTATION{smithyman2013SINBADprb, title = {Phase-residual based quality-control methods and techniques for mitigating cycle skips}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Most full-waveform inversion algorithms use local optimization methods to iteratively improve the numerical earth model. All of these make the implicit assumption that the model is close to the true earth in order to avoid cycle skipping. In practice this may not be true. We explore two questions: 1. How do we understand and visualize the cycle skip phenomenon in order to recognize it if it exists? 2. How do we automate this quality control step and use the rich information from multiple data to mitigate cycle skips and avoid local minima?}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/smithyman2013SINBADprb.pdf}, author = {Brendan Smithyman} } @PRESENTATION{fang2013SINBADp3dfwi, title = {Parallel {3D} {FWI} with simultaneous shots}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {In this work, we build the workflow for parallel 3D full-waveform inversion. In the forward simulation part, we generate the Helmholtz matrix in parallel and use parallel CARPCG to solve the Helmholtz equation. In the inversion process, simultaneous shots are used to reduce the computational costs.
Additionally, we propose a method to select the number of simultaneous shots and tolerance of CARPCG dynamically, to reach a compromise between the computational costs and accuracy.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/fang2013SINBADp3dfwi.pdf}, author = {Zhilong Fang} } @PRESENTATION{kumar2013SINBADeia, title = {Extended images in action: efficient {WEMVA} via randomized probing}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Extended images as a function of the full subsurface offset are an important tool to perform wave-equation based migration velocity analysis (WEMVA) in areas of complex geology. Unfortunately, computation & storage of these extended images is prohibitively expensive. In this work, we present an efficient way to compute extended images for all subsurface offsets without explicitly calculating the source and receiver wavefields for all the sources. Instead, we calculate actions of extended image volumes on probing vectors that live in the image space. The probing can either be defined as vectors from the Dirac basis, which allows us to form the extended image at the location of the point diffractor, or they can be defined in terms of Gaussian noise. The latter corresponds to sources with random weights firing simultaneously at every grid point. We demonstrate that this probing leads to a computationally efficient implementation of WEMVA. This is joint work with Tristan van Leeuwen.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/kumar2013SINBADeia.pdf}, author = {Rajiv Kumar} } @PRESENTATION{peters2013SINBADepm, title = {Examples from the penalty-method}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {A novel penalty method for PDE-constrained optimization was recently proposed by van Leeuwen and Herrmann (2013). The conventional PDE-constrained optimization formulation used in seismic waveform inversion is based on calculating the gradient of the data misfit objective functional via the adjoint-state method, at the cost of two PDE solves. The penalty method requires only one solution of an overdetermined linear system, in the least-squares sense. In this talk some numerical properties of this linear system will be exposed. The penalty method for PDE-constrained optimization involves a parameter, balancing the data misfit and PDE-misfit parts of the objective functional. This talk will address how to select this very important parameter. Some examples will be shown in which the penalty method outperforms the conventional method in non-linear waveform inversion, as well as linearized seismic imaging by migration.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/peters2013SINBADepm.pdf}, author = {Bas Peters} } @PRESENTATION{herrmann2013SINBADffwi, title = {Frugal {FWI}}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Seismic waveform inversion aims at obtaining detailed estimates of subsurface medium parameters, such as soundspeed, from seismic data. A formulation in the frequency-domain leads to an optimization problem constrained by a Helmholtz equation with many right-hand sides. 
Application of this technique in 3D precludes the use of factorization techniques to solve the Helmholtz equation due to the large number of gridpoints and the bandwidth of the matrix. While many sophisticated pre-conditioned iterative techniques have been developed for the Helmholtz equation, they often include model-specific tuning parameters and are thus not very attractive for inversion since the medium parameters change from one iteration to the next. In this paper, we propose a method for 3D seismic waveform inversion that addresses both the need to efficiently solve the Helmholtz equation as well as the computational cost induced by the many right-hand sides. To solve the Helmholtz equation, we consider a simple generic preconditioned iterative method (CARP-CG) that is well-suited for inversion because of its robustness. We extend this method to a block-iterative method that can efficiently handle multiple right-hand sides. To reduce the computational cost of the overall optimization procedure, we use recently proposed techniques from stochastic optimization that allow us to work with approximate gradient information. These approximations are obtained by evaluating only a small portion of the right-hand sides and/or by solving the PDE approximately. We propose heuristics to adaptively determine the required accuracy of the PDE solves and the sample size, and illustrate the algorithms on synthetic benchmark models. This is joint work with Tristan van Leeuwen.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/herrmann2013SINBADffwi.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{vanleeuwen2013SINBADrpp, title = {Relaxing the physics: a penalty method for full-waveform inversion}, booktitle = {SINBAD Fall consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {We present a computationally efficient method of solving partial differential equation (PDE)-constrained optimization problems that occur in (geophysical) inversion problems. The method takes measured data from a physical system as input and minimizes an objective function that depends on an unknown model for the physical parameters, fields, and additional nuisance parameters such as the source function. The invention consists of a minimization procedure involving a cost functional comprising a data-misfit term and a penalty term that measures how accurately the fields satisfy the PDE. The method is composed of two alternating steps, namely the solution of a system of equations forming the discretization of the data-augmented PDE, and the solution of physical model parameters from the PDE itself given the field that solves the data-augmented system and an estimate for the sources. Compared to all-at-once approaches to PDE-constrained optimization, there is no need to update and store the fields for all sources, leading to significant memory savings. As in the all-at-once approach, the proposed method explores a larger search space and is therefore less sensitive to initial estimates for the physical model parameters. Contrary to the reduced formulation, the proposed method does not require the solution of an adjoint PDE, effectively halving the number of PDE solves and the memory requirement.
As in the reduced formulation, fields can be computed independently and aggregated, possibly in parallel.}, keywords = {presentation, SINBAD, SINBADFALL2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Fall/vanleeuwen2013SINBADrpp.pdf}, author = {Tristan van Leeuwen} } %----- 2013 (SPRING) -----% @PRESENTATION{dasilva2013SINBADSPRhtuck, title = {Hierarchical {Tucker} tensor optimization - applications to {4D} seismic data interpolation}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {In this work, we develop optimization algorithms on the manifold of Hierarchical Tucker (HT) tensors, an extremely efficient format for representing high-dimensional tensors exhibiting particular low-rank structure. With some minor alterations to existing theoretical developments, we develop an optimization framework based on the geometric understanding of HT tensors as a smooth manifold, a generalization of smooth curves/surfaces. Building on the existing research of solving optimization problems on smooth manifolds, we develop Steepest Descent and Conjugate Gradient methods for HT tensors. The resulting algorithms converge quickly, are immediately parallelizable, and do not require the computation of SVDs. We also derive efficient Gauss-Newton based algorithms which converge much faster than standard, first-order methods. We also extend ideas about favourable sampling conditions for missing-data recovery from the field of Matrix Completion to Tensor Completion and demonstrate how the organization of data can affect the success of recovery. As a result, if one has data with randomly missing source pairs, using these ideas, coupled with an efficient solver, one can interpolate large-scale seismic data volumes with missing sources and/or receivers by exploiting the multidimensional dependencies in the data. We are able to recover data volumes amidst extremely high subsampling ratios (in some cases, > 75\%) using this approach and we demonstrate our recovery on a synthetic 5D data set provided to us by BG.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, Hierarchical Tucker, Structured Tensor, 3D Data interpolation, structured tensor, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/dasilva2013SINBADSPRhtuck.pdf}, author = {Curt Da Silva and Felix J. Herrmann} } @PRESENTATION{herrmann2013SINBADmlm, title = {Mitigating local minima in full-waveform inversion by expanding the search space with the penalty method}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Wave-equation based inversions, such as full-waveform inversion, are challenging because of their computational costs, memory requirements, and reliance on accurate initial models. To confront these issues, we propose a novel formulation of full-waveform inversion based on a penalty method. In this formulation, the objective function consists of a data-misfit term and a penalty term which measures how accurately the wavefields satisfy the wave-equation. Because we carry out the inversion over a larger search space, including both the model and synthetic wavefields, our approach suffers less from local minima. Our main contribution is the development of an efficient optimization scheme that avoids having to store and update the wavefields by explicit elimination. 
Compared to existing optimization strategies for full-waveform inversion, our method differs in two main aspects: (i) the wavefields are solved from an augmented wave-equation, where the solution is forced to both satisfy the wave-equation and fit the observed data; (ii) no adjoint wavefields are required to update the model, which leads to significant computational savings. We demonstrate the validity of our approach by carefully selected examples and discuss possible extensions and future research. This is joint work with Tristan van Leeuwen.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/herrmann2013SINBADmlm.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2013SINBADfwi, title = {Frugal {FWI}}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Seismic waveform inversion aims at obtaining detailed estimates of subsurface medium parameters, such as soundspeed, from seismic data. A formulation in the frequency domain leads to an optimization problem constrained by a Helmholtz equation with many right-hand sides. Application of this technique in 3D precludes the use of factorization techniques to solve the Helmholtz equation due to the large number of gridpoints and the bandwidth of the matrix. While many sophisticated preconditioned iterative techniques have been developed for the Helmholtz equation, they often include model-specific tuning parameters and are thus not very attractive for inversion since the medium parameters change from one iteration to the next. In this paper, we propose a method for 3D seismic waveform inversion that addresses both the need to efficiently solve the Helmholtz equation and the computational cost induced by the many right-hand sides. To solve the Helmholtz equation, we consider a simple generic preconditioned iterative method (CARP-CG) that is well-suited for inversion because of its robustness. We extend this method to a block-iterative method that can efficiently handle multiple right-hand sides. To reduce the computational cost of the overall optimization procedure, we use recently proposed techniques from stochastic optimization that allow us to work with approximate gradient information. These approximations are obtained by evaluating only a small portion of the right-hand sides and/or by solving the PDE approximately. We propose heuristics to adaptively determine the required accuracy of the PDE solves and the sample size, and illustrate the algorithms on synthetic benchmark models. This is joint work with Tristan van Leeuwen.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/herrmann2013SINBADfwi.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2013SINBADeia, title = {Extended images in action}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Image gathers as a function of subsurface offset are an important tool for migration-velocity and amplitude-versus-angle analysis in areas of complex geology. Traditionally, these gathers are thought of as multidimensional correlations of the source and receiver wavefields. The bottleneck in computing the gathers lies in the fact that one needs to compute and store these wavefields and correlate them to obtain the desired image gathers. 
Therefore, the image gathers are typically only computed for a limited number of subsurface points and for a limited range of subsurface offsets. In this presentation, we offer a new perspective on such gathers by organizing the extended image as a function of all subsurface offsets for all subsurface points in a matrix whose (i,j) entry captures the interaction between gridpoints i and j. Of course, it is infeasible to form and store this matrix. Instead, we propose an efficient algorithm to glean information from the image volume via the action of matrix-vector products with this matrix. We illustrate how this can be used to (i) form conventional image gathers and construct objective functions for automated MVA and (ii) calculate true two-way wave-equation angle gathers that allow us to carry out linearized angle-versus-offset inversion with geologic-dip correction. This is joint work with Tristan van Leeuwen and Rajiv Kumar.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/herrmann2013SINBADeia.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2013SINBADldc, title = {Latest developments on the {Chevron} {GOM} and other datasets}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {During this talk, we will give a brief overview on our imaging and FWI results on various synthetic and field datasets we have been working on. This is joint work with Andrew Calvert, Brendan Smithyman, and the SLIM team.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/herrmann2013SINBADldc.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{kumar2013SINBADhss, title = {Seismic data interpolation via low-rank matrix factorization in the hierarchical semi-separable representation}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Recent developments in matrix rank optimization have allowed for new computational approaches in the field of seismic data interpolation. One of the main requirements of exploiting rank-minimization approaches is that the target data set should exhibit a low-rank structure. Seismic frequency slices exhibit low-rank structure at the low-frequencies, but not at the high frequencies. This behavior is due to the increase in oscillations as we move from low to high-frequency slices, even though the energy remains focused around the diagonal. Therefore, interpolation via rank minimization in the high-frequency range requires extended formulations that incorporate low-rank structure. We propose an approach for seismic data interpolation which incorporates the Hierarchical Semi-Separable Structure (HSS) inside rank-regularized least-squares formulations for the missing-trace interpolation problem. The proposed approach is suitable for large scale problems, since it avoids SVD computations and uses a low-rank factorized formulation instead. We illustrate the advantages of the new HSS approach by interpolating a seismic line from the Gulf of Suez and compare the reconstruction with conventional rank minimization.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/kumar2013SINBADhss.pdf}, author = {Rajiv Kumar and Hassan Mansour and Aleksandr Y. Aravkin and Felix J. 
Herrmann} } @PRESENTATION{lin2013SINBADa, title = {Cosparse seismic data interpolation}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Over the years we have investigated seismic data interpolation and redatuming algorithms that rely on the assumption that seismic records and images permit sparse approximations under certain representations, such as Curvelet coefficients. Recent findings have suggested that for redundant representations (of which Curvelet is an example), the analysis operator that maps the physical signal to coefficients may also play a crucial role in recovering data from incomplete observations. This insight elevates the significance of a question that often goes unaddressed: is it better for the transform-domain sparsity to be achieved through explicit construction of sparse representations (e.g., by thresholding of small transform-domain coefficients), or by demanding that the algorithm return physical signals which produce sparse coefficients when hit with the forward transform? Recent results show that the two approaches give rise to different solutions when the transform is redundant, and that the latter approach imposes a whole new class of constraints. In particular, the number of zero-valued coefficients given by the analysis operator acting on the signal, referred to as its "cosparsity", has an analogous role to the sparsity of the signal in terms of the coefficients. From this framework, a new reconstruction algorithm is proposed which may allow better reconstruction from subsampled signals than what the sparsity assumption alone would predict. In this work we apply the new framework and algorithm to the case of seismic data interpolation in the curvelet domain, and show that it admits better reconstruction than some existing L1 sparsity-based methods derived from compressive sensing for a range of subsampling factors. We will also investigate different analysis operators and their impact on both sparsity- and cosparsity-based algorithms.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/lin2013SINBADcsd.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{tu2013SINBADvp, title = {Fast imaging with multiples and source estimation}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Multiples are usually treated as unwanted components in seismic data. However, if correctly used, they can provide valuable information about the subsurface. In this presentation, I will talk about how to make use of multiples in $\ell_1$ regularized least-squares imaging, and how to estimate the source wavelet on the fly using multiples. Synthetic examples show that using multiples not only helps to retrieve the true-amplitude seismic image and the source wavelet, but also increases the wavenumber content in the seismic image. 
By using dimensionality reduction techniques with rerandomization, we also greatly decrease the simulation cost without compromising the image quality.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/tu2013SINBADvp.pdf}, author = {Ning Tu} } @PRESENTATION{tu2013SINBADrerand, title = {Controlling linearization errors with rerandomization}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Least squares migration aims to fit the observed seismic data with data predicted by linearized modelling, by solving a PDE-constrained optimization problem. This problem is challenging mostly because of its prohibitive computational cost. To address the issue, dimensionality reduction techniques have been proposed in the literature. However, the solution of the reduced problem can deviate from that of the full problem when there are components in the observed data that cannot be explained by linearized modelling. We solve the problem by rerandomizing our $\ell_1$ regularized inversion. In this presentation, I will explain the method and demonstrate what we can achieve with rerandomization, especially for resolving fine sub-salt structures.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/tu2013SINBADrerand.pdf}, author = {Ning Tu} } @PRESENTATION{wason2013SINBADtjo, title = {Time-jittered ocean bottom seismic acquisition}, booktitle = {SINBAD Spring consortium talks}, organization = {SINBAD}, year = {2013}, abstract = {Leveraging ideas from the field of compressed sensing, we show how simultaneous or blended acquisition can be set up as a compressed sensing problem. This helps us to design a pragmatic time-jittered marine acquisition scheme where multiple source vessels sail across an ocean-bottom array firing airguns at jittered source locations and instances in time, resulting in better spatial sampling and speeding up acquisition. Furthermore, we can significantly impact the reconstruction quality of conventional seismic data (from jittered data) and demonstrate successful recovery by sparsity promotion. In contrast to random (under)sampling, acquisition via jittered (under)sampling helps in controlling the maximum gap size, which is a practical requirement of wavefield reconstruction with localized sparsifying transforms. Results are illustrated with simulations of time-jittered marine acquisition, which translates to jittered source locations for a given speed of the source vessel.}, keywords = {presentation, SINBAD, SINBADSPRING2013, SLIM, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2013/Spring/wason2013SINBADtjo.pdf}, author = {Haneet Wason and Felix J. Herrmann} } %----- 2012 (FALL) -----% @PRESENTATION{Akalin2012SINBADlss, title = {Large scale seismic data interpolation with matrix completion}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Seismic surveys amass large and incomplete data sets, and designing algorithms to interpolate the missing data at very large scales poses a daunting and critical challenge. We study how to apply scalable matrix completion methods to such interpolation problems. 
Recent studies in matrix completion have shown that a matrix that has low rank can be exactly completed when only a small number of observations are available. However, there are two challenges to applying matrix completion to seismic data. Matrix completion is typically applied to two-dimensional or dyadic data, whereas seismic data is often tensorial. Also, successful matrix completion requires a low-rank matrix structure. We address these problems by organizing the seismic data on a matrix grid which exhibits a low-rank structure. This encoding allows us to apply the Jellyfish algorithm, developed at the University of Wisconsin, which achieves state-of-the-art performance for large-scale matrix completion. The proposed framework makes it possible to complete high-SNR interpolations of gigabytes of 4-D seismic data in minutes on standard multicore workstations. Our preliminary experimental results suggest that matrix completion provides a promising new approach to the seismic data interpolation problem.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/Akalin2012SINBADlss/Akalin2012SINBADlss_pres.pdf}, author = {Okan Akalin} } @PRESENTATION{aravkin2012SINBADenp, title = {Estimating nuisance parameters in inverse problems}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Many inverse problems include nuisance parameters which, while not of direct interest, are required to recover primary parameters. In this talk, we present the idea of "projecting out" these variables, and how this idea allows us to design methods for solving a broad class of problems with nuisance parameters, such as variance or degrees of freedom. We then discuss several geophysical applications, including estimation of unknown variance parameters in the Gaussian model for full waveform inversion, degree of freedom (d.o.f.) parameter estimation in the context of robust imaging problems, and robust source estimation.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/aravkin2012SINBADenp/aravkin2012SINBADenp_pres.pdf}, author = {Aleksandr Y. Aravkin} } @PRESENTATION{aravkin2012SINBADgsf, title = {Generalized {SPGL1}: from theory to applications}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {The SPGL1 solver has been effectively used for many geophysical applications, including curvelet data interpolation, imaging, and as a subroutine in full waveform inversion. In this talk, we present an overview of the theoretical foundation of the solver, along with a broad generalization of this foundation. We then introduce several applications, including robust & sparse imaging, sparse deconvolution, and data interpolation by matrix completion.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/aravkin2012SINBADgsf/aravkin2012SINBADgsf_pres.pdf}, author = {Aleksandr Y. 
Aravkin} } @PRESENTATION{au-yeung2012SINBADcs, title = {Compressed sensing, random {Fourier} matrix and jitter sampling}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Compressed sensing is an emerging signal processing technique that allows signals to be sampled well below the Nyquist rate, when the signal has a sparse representation in an orthonormal basis. By using a random Fourier matrix or a Gaussian matrix as our measurement matrix, we can reconstruct a signal from far fewer measurements than required by the Shannon sampling theorem. In this talk, we will discuss the role of uniform versus jitter sampling, from both a theoretical and a practical viewpoint.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/au-yeung2012SINBADcs/au-yeung2012SINBADcs_pres.pdf}, author = {Enrico Au-Yeung and Hassan Mansour and Ozgur Yilmaz} } @PRESENTATION{dasilva2012SINBADhtt, title = {Hierarchical {Tucker} tensor optimization - applications to {4D} seismic data interpolation}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {There has been a swell of research in the scientific computing community in the last couple of years which tries to extend notions of linear algebra (rank, the SVD, linear systems, etc.) to higher dimensional arrays, or tensors. Much work has been proposed to try to overcome the so-called "curse of dimensionality", the O(N^d) storage required for a d-dimensional array, where N is the size of each dimension. The hierarchical Tucker format is one such tensor representation which manages to decompose a hierarchy of dimensions into parameter matrices of very manageable size, requiring at most dNK + (d - 2)K^3 + K^2 parameters, where K is an internal rank parameter. In this work, we extend ideas of matrix completion to the tensor case, where we only know a small number of randomly distributed entries from various 4D frequency slices, and try to recover the fully sampled tensor based on the knowledge that it has low hierarchical Tucker rank in a particular arrangement of dimensions. Using this approach, we exploit the multi-dimensional dependencies within the full data in order to achieve very promising interpolation results even from heavily subsampled data.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/dasilva2012SINBADhtt/dasilva2012SINBADhtt_pres.pdf}, author = {Curt Da Silva} } @PRESENTATION{friedlander2012SINBADrsh, title = {Randomized sampling: {How} confident are you?}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {At last year's consortium meeting, I described an inexact gradient method and sampling scheme for data fitting. The randomization method has good convergence properties, at least as measured by the distance to the solution, in expectation. But as one insightful critic rightly pointed out, we don't usually observe the expectation, at least not in a single run. In this talk I will characterize the convergence of the method in terms of bounds on the probability of being too far away from the solution.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/friedlander2012SINBADrsh/friedlander2012SINBADrsh_pres.pdf}, author = {Michael P. 
Friedlander} } @PRESENTATION{ghadermarzy2012SINBADncc, title = {Non-convex compressed sensing using partial support information}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {In this talk, we will address the recovery conditions of weighted $\ell_p$ minimization for signal reconstruction from compressed sensing measurements when (possibly inaccurate) partial support information is available. First we will motivate the use of (weighted) $\ell_p$ minimization with $p<1$ and point out its advantages over weighted $\ell_1$ minimization when there is prior information on the support of the signal that is possibly partial and inaccurate. Then we will provide theoretical guarantees of sufficient recovery conditions for weighted $\ell_p$ minimization, which are better than those for (unweighted) $\ell_p$ minimization as well as those for weighted $\ell_1$. In the last part of the talk, we will illustrate our results with some numerical experiments and stylized applications.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/ghadermarzy2012SINBADncc/ghadermarzy2012SINBADncc_pres.pdf}, author = {Navid Ghadermarzy} } @PRESENTATION{hargreaves2012SINBADavs, title = {Analysis versus synthesis in weighted sparse recovery}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {The synthesis model for compressive sensing has been the model of choice for many years and various weighting schemes have been shown to improve its performance (see Yilmaz, Mansour, and Ghadermarzy talks). However, there is a counterpart model to synthesis, namely the analysis model, which has been less popular but has recently attracted more attention (see Lin's talk). In this talk, weighting in the analysis model is discussed and applied to the seismic trace interpolation problem.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/hargreaves2012SINBADavs/hargreaves2012SINBADavs_pres.pdf}, author = {Brock Hargreaves} } @PRESENTATION{herrmann2012SINBADals, title = {Fast sparsity-promoting imaging with message passing}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {To meet current-day challenges, exploration seismology increasingly relies on more and more sophisticated algorithms that require multiple passes through all data. This requirement leads to problems because the size of seismic data volumes is increasing exponentially, exposing bottlenecks in IO and computational capability. To overcome these bottlenecks, we follow recent trends in machine learning and compressive sensing by proposing a sparsity-promoting inversion technique that works on small randomized subsets of data only. We boost the performance of this algorithm significantly by modifying a state-of-the-art l1-norm solver to benefit from message passing, which breaks the build-up of correlations between model iterates and the randomized linear forward model. We demonstrate the performance of this algorithm on a toy sparse-recovery problem and on a realistic reverse-time-migration example with random source encoding. 
The improvements in speed, memory use, and output quality are truly remarkable.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/herrmann2012SINBADals/herrmann2012SINBADals_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2012SINBADfwi, title = {Our findings on the {Chevron} benchmark dataset}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {During this presentation, we will review our findings working with the synthetic GOM data released as part of the post-SEG workshop: "Gulf of Mexico Imaging Challenges: What Can Full Waveform Inversion Achieve?". This is joint work with Andrew J. Calvert, Ian Hanlon, Mostafa Javanmehri, Rajiv Kumar, Tristan van Leeuwen, Xiang Li, Brendan Smithyman, Eric Takam Takougang, and Haneet Wason.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/herrmann2012SINBADfwi/herrmann2012SINBADfwi_pres.pdf}, author = {Andrew J. Calvert and Ian Hanlon and Mostafa Javanmehri and Rajiv Kumar and Tristan van Leeuwen and Xiang Li and Brendan Smithyman and Eric Takam Takougang and Haneet Wason and Felix J. Herrmann} } @PRESENTATION{herrmann2012SINBADhpc, title = {{SLIM's} perspective on {HPC} & big data}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/herrmann2012SINBADhpc/herrmann2012SINBADhpc_pres.pdf}, author = {Tim T.Y. Lin and Tristan van Leeuwen and Felix J. Herrmann} } @PRESENTATION{krislock2012SINBADwsn, title = {Wireless sensor network localization}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Locating the position of sensors connected together in a wireless network given only the position of a small number of the sensors and estimates of some of the distances between the sensors is a difficult problem with many modern applications. Within the last few years, research in wireless sensor network localization has greatly increased due to the many new applications using wireless sensors, from lightweight sensors used to monitor the environment to ocean-bottom sensors used in geophysical applications. A second reason for this increased interest is our recent ability to efficiently solve these problems using modern semidefinite optimization solvers. We will discuss how semidefinite optimization can be used to solve such problems and possible directions for future work.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/krislock2012SINBADwsn/krislock2012SINBADwsn_pres.pdf}, author = {Nathan Krislock} } @PRESENTATION{kumar2012SINBADsdi, title = {Seismic data interpolation using {SVD} free {Pareto} curve based low rank optimization}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Seismic data acquisition is cursed by missing data caused by physical and/or budget constraints. The aim of interpolation techniques is to spatially transform irregularly acquired data to regularly sampled data while maintaining the coherency of events. 
While transform-domain sparsity promotion has proven to be an effective tool to solve this recovery problem, recent developments in rank-penalizing techniques open new horizons for improved recovery by exploiting low-rank structure. A major downside of current state-of-the-art techniques is their reliance on the SVD of seismic data structures, which can be prohibitively expensive. Fortunately, recent work allows us to circumvent this problem by working with matrix factorizations. We review a novel approach to rank penalization, and successfully apply it to the seismic interpolation problem by exploiting the low-rank structure of seismic data. Experiments for the recovery of 2D and 3D acquisition support the feasibility and potential of the new approach.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/kumar2012SINBADsdi/kumar2012SINBADsdi_pres.pdf}, author = {Rajiv Kumar} } @PRESENTATION{vanleeuwen2012SINBAD3dfd, title = {{3D} frequency-domain waveform inversion using a row-projected {Helmholtz} solver}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {3D frequency-domain full waveform inversion relies on being able to efficiently solve the 3D Helmholtz equation. Iterative methods require sophisticated preconditioners because the Helmholtz matrix is typically indefinite. In the first part of the talk I review a preconditioning technique that is based on row-projections. Notable advantages of this preconditioner over existing ones are that it has low algorithmic complexity, is easily parallelizable and extendable to time-harmonic vector equations. In the second part of the talk I discuss how the row-projected solver can be used in the context of waveform inversion. Key aspects are: the use of block-iterative methods for multiple sources and adapting the accuracy of the solver.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/vanleeuwen2012SINBAD3dfd/vanleeuwen2012SINBAD3dfd_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{vanleeuwen2012SINBADyap, title = {Yet another perspective on image volumes}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {An extended image is defined as the multi-dimensional cross-correlation of the source and receiver wavefields used for imaging. This extended image will reveal velocity errors by de-focusing and can thus be used for velocity analysis. However, for optimal sensitivity to velocity errors, the subsurface offset has to be aligned with the local dip. As this dip is not known a priori, we consider forming the extended image for subsurface offsets in all directions. However, computing and storing such a large image volume is not computationally feasible. We organize the image volume in a matrix and use matrix-probing techniques to glean information from the matrix without explicitly forming it. A matrix-vector multiply with the image-volume matrix can be performed at the cost of two wave-equation solves and does not require any explicit cross-correlations of the wavefields. Such techniques can also be used to evaluate focusing penalties without forming the whole image volume. 
Finally, the matrix viewpoint allows us to derive a 2-way equivalent of the DSR equation in a straightforward manner and provides a possible avenue for developing new velocity-continuation techniques.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/vanleeuwen2012SINBADyap/vanleeuwen2012SINBADyap_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{lin2012SINBADics, title = {An introduction to cosparse signal reconstruction}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Undersampling techniques in exploration seismology usually rely on the assumption that seismic records and images permit sparse approximations under certain representations, such as Curvelet coefficients. Recent findings have suggested that for redundant representations (of which Curvelet is an example), the analysis operator that maps the physical signal to coefficients may also play a crucial role in recovering data from incomplete observations. In particular, the number of zero-valued coefficients given by the analysis operator acting on the signal, referred to as its "cosparsity", has an analogous role to the sparsity of the signal in terms of the coefficients. The cosparsity of the signal permits recovery guarantees that are completely separate from sparsity-based models, and gives rise to distinct sets of reconstruction algorithms and performances compared to sparsity-based approaches. We present in this talk some initial findings on the viability of cosparse reconstruction for a variety of seismic applications that previously relied on sparse signal reconstruction, such as data interpolation and source separation.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/lin2012SINBADics/lin2012SINBADics_pres.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{Lin2012SINBADrdr, title = {Recent developments on the robust estimation of primaries by sparse inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Robust estimation of primaries by sparse inversion is a next-generation surface multiple removal technique with an objective to truly invert an operator that models the free-surface. Key to the success of this approach is the imposition of a sparsity constraint on the primary impulse response in the time domain. This is accomplished by carefully applying large-scale convex optimization techniques to an extended L1 minimization problem. One of the benefits of our approach is that many extensions to the algorithm can be devised under this optimization framework to improve the quality of the solution given fixed computational costs and to mitigate various shortcomings in field data. This talk will first review the basic technique of Robust EPSI and follow with some highlights on recent further developments of the algorithm, including a discussion on the role of regularization by reciprocity and the interpolation of near-offset data, as well as investigations into optimality and robustness to data outliers.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/Lin2012SINBADrdr/Lin2012SINBADrdr_pres.pdf}, author = {Tim T.Y. 
Lin} } @PRESENTATION{mansour2012SINBADsti, title = {Seismic trace interpolation via sparsity promoting reweighted algorithms}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Missing-trace interpolation aims to reconstruct regularly sampled wavefields from periodically sampled data with gaps caused by physical constraints. While transform-domain sparsity promotion has proven to be an effective tool to solve this recovery problem, current recovery techniques make no use of a priori information on the transform-domain coefficients. To overcome these shortcomings when solving large-scale recovery problems, we propose recovery by weighted one-norm minimization, which exploits correlations between locations of significant coefficients of different partitions, e.g., shot records, common-offset gathers, or frequency slices, of the acquired data. Moreover, in situations where no prior support estimate is available, we propose the WSPGL1 algorithm that outperforms standard $\ell_1$ minimization in finding sparse solutions to underdetermined linear systems of equations. Our algorithm is a modification of the SPGL1 algorithm and enjoys better sparse recovery performance at no additional computational cost. We illustrate the improved recovery using WSPGL1 for randomly subsampled seismic traces.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/mansour2012SINBADsti/mansour2012SINBADsti_pres.pdf}, author = {Hassan Mansour} } @PRESENTATION{miao2012SINBADasp, title = {Accelerating on sparse promoting recovery and its benefits in seismic application}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {The sparsity-promoting recovery problem arises more and more frequently with the broad application of compressed sensing tools in exploration seismology. Because of the curse of dimensionality, the prohibitive computational burden of iteratively evaluating objective functions is one of the key issues that constrain high-performance l1 solvers. In this paper, we try to further improve the convergence performance of SPGl1, one of the state-of-the-art large-scale sparse recovery solvers, and thereby limit the number of objective function evaluations by introducing a projected quasi-Newton method. Examples showing acceleration in seismic data collection, data processing, and inversion are included.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/miao2012SINBADasp/miao2012SINBADasp_pres.pdf}, author = {Lina Miao} } @PRESENTATION{ning2012SINBADfim, title = {Fast imaging with multiples}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {If correctly used, multiple energy can be mapped to the correct subsurface locations. However, simply applying the cross-correlation imaging condition will introduce non-causal artifacts into the final image. Here we propose an inversion approach to image primaries and multiples simultaneously that yields an artifact-free image. To address the high computational cost associated with inversion, we propose to: i) have the wave-equation solver carry out the multi-dimensional convolutions implicitly, and ii) reduce the number of PDE solves by randomized subsampling. 
We then propose to improve the overall performance of this algorithm by a process called rerandomization, which helps to cancel the correlation built up between the model iterate and the subsampling operator. We show the merits of our approach on a number of examples.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/ning2012SINBADfim/ning2012SINBADfim_pres.pdf}, author = {Ning Tu} } @PRESENTATION{oghenekohwo2012SINBADcs, title = {Compressed sensing: a tool for eliminating repeatability in acquisition of {4D} (time-lapse) seismic data}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {In 4D (time-lapse) seismic data acquisition, a very significant step is the repeatability of the acquisition process. In other words, the geophones must be placed at the exact locations they occupied during the baseline survey and acquisition. This condition is required to produce an image of the same location over time, which enables proper reservoir characterization. Repeating the seismic acquisition is very expensive, as geophones (receivers) have to be left at the same location over the period for which the data will be acquired. In this talk, we highlight how compressed sensing can be used to eliminate this repeatability condition on the acquisition. We show that a random sampling of the shots or random placement of the geophones is able to reproduce the same image over time, hence eliminating any acquisition imprints on the final seismic image.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/oghenekohwo2012SINBADcs/oghenekohwo2012SINBADcs_pres.pdf}, author = {Felix Oghenekohwo} } @PRESENTATION{peters2012SINBADfde, title = {Frequency domain {3D} elastic wave propagation in general anisotropic media}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Elastic wave propagation in 3 spatial dimensions is modeled using a wave equation containing the full stiffness tensor consisting of 21 independent components. This allows modeling in general anisotropic media. The wave equation is discretized on several Cartesian and rotated Cartesian staggered finite-difference grids (using a 2nd order approximation). The grids are linearly combined and, in combination with an antilumped mass strategy, minimize numerical dispersion while requiring a low number of grid points per wavelength. In case not all 21 components need to be modeled, an approximation of the stiffness tensor can be used (e.g., orthorhombic anisotropy, TTI, ...). This results in a linear system of equations, which is solved using an iterative method. The modeling of all 21 components of the stiffness tensor (or an approximation) enables the development of new waveform inversion functionalities.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/peters2012SINBADfde/peters2012SINBADfde_pres.pdf}, author = {Bas Peters} } @PRESENTATION{petrenko2012SINBADcarp, title = {{CARP-CG}: a computational study}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Forward modelling of the wave equation is a key ingredient in seismic full waveform inversion (FWI). 
Simulation in the time domain and solution of the wave equation in the frequency domain are two competing approaches to modelling. Frequency domain approaches can further be categorized as using either direct or iterative solvers. For 3D FWI, iterative solvers in the frequency domain are attractive, partly because they require less memory than the other methods. This is due to the fact that there is no need to store the wavefield at each time step, or compute a factorization of the Helmholtz operator that will not be as sparse as the original matrix. One iterative solver that has been applied to the Helmholtz equation is CARP-CG. CARP-CG uses Kaczmarz row projections for each block of a domain decomposition scheme to precondition the Helmholtz system into being symmetric and positive semidefinite. The method of conjugate gradients is then used to solve the preconditioned system. We present a comparison of the performance of CARP-CG implemented in several languages (MATLAB, C, FORTRAN, Julia) and in two different hardware environments: the LIMA HPC cluster hosted at UBC, and the Checkers cluster which is part of the Westgrid consortium. Parallelization of the algorithm via domain decomposition implemented with MPI (distributed memory) and OMP (shared memory) is also examined.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/petrenko2012SINBADcarp/petrenko2012SINBADcarp_pres.pdf}, author = {Art Petrenko} } @PRESENTATION{tamalet2012SINBADvpe, title = {Variance parameters estimation - application to full waveform inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Many inverse problems include nuisance parameters. While not of direct interest, these parameters are required to recover primary parameters. In order to estimate these nuisance parameters as well as the primary parameters in large-scale inverse problems, a method based on variable projection, which consists of projecting out a subset of the variables, has been developed. We present here the application of this method to the problem of variance parameter estimation in multiple datasets, which is an important problem in many areas including geophysics. More precisely, we apply the method to Full Waveform Inversion and demonstrate the improvement in recovery of the model parameters in the case where the variance of the noise increases with the frequency.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/tamalet2012SINBADvpe/tamalet2012SINBADvpe_pres.pdf}, author = {Anais Tamalet} } @PRESENTATION{warner2012SINBADafw, title = {Anisotropic {3D} full-waveform inversion of the {Tommeliten} {Alpha} field}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {We have implemented a robust and practical scheme for anisotropic 3D acoustic full-waveform inversion. We demonstrate this scheme on a field data set, applying it to a four-component ocean-bottom survey over the Tommeliten Alpha field in the North Sea. This shallow-water data set provides good azimuthal coverage to offsets of 7 km, with reduced coverage to a maximum offset of about 11 km. The reservoir lies at the crest of a high-velocity antiformal chalk section, overlain by about 3000 m of clastics within which a low-velocity gas cloud produces a seismic obscured area. 
We inverted only the hydrophone data, and we retained free-surface multiples and ghosts within the field data. We inverted in six narrow frequency bands, in the range 3 to 6.5 Hz. At each iteration, we selected only a subset of sources, using a different subset at each iteration; this strategy is more efficient than inverting all the data every iteration. Our starting velocity model was obtained using standard PSDM model building including anisotropic reflection tomography, and contained epsilon values as high as 20\%. We have also attempted full-elastic inversion of these data to recover a shallow isotropic model of both p and s-wave velocities. The final FWI velocity model shows a network of shallow high-velocity channels that match similar features in the reflection data. Deeper in the section, the FWI velocity model reveals a sharper and more-intense low-velocity region associated with the gas cloud in which low-velocity fingers match the location of gas-filled faults visible in the reflection data. The resulting velocity model provides a better match to well logs, and better flattens common image gathers, than does the starting model. Reverse-time migration, using the FWI velocity model, provides significant uplift to the migrated image, simplifying the planform of the reservoir section at depth. The workflows, inversion strategy, and algorithms that we have used have broad application to invert a wide range of analogous field data sets.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/warner2012SINBADafw/warner2012SINBADafw_pres.pdf}, author = {Mike Warner} } @PRESENTATION{wason2012SINBADobs, title = {Ocean bottom seismic acquisition via jittered sampling}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {We present a pragmatic marine acquisition scheme where a single (or multiple) vessel sails across an ocean-bottom array firing airguns at optimally jittered source locations and instances in time. Following the principles of compressive sensing, we can significantly impact the reconstruction quality of conventional seismic data (from jittered data) and demonstrate successful recovery by sparsity promotion. In contrast to random (under)sampling, acquisition via jittered (under)sampling helps in controlling the maximum gap size, which is a practical requirement of wavefield reconstruction with localized sparsifying transforms. Results are illustrated with simulations of optimally jittered marine acquisition, and periodic time-dithering marine acquisition.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/wason2012SINBADobs/wason2012SINBADobs_pres.pdf}, author = {Haneet Wason} } @PRESENTATION{xiang2012SINBADfgn, title = {Fast {Gauss-Newton} full-waveform inversion with sparsity regularization}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {Full-waveform inversion (FWI) can be considered a controlled data-fitting process, in which we approximately fit observed data by iteratively updating the initial velocity model; we expect the final model to reveal subsurface structure once the wavefield misfit converges to the designed tolerance. The conventional FWI approach is expensive since it requires the inversion of a linear system, which involves extremely large multi-experiment data volumes. 
To overcome this issue we present a curvelet-based sparsity-promoting Gauss-Newton inversion method. In this presentation we invert for the model updates by replacing the normal Gauss-Newton linearized subproblem for subsampled FWI with a sparsity-promoting FWI formulation. We speed up the algorithm and avoid overfitting the data by solving the problem approximately. Aside from this, we control wavefield dispersion by gradually increasing grid size as we move to higher frequencies. Our approach is successful because it reduces the size of seismic data volumes without loss of information. With this reduction, we can compute a Newton-like update with the reduced data volume at the cost of roughly one gradient update for the fully sampled wavefield.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/xiang2012SINBADfgn/xiang2012SINBADfgn_pres.pdf}, author = {Xiang Li} } @PRESENTATION{xiang2012SINBADweb, title = {Wave-equation based inversion with joint sparsity promotion}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/xiang2012SINBADweb/xiang2012SINBADweb_pres.pdf}, author = {Xiang Li} } @PRESENTATION{yilmaz2012SINBADwms, title = {Weighted methods in sparse recovery}, booktitle = {SINBAD Fall consortium talks}, year = {2012}, organization = {SINBAD}, abstract = {In recent years, we have successfully employed "weighted" algorithms to recover sparse signals from few linear, non-adaptive measurements. The general principle here is to use prior knowledge about the signal to be recovered, e.g., approximate locations of large-in-magnitude transform coefficients, if such information is available. An example of this is the use of weighted 1-norm minimization to improve wavefield reconstruction from randomized (sub)sampling. We will review these results and outline some new directions we have explored during the last year, such as weighted non-convex sparse recovery (see Ghadermarzy's talk), weighted analysis-based recovery (see Hargreaves's talk), and a weighted randomized Kaczmarz algorithm for solving large overdetermined systems of equations that are known to admit a (nearly) sparse solution. Various seismic examples will be shown.}, keywords = {presentation, SINBAD, SINBADFALL2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Fall/yilmaz2012SINBADwms/yilmaz2012SINBADwms_pres.pdf}, author = {Ozgur Yilmaz} } %----- 2012 (SPRING) -----% @PRESENTATION{aravkin2012SINBADipu, title = {Inverse problems using {Student's} t}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/aravkin2012SINBADipu/aravkin2012SINBADipu_pres.pdf}, author = {Aleksandr Y. 
Aravkin} } @PRESENTATION{dasilva2012SINBADrdp, title = {Recent developments in preconditioning the wave-equation {Hessian}}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/dasilva2012SINBADrdp/dasilva2012SINBADrdp_pres.pdf}, author = {Curt Da Silva} } @PRESENTATION{herrmann2012SINBADlds, title = {Latest developments in seismic-data recovery}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/herrmann2012SINBADlds/herrmann2012SINBADlds_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2012SINBADsls, title = {Supercool(ed) least-squares imaging: latest insights in sparsity-promoting migration}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/herrmann2012SINBADsls/herrmann2012SINBADsls_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{ning2012SINBADSPRINGfim, title = {Fast imaging with multiples by sparse inversion}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/ning2012SINBADSPRINGfim/ning2012SINBADSPRINGfim_pres.pdf}, author = {Ning Tu} } @PRESENTATION{vanleeuwen2012SINBADoof, title = {An object-oriented framework for frequency-domain {FWI}}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/vanleeuwen2012SINBADoof/vanleeuwen2012SINBADoof_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{wason2012SINBADrma, title = {Randomized marine acquisition for ocean-bottom surveys}, booktitle = {SINBAD Spring consortium talks}, year = {2012}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2012, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2012/Spring/wason2012SINBADrma/wason2012SINBADrma_pres.pdf}, author = {Haneet Wason} } %----- 2011 (FALL) -----% @PRESENTATION{aravkin2011SINBADesp, title = {Extensions to sparsity promotion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/aravkin2011SINBADesp/aravkin2011SINBADesp_pres.pdf}, author = {Aleksandr Y. Aravkin} } @PRESENTATION{aravkin2011SINBADrfwi, title = {Robust {FWI} using {Student's} t & robust source estimation}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/aravkin2011SINBADrfwi/aravkin2011SINBADrfwi_pres.pdf}, author = {Aleksandr Y. 
Aravkin} } @PRESENTATION{aravkin2011SINBADrsp, title = {A randomized, sparsity promoting, Gauss-Newton algorithm for seismic waveform inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url1 = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/aravkin2011SINBADrsp/aravkin2011SINBADrsp_pres.pdf}, url2 = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/aravkin2011SINBADrsp/li2011SINBADrsp_pres.pdf}, author = {Aleksandr Y. Aravkin and Xiang Li} } @PRESENTATION{dasilva2011SINBADrdp, title = {Recent developments in preconditioning the {FWI} {Hessian}}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/dasilva2011SINBADrdp/dasilva2011SINBADrdp_pres.pdf}, author = {Curt Da Silva} } @PRESENTATION{friedlander2011SINBADrir, title = {Robust inversion, data-fitting, and inexact gradient methods}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/friedlander2011SINBADrir/friedlander2011SINBADrir_pres.pdf}, author = {Michael P. Friedlander} } @PRESENTATION{herrmann2011SINBADcoc, title = {Challenges and opportunities for compressive sensing in seismic acquisition}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/herrmann2011SINBADcoc/herrmann2011SINBADcoc_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2011SINBADcos, title = {Challenges and opportunities in sparse wavefield inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/herrmann2011SINBADcos/herrmann2011SINBADcos_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2011SINBADoverview, title = {Welcome and overview of {SINBAD} & {DNOISE}}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/herrmann2011SINBADoverview/herrmann2011SINBADoverview_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{herrmann2011SINBADrnr, title = {To redraw or not to redraw: recent insights in randomized dimensionality reduction for inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/herrmann2011SINBADrnr/herrmann2011SINBADrnr_pres.pdf}, author = {Felix J. 
Herrmann} } @PRESENTATION{jumah2011SINBADdrEPSI, title = {Dimensionality-reduced estimation of primaries by sparse inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/jumah2011SINBADdrEPSI/jumah2011SINBADdrEPSI_pres.pdf}, author = {Bander Jumah} } @PRESENTATION{li2011SINBADels, title = {Efficient least-squares imaging with sparsity promotion and compressive sensing}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/li2011SINBADels/li2011SINBADels_pres.pdf}, author = {Xiang Li} } @PRESENTATION{lin2011SINBADirEPSI, title = {Inside the robust {EPSI} formulation}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/lin2011SINBADirEPSI/lin2011SINBADirEPSI_pres.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{lin2011SINBADslim, title = {{SLIM's} software design principles}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/lin2011SINBADslim/lin2011SINBADslim_pres.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{mansour2011SINBADwcw, title = {Why do curvelets work?}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/mansour2011SINBADwcw/mansour2011SINBADwcw_pres.pdf}, author = {Hassan Mansour} } @PRESENTATION{min2011SINBADpss, title = {Parameter-selection strategy for density in frequency-domain elastic waveform inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/min2011SINBADpss/min2011SINBADpss_pres.pdf}, author = {Dong-Joo Min} } @PRESENTATION{ning2011SINBADmsr, title = {Migration from surface-related multiples}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/ning2011SINBADmsr/ning2011SINBADmsr_pres.pdf}, author = {Ning Tu} } @PRESENTATION{vanderneut2011SINBADirs, title = {Interferometric redatuming by sparse inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/vanderneut2011SINBADirs/vanderneut2011SINBADirs_pres.pdf}, author = {Joost van der Neut} } @PRESENTATION{vanleeuwen2011SINBADfwi, title = {Fast waveform inversion without source encoding}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = 
{https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/vanleeuwen2011SINBADfwi/vanleeuwen2011SINBADfwi_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{vanleeuwen2011SINBADoofwi, title = {Wavefield modelling and inversion in {Matlab}}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/vanleeuwen2011SINBADoofwi/vanleeuwen2011SINBADoofwi_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{vanleeuwen2011SINBADtem, title = {Towards extended modelling for velocity inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/vanleeuwen2011SINBADtem/vanleeuwen2011SINBADtem_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{wason2011SINBADode, title = {Only dither: efficient marine acquisition ``without" simultaneous sourcing}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/wason2011SINBADode/wason2011SINBADode_pres.pdf}, author = {Haneet Wason} } @PRESENTATION{yilmaz2011SINBADcsp, title = {Compressed sensing with prior information}, booktitle = {SINBAD Fall consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Fall/yilmaz2011SINBADcsp/yilmaz2011SINBADcsp_pres.pdf}, author = {Ozgur Yilmaz} } %----- 2011 (SPRING) -----% @PRESENTATION{aravkin2011SINBADSPRINGrfwi, title = {Robust {FWI} using {Student's} t-distribution}, booktitle = {SINBAD Spring consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Spring/aravkin2011SINBADSPRINGrfwi/aravkin2011SINBADSPRINGrfwi_pres.pdf}, author = {Aleksandr Y. Aravkin} } @PRESENTATION{herrmann2011SINBADdre, title = {Dimensionality-reduced estimation of primaries by sparse inversion}, booktitle = {SINBAD Spring consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBASPRING2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Spring/herrmann2011SINBADdre/herrmann2011SINBADdre_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{lin2011SINBADrep, title = {Robust {EPSI} in a curvelet-like representation domain}, booktitle = {SINBAD Spring consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Spring/lin2011SINBADrep/lin2011SINBADrep_pres.pdf}, author = {Tim T.Y. 
Lin} } @PRESENTATION{vanleeuwen2011SINBADfwiso, title = {A hybrid stochastic-deterministic method for waveform inversion}, booktitle = {SINBAD Spring consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Spring/vanleeuwen2011SINBADfwiso/vanleeuwen2011SINBADfwiso_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{vanleeuwen2011SINBADpei, title = {Probing the extended image volume}, booktitle = {SINBAD Spring consortium talks}, year = {2011}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADSPRING2011, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2011/Spring/vanleeuwen2011SINBADpei/vanleeuwen2011SINBADpei_pres.pdf}, author = {Tristan van Leeuwen} } %----- 2010 (FALL) -----% @PRESENTATION{almatar2010SINBADesf, title = {Estimation of surface-free data by curvelet-domain matched filtering and sparse inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/almatar2010SINBADesf/almatar2010SINBADesf_pres.pdf}, author = {Mufeed H. AlMatar} } @PRESENTATION{aravkin2010SINBADesf, title = {Exploiting sparsity in full-waveform inversion: nonlinear basis pursuit denoise algorithm}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/aravkin2010SINBADesf/aravkin2010SINBADesf_pres.pdf}, author = {Aleksandr Y. Aravkin} } @PRESENTATION{aravkin2010SINBADicc, title = {Introduction to convex composite optimization}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/aravkin2010SINBADicc/aravkin2010SINBADicc_pres.pdf}, author = {Aleksandr Y. Aravkin} } @PRESENTATION{friedlander2010SINBADaso, title = {Algorithms for sparse optimization}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/friedlander2010SINBADaso/friedlander2010SINBADaso_pres.pdf}, author = {Michael P. Friedlander} } @PRESENTATION{friedlander2010SINBADisl, title = {Introduction to {Spot}: a linear-operator toolbox}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/friedlander2010SINBADisl/friedlander2010SINBADisl_pres.pdf}, author = {Michael P. Friedlander} } @PRESENTATION{herrmann2010SINBADcss, title = {Compressive sensing and sparse recovery in exploration seismology}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/herrmann2010SINBADcss/herrmann2010SINBADcss_pres.pdf}, author = {Felix J. 
Herrmann} } @PRESENTATION{herrmann2010SINBADdrf, title = {Dimensionality reduction for full-waveform inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/herrmann2010SINBADdrf/herrmann2010SINBADdrf_pres.pdf}, author = {Felix J. Herrmann} } @PRESENTATION{kumar2010SINBADpoe, title = {Parallelizing operations with ease using Parallel {SPOT}}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/kumar2010SINBADpoe/kumar2010SINBADpoe_pres.pdf}, author = {Nameet Kumar} } @PRESENTATION{li2010SINBADci, title = {Compressive imaging}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/li2010SINBADci/li2010SINBADci_pres.pdf}, author = {Xiang Li} } @PRESENTATION{li2010SINBADfwi, title = {Full-waveform inversion with randomized {L1} recovery for the model updates}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/li2010SINBADfwi/li2010SINBADfwi_pres.pdf}, author = {Xiang Li} } @PRESENTATION{lin2010SINBADlib, title = {Leveraging informed blind deconvolution techniques for the estimation of primaries by sparse inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/lin2010SINBADlib/lin2010SINBADlib_pres.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{lin2010SINBADsol, title = {Sparse optimization and the {L1} norm}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/lin2010SINBADsol/lin2010SINBADsol_pres.pdf}, author = {Tim T.Y. Lin} } @PRESENTATION{lin2010SINBADsre, title = {Software release: estimation of primaries by {L1} inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, author = {Tim T.Y. 
Lin} } @PRESENTATION{mansour2010SINBADrcs, title = {Recovering compressively sampled signals using partial support information}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/mansour2010SINBADrcs/mansour2010SINBADrcs_pres.pdf}, author = {Hassan Mansour} } @PRESENTATION{modzelewski2010SINBADsra, title = {Software releases and architecture}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/modzelewski2010SINBADsra/modzelewski2010SINBADsra_pres.pdf}, author = {Henryk Modzelewski} } @PRESENTATION{moghaddam2010SINBADrfwi, title = {Randomized full-waveform inversion}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/moghaddam2010SINBADrfwi/moghaddam2010SINBADrfwi_pres.pdf}, author = {Peyman P. Moghaddam} } @PRESENTATION{pacteau2010SINBADkpo, title = {Kronecker product optimization in {SPOT}}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/pacteau2010SINBADkpo/pacteau2010SINBADkpo_pres.pdf}, author = {Sebastien Pacteau} } @PRESENTATION{saab2010SINBADcsk, title = {Compressed sensing using {Kronecker} products}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/saab2010SINBADcsk/saab2010SINBADcsk_pres.pdf}, author = {Rayan Saab} } @PRESENTATION{schmidt2010SINBADhsd, title = {Hybrid stochastic-deterministic methods}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/schmidt2010SINBADhsd/schmidt2010SINBADhsd_pres.pdf}, author = {Mark Schmidt} } @PRESENTATION{tu2010SINBADspm, title = {Sparsity promoting migration with surface-related multiples}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/tu2010SINBADspm/tu2010SINBADspm_pres.pdf}, author = {Ning Tu} } @PRESENTATION{vanleeuwen2010SINBADwis, title = {Waveform inversion by stochastic optimization}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/vanleeuwen2010SINBADwis/vanleeuwen2010SINBADwis_pres.pdf}, author = {Tristan van Leeuwen} } @PRESENTATION{wason2010SINBADssd, title = {Sequential source data recovery from simultaneous acquisition through transform-domain sparsity promotion - curvelet versus shearlet transform}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, 
SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/wason2010SINBADssd/wason2010SINBADssd_pres.pdf}, author = {Haneet Wason} } @PRESENTATION{yilmaz2010SINBADsac, title = {Sparse approximations and compressive sensing: an overview}, booktitle = {SINBAD Fall consortium talks}, year = {2010}, organization = {SINBAD}, keywords = {presentation, SINBAD, SINBADFALL2010, SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SINBAD/2010/Fall/yilmaz2010SINBADsac/yilmaz2010SINBADsac_pres.pdf}, author = {Ozgur Yilmaz} } %----- 2010 (SPRING) -----% % This file was created with JabRef 2.9. % Encoding: MacRoman @MASTERSTHESIS{hargreaves2014THssr, author = {Brock Hargreaves}, title = {Sparse signal recovery: analysis and synthesis formulations with prior support information}, school = {University of British Columbia}, year = {2014}, abstract = {The synthesis model for signal recovery has been the model of choice for many years in compressive sensing. Various weighting schemes using prior support information to adjust the objective function associated with the synthesis model have been shown to improve the recovery of the signal in terms of accuracy. Generally, even with no prior knowledge of the support, iterative methods can build support estimates and incorporate that into the recovery which has also been shown to increase the speed and accuracy of the recovery. However when the original signal is sparse with respect to a redundant dictionary (rather than an orthonormal basis) there is a counterpart model to synthesis, namely the analysis model, which has been less popular but has recently attracted more attention. The analysis model is much less understood and thus there are fewer theorems available in both the context of non-weighted and weighted signal recovery. In this thesis, we investigate weighting in both the analysis model and synthesis model in weighted $\ell_1$-minimization. Theoretical guarantees on reconstruction and various weighting strategies for each model are discussed. We give conditions for weighted synthesis recovery with frames which do not require strict incoherency conditions, this is based on recent results of regular synthesis with frames using optimal dual $\ell_1$ analysis. A novel weighting technique is introduced in the analysis case which outperforms its traditional counterparts in the case of seismic wavefield reconstruction. We also introduce a weighted split Bregman algorithm for analysis and optimal dual analysis. We then investigate these techniques on seismic data and synthetically created test data using a variety of frames.}, keywords = {MSc, thesis, sparse, analysis, synthesis, weighted $\ell_1$}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2014/hargreaves2014THssr/hargreaves2014THssr.pdf} } @MASTERSTHESIS{petrenko2014THaih, author = {Art Petrenko}, title = {Accelerating an iterative {Helmholtz} solver using reconfigurable hardware}, school = {University of British Columbia}, year = {2014}, abstract = {An implementation of seismic wave simulation on a platform consisting of a conventional host processor and a reconfigurable hardware accelerator is presented. This research is important in the field of exploration for oil and gas resources, where a 3D model of the subsurface of the Earth is frequently required. By comparing seismic data collected in a real-world survey with synthetic data generated by simulated waves, it is possible to deduce such a model. 
However, this requires many time-consuming simulations with different Earth models to find the one that best fits the measured data. Speeding up the wave simulations would allow more models to be tried, yielding a more accurate estimate of the subsurface. The reconfigurable hardware accelerator employed in this work is a field programmable gate array (FPGA). FPGAs are computer chips that consist of electronic building blocks that the user can configure and reconfigure to represent their algorithm in hardware. Whereas a traditional processor can be viewed as a pipeline for processing instructions, an FPGA is a pipeline for processing data. The chief advantage of the FPGA is that all the instructions in the algorithm are already hardwired onto the chip. This means that execution time depends only on the amount of data to be processed, and not on the complexity of the algorithm. The main contribution is an implementation of the well-known Kaczmarz row projection algorithm on the FPGA, using techniques of dataflow programming. This kernel is used as the preconditioning step of CGMN, a modified version of the conjugate gradients method that is used to solve the time-harmonic acoustic isotropic constant density wave equation. Using one FPGA-based accelerator, the current implementation allows seismic wave simulations to be performed over twice as fast, compared to running on one Intel Xeon E5-2670 core. I also discuss the effect of modifications of the algorithm necessitated by the hardware on the convergence properties of CGMN. Finally, a specific plan for future work is set out in order to fully exploit the accelerator platform, and the work is set in its larger context.}, keywords = {CG, CGMN, FPGA, Helmholtz, Kaczmarz, linear solver, Maxeler, MSc, thesis, wave equation}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2014/petrenko2014THaih/petrenko2014THaih.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2014/petrenko2014THaih/petrenko2014THaih_pres.pdf} } @MASTERSTHESIS{miao2014THesi, author = {Lina Miao}, title = {Efficient seismic imaging with spectral projector and joint sparsity}, school = {University of British Columbia}, year = {2014}, abstract = {In this thesis, we investigate the potential of improving the efficiency of seismic imaging with two advanced techniques: the spectral projector and 'joint sparsity'. The spectral projector offers an eigenvalue-decomposition-free computation routine that can filter out unstable evanescent wave components during wave-equation based depth extrapolation. 'Joint sparsity' aims to improve on pure sparsity-promoting recovery by making use of additional structure information of the signal.
Besides, a new sparsity optimization algorithm - PQNL1 - is proposed to improve both theoretical convergence rate and practical performance for extremely large seismic imaging problems.}, keywords = {MSc, thesis}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2014/miao2014THesi/miao2014THesi.pdf}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2014/miao2014THesi/miao2014THesi_pres.pdf} } @MASTERSTHESIS{ghadermarzy2013THups, author = {Navid Ghadermarzy}, title = {Using prior support information in compressed sensing}, school = {University of British Columbia}, year = {2013}, abstract = {Compressed sensing is a data acquisition technique that entails recovering estimates of sparse and compressible signals from $n$ linear measurements, significantly fewer than the signal ambient dimension $N$. In this thesis we show how we can reduce the required number of measurements even further if we incorporate prior information about the signal into the reconstruction algorithm. Specifically, we study certain weighted nonconvex $\ell_p$ minimization algorithms and a weighted approximate message passing algorithm. In Chapter 1 we describe compressed sensing as a practicable signal acquisition method in application and introduce the generic sparse approximation problem. Then we review some of the algorithms used in compressed sensing literature and briefly introduce the method we used to incorporate prior support information into these problems. In Chapter 2 we derive sufficient conditions for stable and robust recovery using weighted $\ell_p$ minimization and show that these conditions are better than those for recovery by regular $\ell_p$ and weighted $\ell_1$. We present extensive numerical experiments, both on synthetic examples and on audio, and seismic signals. In Chapter 3 we derive weighted AMP algorithm which iteratively solves the weighted $\ell_1$ minimization. We also introduce a reweighting scheme for weighted AMP algorithms which enhances the recovery performance of weighted AMP. We also apply these algorithms on synthetic experiments and on real audio signals.}, keywords = {MSc, thesis, compressed sensing, weighted $\ell_1$}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2013/ghadermarzy2013THups.pdf} } @MASTERSTHESIS{johnson2013THswr, author = {James Johnson}, title = {Seismic wavefield reconstruction using reciprocity}, school = {University of British Columbia}, year = {2013}, abstract = {The primary focus of most reflection seismic surveys is to help locate hydrocarbon recourses. Due to an ever increasing scarcity of these recourses, we must increase the size and quality of our seismic surveys. However, processing such large seismic data volumes to accurately recover earth properties is a painstaking and computationally intensive process. Due to the way reflection seismic surveys are conducted there are often holes in the collected data, where traces are not recorded. This can be due to physical or cost constraints. For some of the initial stages of processing these missing traces are of little consequence. However processes like multiple prediction and removal, interferometric ground roll prediction, and migration require densely sampled data on a regular grid. Thus the need to interpolate undersampled data cannot be ignored. 
Using the fact that reflection seismic data sets obey a reciprocal relationship in source and receiver locations, combined with recent advances in the field of compressed sensing, we show that, when properly regularized, the wavefield reconstruction problem can be solved with a high degree of accuracy. We exploit the compressible nature of seismic data in the curvelet domain to solve regularized $\ell_1$ recovery problems that seek to match the measured data and enforce the above-mentioned reciprocity. Using our method we were able to achieve results with a 20.45 dB signal-to-noise ratio when reconstructing a marine data set that had 50\% of its traces decimated. This is a 13.44 dB improvement over the same method run without taking reciprocity into account.}, keywords = {MSc, thesis}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2013/johnson2013THswr.pdf} } @MASTERSTHESIS{alhashim09THsdp, author = {Fadhel Abbas Alhashim}, title = {Seismic data processing with the parallel windowed curvelet transform}, school = {University of British Columbia}, year = {2009}, type = {masters}, abstract = {The process of obtaining high quality seismic images is very challenging when exploring new areas of high complexity. The seismic data to be processed come from the field noisy and commonly incomplete. Recently, major advances were accomplished in the area of coherent noise removal, for example, Surface Related Multiple Elimination (SRME). Predictive multiple elimination methods, such as SRME, consist of two steps: the first is the prediction step, in which multiples are predicted from the seismic data. The second is the separation step, in which primary reflections and surface-related multiples are separated; this involves matching the multiples predicted in the first step to the true multiples in the data and eventually removing them. A robust Bayesian wavefield separation method has recently been introduced to improve on the separation-by-matching methods. This method exploits the effectiveness of the multiscale and multi-angular curvelet transform in processing seismic images. The method produced excellent results and improved multiple removal. A considerable problem in the seismic processing field is the fact that seismic data are large and require a correspondingly large memory size and processing time. The fact that curvelets are redundant also increases the need for large memory to process seismic data. In this thesis we propose a parallel approach based on a windowing operator that divides large seismic data into smaller, more manageable datasets that can fit in memory, so that it is possible to apply the Bayesian separation process in parallel with minimal harm to the image quality and data integrity. However, by dividing the data, we introduce discontinuities. We take these discontinuities into account and compare two ways that different windows may communicate. The first method is to communicate edge information at only two steps, namely the data scattering and gathering processes, while applying the multiple separation on each window separately. The second method is to define our windowing operator as a global operator, which exchanges window edge information at each forward and inverse curvelet transform. We discuss the trade-off between the two methods, trying to minimize complexity and I/O time spent in the process.
We test our windowing operator on a seismic denoising problem and then apply the windowing operator to our sparse-domain Bayesian primary-multiple separation.}, keywords = {MSc}, presentation = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2009/alhashim09THsdp_pres.pdf}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2009/alhashim09THsdp.pdf} } @MASTERSTHESIS{almatar10THesd, author = {Mufeed H. AlMatar}, title = {Estimation of surface-free data by curvelet-domain matched filtering and sparse inversion}, school = {University of British Columbia}, year = {2010}, abstract = {A recent robust multiple-elimination technique, based on the underlying principle that relates the primary impulse response to the total upgoing wavefield, tries to change the paradigm that sees surface-related multiples as noise that needs to be removed from the data prior to imaging. This technique, estimation of primaries by sparse inversion (EPSI) (van Groenestijn and Verschuur, 2009; Lin and Herrmann, 2009), proposes an inversion procedure during which the source function and surface-free impulse response are directly calculated from the upgoing wavefield using an alternating optimization procedure. EPSI hinges on a delicate interplay between surface-related multiples and primaries. Finite aperture and other imperfections may violate this relationship. In this thesis, we investigate how to make EPSI more robust by incorporating curvelet-domain matching in its formulation. Compared to surface-related multiple removal (SRME), where curvelet-domain matching was used successfully, incorporating this step has the additional advantage that it matches multiples to multiples rather than predicted multiples to total data, as in SRME.}, keywords = {MSc}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2010/almatar10THesd.pdf} } @MASTERSTHESIS{dupuis05THssc, author = {Catherine Dupuis}, title = {Seismic singularity characterization with redundant dictionaries}, school = {The University of British Columbia}, year = {2005}, type = {masters}, address = {Vancouver, BC Canada}, abstract = {We consider seismic signals as a superposition of waveforms parameterized by their fractional orders. Each waveform models the reflection of a seismic wave at a particular transition between two lithological layers in the subsurface. The location of the waveforms in the seismic signal corresponds to the depth of the transitions in the subsurface, whereas their fractional order constitutes a measure of the sharpness of the transitions. By considering fractional-order transitions, we generalize the zero-order transition model of the conventional deconvolution problem, and aim at capturing the different types of transitions. The goal is to delineate and characterize transitions from seismic signals by recovering the locations and fractional orders of the corresponding waveforms. This problem has received increasing interest, and several methods have been proposed, including multi- and monoscale analysis based on Mallat{\textquoteright}s wavelet transform modulus maxima, and seismic atomic decomposition. We propose a new method based on a two-step approach, which divides the initial problem of delineating and characterizing transitions over the whole seismic signal into two easier sub-problems. The algorithm first partitions the seismic signal into its major components, and then estimates the fractional orders and locations of each component.
Both steps are based on the sparse decomposition of seismic signals in overcomplete dictionaries of waveforms parameterized by their fractional orders, and involve $\ell_1$ minimizations solved by an iterative thresholding algorithm. We present the method and show numerical results on both synthetic and real data.}, keywords = {MSc}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2005/dupuis05THssc.pdf} } @MASTERSTHESIS{kumar09THins, author = {Vishal Kumar}, title = {Incoherent noise suppression and deconvolution using curvelet-domain sparsity}, school = {University of British Columbia}, year = {2009}, type = {masters}, abstract = {Curvelets are a recently introduced transform domain belonging to a family of multiscale and multidirectional data expansions. As such, curvelets can be applied to resolve the issues posed by complicated seismic wavefronts. We make use of this multiscale, multidirectional and hence sparsifying ability of the curvelet transform to suppress incoherent noise from crustal data where the signal-to-noise ratio is low and to develop an improved deconvolution procedure. Incoherent noise present in seismic reflection data corrupts the quality of the signal and can often lead to misinterpretation. The curvelet domain lends itself particularly well to denoising because coherent seismic energy maps to a relatively small number of significant curvelet coefficients.}, keywords = {MSc}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2009/kumar09THins.pdf} } @MASTERSTHESIS{lebed08THssr, author = {Evgeniy Lebed}, title = {Sparse signal recovery in a transform domain}, school = {The University of British Columbia}, year = {2008}, type = {masters}, abstract = {The ability to efficiently and sparsely represent seismic data is becoming an increasingly important problem in geophysics. Over the last thirty years many transforms such as wavelets, curvelets, contourlets, surfacelets, shearlets, and many other types of x-lets have been developed. Such transforms were leveraged to resolve the issue of sparse representation. In this work we compare the properties of four of these commonly used transforms, namely the shift-invariant wavelets, complex wavelets, curvelets and surfacelets. We also explore the performance of these transforms for the problem of recovering seismic wavefields from incomplete measurements.}, keywords = {MSc}, month = {08}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2008/lebed08THssr.pdf} } @MASTERSTHESIS{maysami08THlcs, author = {Mohammad Maysami}, title = {Lithology constraints from seismic waveforms: application to opal-{A} to opal-{CT} transition}, school = {The University of British Columbia}, year = {2008}, type = {masters}, address = {Vancouver, BC Canada}, abstract = {In this work, we present a new method for seismic waveform characterization, which is aimed at extracting detailed litho-stratigraphical information from seismic data. We attempt to estimate the lithological attributes from seismic data according to our parametric representation of stratigraphical horizons, where the parameter values provide us with a direct link to the nature of lithological transitions. We test our method on a seismic dataset with a strong diagenetic transition (opal-A to opal-CT transition). Given some information from well cutting samples, we use a percolation-based model to construct the elastic profile of lithological transitions.
Our goal is to match the parametric representation of the diagenetic transition in both the real data and the synthetic data given by these elastic profiles. This match may be interpreted as a well-seismic tie, which reveals lithological information about stratigraphical horizons.}, keywords = {MSc}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2008/maysami08THlcs.pdf} } @MASTERSTHESIS{yarham08THsgs, author = {Carson Yarham}, title = {Seismic ground-roll separation using sparsity promoting $\ell_1$ minimization}, school = {The University of British Columbia}, year = {2008}, address = {Vancouver, BC Canada}, abstract = {The removal of coherent noise generated by surface waves in land-based seismic data is a prerequisite to imaging the subsurface. These surface waves, termed ground roll, overlay important reflector information in both the t-x and f-k domains. Standard ground-roll removal techniques commonly alter reflector information as a consequence of the removal. We propose the combined use of the curvelet domain as a sparsifying basis in which to perform signal separation techniques that can preserve reflector information while increasing ground-roll removal. We examine two signal separation techniques, a block-coordinate relaxation method and a Bayesian separation method. The derivations and background for both methods are presented and the parameter sensitivity is examined. Both methods are shown to be effective in certain situations involving synthetic data and erroneous surface wave predictions. The block-coordinate relaxation method is shown to have major weaknesses when dealing with seismic signal separation in the presence of noise and with the production of artifacts and reflector degradation. The Bayesian separation method is shown to improve overall separation for both seismic and real data. The Bayesian separation scheme is used on a real data set with a surface wave prediction containing reflector information. It is shown to improve the signal separation by recovering reflector information while improving the surface wave removal. A separate real data example is included in which the block-coordinate relaxation method and the Bayesian separation method are compared.}, keywords = {MSc}, month = {05}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2008/yarham08THsgs.pdf} } % This file was created with JabRef 2.9. % Encoding: MacRoman @TECHREPORT{esser2014SEGsgp, author = {Ernie Esser and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {A scaled gradient projection method for total variation regularized full waveform inversion}, year = {2014}, month = {04}, institution = {UBC}, abstract = {We propose an extended full waveform inversion formulation that includes convex constraints on the model. In particular, we show how to simultaneously constrain the total variation of the slowness squared while enforcing bound constraints to keep it within a physically realistic range. Synthetic experiments show that including total variation regularization can improve the recovery of a high velocity perturbation to a smooth background model.}, keywords = {full waveform inversion, convex constraints, total variation regularization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/esser2014SEGsgp/esser2014SEGsgp.html} } @TECHREPORT{zfang2014SEGsqn, author = {Zhilong Fang and Felix J.
Herrmann}, title = {A stochastic quasi-Newton {McMC} method for uncertainty quantification of full-waveform inversion}, year = {2014}, month = {04}, institution = {UBC}, abstract = {In this work we propose a stochastic quasi-Newton Markov chain Monte Carlo (McMC) method to quantify the uncertainty of full-waveform inversion (FWI). We formulate the uncertainty quantification problem in the framework of Bayesian inference, which expresses the posterior probability as the conditional probability of the model given the observed data. The Metropolis-Hastings algorithm is used to generate samples satisfying the posterior probability density function (pdf) to quantify the uncertainty. However, it suffers from the challenge of constructing a proposal distribution that simultaneously provides a good representation of the true posterior pdf and is easy to manipulate. To address this challenge, we propose a stochastic quasi-Newton McMC method, which relies on the fact that the Hessian of the deterministic problem is equivalent to the inverse of the covariance matrix of the posterior pdf. The l-BFGS (limited-memory Broyden–Fletcher–Goldfarb–Shanno) Hessian is used to approximate the inverse of the covariance matrix efficiently, and a randomized source sub-sampling strategy is used to reduce the computational cost of evaluating the posterior pdf and constructing the l-BFGS Hessian. Numerical experiments show the capability of this stochastic quasi-Newton McMC method to quantify the uncertainty of FWI at a considerably low cost.}, keywords = {FWI, uncertainty quantification, quasi-Newton, McMC}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/zfang2014SEGsqn/zfang2014SEGsqn.html} } @TECHREPORT{wang2014SEGfwi, author = {Rongrong Wang and Ozgur Yilmaz and Felix J. Herrmann}, title = {Full waveform inversion with interferometric measurements}, year = {2014}, month = {04}, institution = {UBC}, abstract = {In this note, we design new misfit functions for full-waveform inversion by using interferometric measurements to reduce sensitivity to phase errors. Though established within a completely different setting from the linear case, we obtain a similar observation: interferometry can improve robustness under certain modeling errors. Moreover, in order to deal with errors on both the source and receiver sides, we propose a higher-order interferometry which, as a generalization of the usual definition, involves the cross-correlation of four traces. Proof-of-principle simulations on a stylized example are included.}, keywords = {FWI}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/wang2014SEGfwi/wang2014SEGfwi.html} } @TECHREPORT{kumar2014SEGmcu, author = {Rajiv Kumar and Oscar Lopez and Ernie Esser and Felix J. Herrmann}, title = {Matrix completion on unstructured grids: 2-D seismic data regularization and interpolation}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Seismic data interpolation via rank-minimization techniques has recently been introduced in the seismic community. All the existing rank-minimization techniques assume the recording locations to be on a regular grid, e.g., sampled periodically, but seismic data are typically irregularly sampled along spatial axes. Other than the irregularity of the sampled grid, we often have missing data. In this paper, we study the effect of grid irregularity when conducting matrix completion on a regular grid for unstructured data.
We propose an improvement of existing rank-minimization techniques to do regularization. We also demonstrate that we can perform seismic data regularization and interpolation simultaneously. We illustrate the advantages of the modification using a real seismic line from the Gulf of Suez to obtain high quality results for regularization and interpolation, a key application in exploration geophysics.}, keywords = {regularization, interpolation, matrix completion, NFFT}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/kumar2014SEGmcu/kumar2014SEGmcu.html} } @TECHREPORT{smithyman2014SEGjfw, author = {Brendan R. Smithyman and Bas Peters and Bryan DeVault and Felix J. Herrmann}, title = {Joint full-waveform inversion of on-land surface and {VSP} data from the {Permian} {Basin}}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Full-waveform Inversion is applied to generate a high-resolution model of P-wave velocity for a site in the Permian Basin, Texas, USA. This investigation jointly inverts seismic waveforms from a surface 3-D vibroseis surface seismic survey and a co-located 3-D Vertical Seismic Profiling (VSP) survey, which shared common source Vibration Points (VPs). The resulting velocity model captures features that were not resolvable by conventional migration velocity analysis.}, keywords = {full-waveform inversion, seismic, land, vibroseis, downhole receivers}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/smithyman2014SEGjfw/smithyman2014SEGjfw.html} } @TECHREPORT{slim2014NSERCpr, author = {Felix J. Herrmann}, title = {{NSERC} 2014 {DNOISE} progress report}, year = {2014}, institution = {UBC}, abstract = {As we entered the second half of the DNOISE II project, we are happy to report that we have made significant progress on several fronts. Firstly, our work on seismic data acquisition with compressive sensing is becoming widely recognized. For instance, ConocoPhilips ran a highly successful field trial on Marine acquisition with compressive sensing and obtained significant improvements compared to standard production (see figure below). Moreover, one of the main outcomes of this year’s EAGE workshop was that industry is ready to adapt randomized sampling as a new acquisition paradigm. Needless to say this is a big success for what we have been trying to accomplish with DNOISE II. Finally, we have made a breakthrough in the application of randomized sampling in 4-D seismic, which is receiving a lot of interest from industry. Secondly, our work on large-scale optimization in the context of wave-equation based inversion is also increasingly widely adapted. For instance, our batching techniques are making the difference between making a loss or profit for a large contractor company active in the area of full-waveform inversion. We also continued to make progress in exciting new directions that go beyond sparsity promotion and which allow us to exploit other types of structure within the data, such as low-rank for matrices or hierarchical Tucker formats for tensors. Application of these techniques show excellent results and in certain cases, such as source separation problems with small dithering, show significant improvements over transform-domain methods. Thirdly, we continued to make significant progress in wave-equation based inversion. We extended our new penalty-based formulation now called Wavefield Reconstruction Inversion/Imaging to include total-variation regularization and density variations. 
We also continued to make progress on multiples, imaging with multiples and 3-D full-waveform inversion. Statoil is the latest company to join and we have several other companies that have shown a keen interest. We also received substantial in-kind contributions including a license to WesternGeco’s iOmega and HPC equipment discounts. After many years of support BP decided unfortunately to no longer support SINBAD quoting financial headwind related to the Deep horizon disaster. On a more positive note, we are extremely happy to report major progress on our efforts to secure access to high-performance compute, including renewed funding from NSERC and our involvement in the International Inversion Initiative in Brazil. 9 peer-reviewed journal publications have resulted from our work within the reporting period, with a further 6 submitted, and DNOISE members disseminated the results of our research at 49 major national and international conference presentations. On the HQP training side, 4 MSc students have recently graduated, with one obtaining a position with CGG Calgary, and we added 4 postdocs and 3 PhD students to our team in September 2014, greatly increasing our research capacity. As can be seen from the report below, we are well on schedule and on certain topics well beyond the milestones included in the original proposal. With the purchase of the new cluster we expect to see a surge of activity in extending our algorithms to 3D. With this increased capacity, we continue to be in an excellent position to make fundamental contributions to the fields of seismic data acquisition, processing, and wave-equation based inversion. In the sections below, we give a detailed overview of the research and publication activities of the different members of the group and how these relate to the objectives of the grant, to industrial uptake, and to outreach. Unless stated otherwise the students and PDFs are (co)-supervised by the PI. We refer to the publications section 4.0 for a complete list of our presentations, conference proceedings, and journal publications. We also refer to our mindmap, which clearly establishes connections between the different research topics we have embarked upon as part of the DNOISE II project.}, keywords = {NSERC, DNOISE, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Tech%20Report/NSERC/2014/Progress_Report_2014.html} } @TECHREPORT{dasilva2014htuck, author = {Curt Da Silva and Felix J. Herrmann}, title = {Optimization on the {Hierarchical} {Tucker} manifold - applications to tensor completion}, year = {2014}, month = {03}, institution = {UBC}, abstract = {In this work, we develop an optimization framework for problems whose solutions are well-approximated by Hierarchical Tucker (HT) tensors, an efficient structured tensor format based on recursive subspace factorizations. By exploiting the smooth manifold structure of these tensors, we construct standard optimization algorithms such as Steepest Descent and Conjugate Gradient for completing tensors from missing entries. Our algorithmic framework is fast and scalable to large problem sizes as we do not require SVDs on the ambient tensor space, as required by other methods. Moreover, we exploit the structure of the Gramian matrices associated with the HT format to regularize our problem, reducing overfitting for high subsampling ratios. We also find that the organization of the tensor can have a major impact on completion from realistic seismic acquisition geometries. 
These samplings are far from idealized randomized samplings that are usually considered in the literature but are realizable in practical scenarios. Using these algorithms, we successfully interpolate large-scale seismic data sets and demonstrate the competitive computational scaling of our algorithms as the problem sizes grow.}, keywords = {hierarchical tucker, structured tensor, tensor interpolation, differential geometry, riemannian optimization, gauss newton}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2014/dasilva2014htuck/dasilva2014htuck.pdf} } @TECHREPORT{slim2011NSERCpr, author = {Felix J. Herrmann}, title = {{NSERC} 2011 {DNOISE} progress report}, year = {2011}, institution = {UBC}, abstract = {The main thrust of the DNOISE project is focused on the following researth themes: [1] seismic acquisition design and recovery from incomplete data with the goal to reduce acquisition costs while increasing the spatial bandwidth and aperture of seismic data; [2] Removal of the 'surface nonlinearity' by simultaneous estimation of the source signature and the surface-free Green's function by inverting the surface-related multiple prediction operator; [3] Reduction of the computational complexity of full-waveform inversion (FWI) by randomized dimensionality reduction; [4] "Convexification" of FWI to remove or at least diminish the adverse effects of non-uniqueness that has plagued FWI since its inception; The first three themes are directed towards removing major impediments faced by FWI related to the costs of acquiring data, the computational costs of processing and inverting data, and to issues with source calibration and surface-related multiples. The final theme is more 'blue sky' and tries to incorporate ideas from migration-velocity analysis into the formulation of full-waveform inversion. Aside from these themes, we will continue to work on seismic data acquisition schemes that favor sparsity-promoting recovery and on the development of large-scale solvers using recent developments in convex and stochastic optimization.}, keywords = {NSERC, DNOISE, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/TechReport/NSERC/2011/nserc-2011-dnoise-progress-report.pdf} } @TECHREPORT{slim2010NSERCapp, author = {Felix J. Herrmann}, title = {{NSERC} 2010 {DNOISE} application}, year = {2010}, institution = {UBC}, abstract = {DNOISE II: Dynamic Nonlinear Optimization for Imaging in Seismic Exploration is a multidisciplinary research project that involves faculty from the Mathematics, Computer Science, and Earth and Ocean Sciences Departments at the University of British Columbia. DNOISE II constitutes a transformative research program towards a new paradigm in seismic exploration where the acquisition- and processing-related costs are no longer determined by the survey area and discretization but by transform-domain sparsity of the final result. In this approach, we rid ourselves from the confinements of conventional overly stringent sampling criteria that call for regular sampling with sequentiual sources at Nyquist rates. By adapting the principles of compressive sensing, DNOISE II promotes a ground-up formulation for seismic imaging where adverse subsampling-related artifacts are removed by intelligent simultaneous-acquisition design and recovery by transform-domain sparsity promotion. 
This development---in conjunction with our track records in sparse recovery and time-harmonic Helmholtz solvers---puts us in a unique position to deliver on fundamental breakthroughs in the development and implementation of the next generation of processing, imaging, and full-waveform inversion solutions.}, keywords = {NSERC, DNOISE, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/TechReport/NSERC/2010/nserc-2010-dnoise-application.pdf} } @TECHREPORT{oghenekohwo2013SEGtlswrs, author = {Felix Oghenekohwo and Felix J. Herrmann}, title = {Time-lapse seismics with randomized sampling}, year = {2013}, institution = {UBC}, abstract = {In time-lapse or 4D seismics, repeatability of the acquisition is a crucial step, as we do not want to introduce spurious events that are not there. In this paper, we propose an approach that avoids any requirement to repeat the surveys by using a randomized sampling technique that allows us to be more efficient in the acquisition. Our method applies to sampling data using ocean bottom nodes (OBN) as receivers. We test the efficacy of our proposed randomized acquisition geometry for time-lapse surveys on two different models. In the first example, the model properties do not change with time, while in the second example, the model exhibits a time-lapse effect which may be caused by the migration of fluid within the reservoir. We perform two types of randomized sampling - uniform randomized sampling and jittered sampling - to visualize the effects of non-repeatability in time-lapse surveys. We observe that jittered sampling is a more efficient method than uniform randomized sampling, due to its requirement to control the maximum spacing between the receivers. The results are presented, in the image space, as a least-squares migration of the model perturbation and they are shown for a subset of a synthetic model - the Marmousi model.}, keywords = {acquisition, time-lapse, migration, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SEG/2013/oghenekohwo2013SEGtlswrs/oghenekohwo2013SEGtlswrs.pdf} } @TECHREPORT{slim2013NSERCpr, author = {Felix J. Herrmann}, title = {{NSERC} 2013 {DNOISE} progress report}, year = {2013}, institution = {UBC}, abstract = {As we enter the second half of the DNOISE II project, we are happy to report that we have made significant progress on several fronts. Firstly, our work on seismic data acquisition with compressive sensing is becoming widely recognized, reflected in the adoption of this technology by industry and in this year’s SEG Karcher award, which went to Gilles Hennenfent, who was one of the researchers who started working in this area in our group. As this report shows, we continued to make progress on this topic with numerous presentations, publications, and software releases. Secondly, our work on large-scale optimization is also widely adopted and instrumental to the different research areas on the grant. In particular, we are excited about new directions that go beyond sparsity promotion and which allow us to exploit other types of structure within the data, such as low-rank. In the near future, we expect to see a body of new research based on these findings touching acquisition as well as the wave-equation based inversion aspects of our research program. Thirdly, we are also very happy to report that we continued to make substantial progress in wave-equation based inversion.
In particular, we would like to mention successes in the areas of acceleration of sparsity-promoting imaging with source estimation and multiples and in theoretical as well as practical aspects of full-waveform inversion. We derived a highly practical and economic formulation of 3-D FWI and we also came up with a completely new formulation of FWI, which mitigates issues related to cycle skipping. Finally, we made a lot of progress applying our algorithm to industrial datasets, which has been well received by industry. Our findings show that FWI is still an immature technology calling for more theoretical input and for the development of practical workflows. Over the last year our work culminated in 14 peer-reviewed journal publications, 5 submitted journal publications, 13 (+ 9) extended abstracts, 32 talks at international conferences, and 6 software packages. Finally, we are happy to report that we have been joined by several new companies, namely, ION Geophysical, CGG, and Woodside. At this midpoint of the Grant, we are also happy to report that we are well on schedule to meet the milestones included in the original proposal. Given our wide range of expertise and our plans to replace our compute cluster, we continue to be in an excellent position to make fundamental contributions to the fields of seismic data acquisition, processing, and wave-equation based inversion.}, keywords = {NSERC, DNOISE, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/TechReport/NSERC/2013/Progress_Report_2013.pdf} } @TECHREPORT{slim2012NSERCpr, author = {Felix J. Herrmann}, title = {{NSERC} 2012 {DNOISE} progress report}, year = {2012}, institution = {UBC}, keywords = {NSERC, DNOISE, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/TechReport/NSERC/2012/Progress_Report_2012.pdf} } @TECHREPORT{petrenko2013SEGsaoc, author = {Art Petrenko and Tristan van Leeuwen and Felix J. Herrmann}, title = {Software acceleration of {CARP}, an iterative linear solver and preconditioner}, year = {2013}, institution = {UBC}, abstract = {We present the results of software optimization of a row-wise preconditioner (Component Averaged Row Projections) for the method of conjugate gradients, which is used to solve the diagonally banded Helmholtz system representing frequency domain, isotropic acoustic seismic wave simulation. We demonstrate that in our application, a preconditioner bound to one processor core and accessing memory contiguously reduces execution time by 7\% for matrices having on the order of $10^8$ non-zeros. For reference we note that our C implementation is over 80 times faster than the corresponding code written for a high-level numerical analysis language.}, keywords = {Helmholtz equation, Kaczmarz, software, wave propagation, frequency-domain, private}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Conferences/SEG/2013/petrenko2013SEGsaoc/petrenko2013SEGsaoc.pdf} } @TECHREPORT{kumar2013ICMLlr, author = {Aleksandr Y. Aravkin and Rajiv Kumar and Hassan Mansour and Ben Recht and Felix J. Herrmann}, title = {An {SVD}-free {Pareto} curve approach to rank minimization}, year = {2013}, institution = {UBC}, abstract = {Recent SVD-free matrix factorization formulations have enabled rank optimization for extremely large-scale systems (millions of rows and columns). In this paper, we consider rank-regularized formulations that only require a target data-fitting error level, and propose an algorithm for the corresponding problem.
We illustrate the advantages of the new approach using the Netflix problem, and use it to obtain high quality results for seismic trace interpolation, a key application in exploration geophysics. We show that factor rank can be easily adjusted as the inversion proceeds, and propose a weighted extension that allows known subspace information to improve the results of matrix completion formulations. Using these methods, we obtain high-quality reconstructions for large scale seismic interpolation problems with real data.}, keywords = {interpolation, low-rank}, month = {02}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2013/kumar2013ICMLlr/kumar2013ICMLlr.pdf} } @TECHREPORT{li2013EAGEwebmplijsp, author = {Xiang Li and Felix J. Herrmann}, title = {Wave-equation based multi-parameter linearized inversion with joint-sparsity promotion}, year = {2013}, institution = {UBC}, abstract = {The successful application of linearized inversion is affected by the prohibitive size of the data, the computational resources required, and how accurately the model parameters reflect the real Earth properties. The issue of data size and computational resources can be addressed by combining ideas from sparsity promotion and stochastic optimization, which allow us to invert the model perturbation with a small subset of the data, requiring only a few PDE solves for the inversion. In this abstract, we aim to address the issue of accuracy of model parameters by inverting density and velocity simultaneously rather than only using velocity. As a matter of fact, the effects of density and velocity variations on the wavefield are very similar, which will cause energy leakage between density and velocity images. To overcome this issue, we propose an incoherence-enhanced method that can reduce the similarity between the effects of density and velocity. Moreover, the locations of structural variations in velocity and density often overlap in geological settings; thus, in this abstract, we also exploit this property with joint-sparsity promotion to further improve the imaging result.}, keywords = {linearized inversion, incoherence enhancement, joint-sparsity}, month = {01}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2013/li2013EAGEwebmplijsp/li2013EAGEwebmplijsp.pdf} } @TECHREPORT{vanLeeuwen2013Penalty2, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {A penalty method for {PDE}-constrained optimization}, year = {2013}, institution = {UBC}, abstract = {We present a method for solving PDE constrained optimization problems based on a penalty formulation. This method aims to combine advantages of both full-space and reduced methods by exploiting a large search-space (consisting of both control and state variables) while allowing for an efficient implementation that avoids storing and updating the state-variables. This leads to a method that has roughly the same per-iteration complexity as conventional reduced approaches while defining an objective that is less non-linear in the control variable by implicitly relaxing the constraint. We apply the method to a seismic inverse problem where it leads to a particularly efficient implementation when compared to a conventional reduced approach as it avoids the use of adjoint state-variables.
Numerical examples illustrate the approach and suggest that the proposed formulation can indeed mitigate some of the well-known problems with local minima in the seismic inverse problem.}, keywords = {waveform inversion, optimization, private}, month = {04}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Tech%20Report/2013/vanLeeuwen2013Penalty2/vanLeeuwen2013Penalty2.pdf} } @TECHREPORT{vanleeuwen2012CGMN, author = {Tristan van Leeuwen}, title = {Fourier analysis of the {CGMN} method for solving the {Helmholtz} equation}, year = {2012}, institution = {Department of Earth, Ocean and Atmospheric Sciences}, address = {The University of British Columbia, Vancouver}, abstract = {The Helmholtz equation arises in many applications, such as seismic and medical imaging. These applications are characterized by the need to propagate many wavelengths through an inhomogeneous medium. The typical size of the problems in 3D applications precludes the use of direct factorization to solve the equation and hence iterative methods are used in practice. For higher wavenumbers, the system becomes increasingly indefinite and thus good preconditioners need to be constructed. In this note we consider an accelerated Kaczmarz method (CGMN) and present an expression for the resulting iteration matrix. This iteration matrix can be used to analyze the convergence of the CGMN method. In particular, we present a Fourier analysis for the method applied to the 1D Helmholtz equation. This analysis suggests an optimal choice of the relaxation parameter. Finally, we present some numerical experiments.}, keywords = {Helmholtz equation, modelling}, url = {http://arxiv.org/abs/1210.2644}, } @TECHREPORT{almatar10SEGesfd, author = {Mufeed H. AlMatar and Tim T.Y. Lin and Felix J. Herrmann}, title = {Estimation of surface-free data by curvelet-domain matched filtering and sparse inversion}, institution = {Department of Earth and Ocean Sciences}, year = {2010}, address = {University of British Columbia, Vancouver}, abstract = {Matching seismic wavefields and images lies at the heart of many pre- and post-processing steps that are part of seismic imaging - whether one is matching predicted wavefield components, such as multiples, to the actual to-be-separated wavefield components present in the data or whether one is aiming to restore migration amplitudes by scaling, using an image-to-remigrated-image matching procedure to calculate the scaling coefficients. The success of these wavefield matching procedures depends on our ability to (i) control possible overfitting, which may lead to accidental removal of energy or to inaccurate image-amplitude corrections, (ii) handle data or images with nonunique dips, and (iii) apply subsequent wavefield separations or migration amplitude corrections stably. In this paper, we show that the curvelet transform allows us to address all these issues by imposing smoothness in phase space, by using their capability to handle conflicting dips, and by leveraging their ability to represent seismic data and images sparsely. This latter property renders curvelet-domain sparsity promotion an effective prior.}, keywords = {SEG}, organization = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/almatar10SEGesfd/almatar10SEGesfd.pdf} } @TECHREPORT{herrmann2010SEGerc, author = {Felix J.
Herrmann}, title = {Empirical recovery conditions for seismic sampling}, institution = {Department of Earth and Ocean Sciences, UBC}, year = {2010}, abstract = {In this paper, we offer an alternative sampling method leveraging recent insights from compressive sensing towards seismic acquisition and processing for data that are traditionally considered to be undersampled. The main outcome of this approach is a new technology where acquisition- and processing-related costs are no longer determined by overly stringent sampling criteria, such as Nyquist. At the heart of our approach lies randomized incoherent sampling that breaks subsampling-related interferences by turning them into harmless noise, which we subsequently remove by promoting transform-domain sparsity. Now, costs no longer grow with resolution and dimensionality of the survey area, but instead depend on transform-domain sparsity only. Our contribution is twofold. First, we demonstrate by means of carefully designed numerical experiments that compressive sensing can successfully be adapted to seismic acquisition. Second, we show that accurate recovery can be accomplished for compressively sampled data volume sizes that exceed the size of conventional transform-domain data volumes by only a small factor. Because compressive sensing combines transformation and encoding by a single linear encoding step, this technology is directly applicable to acquisition and to dimensionality reduction during processing. In either case, sampling, storage, and processing costs scale with transform-domain sparsity.}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2010/herrmann10SEGerc/herrmann10SEGerc.pdf} } @TECHREPORT{hennenfent08TRori, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {One-norm regularized inversion: learning from the {Pareto} curve}, institution = {UBC Earth and Ocean Sciences Department}, year = {2008}, number = {TR-2008-5}, abstract = {Geophysical inverse problems typically involve a trade-off between data misfit and some prior. Pareto curves trace the optimal trade-off between these two competing aims. These curves are commonly used in problems with two-norm priors where they are plotted on a log-log scale and are known as L-curves. For other priors, such as the sparsity-promoting one norm, Pareto curves remain relatively unexplored. First, we show how these curves provide an objective criterion to gauge how robust one-norm solvers are when they are limited by a maximum number of matrix-vector products that they can perform. Second, we use Pareto curves and their properties to define and compute one-norm compressibilities. We argue this notion is key to understanding one-norm regularized inversion. Third, we illustrate the correlation between the one-norm compressibility and the performance of Fourier and curvelet reconstructions with sparsity promoting inversion.}, keywords = {SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2008/hennenfent08TRori/hennenfent08TRori.pdf} } @TECHREPORT{rajiv2012SEGFRM, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {Fast methods for rank minimization with applications to seismic-data interpolation}, institution = {Department of Earth and Ocean Sciences}, year = {2012}, number = {TR-2012-04}, address = {University of British Columbia, Vancouver}, abstract = {Rank penalizing techniques are an important direction in seismic inverse problems, since they allow improved recovery by exploiting low-rank structure.
A major downside of current state of the art techniques is their reliance on the SVD of seismic data structures, which can be prohibitively expensive. Fortunately, recent work allows us to circumvent this problem by working with matrix factorizations. We review a novel approach to rank penalization, and successfully apply it to the seismic interpolation problem by exploiting the low-rank structure of seismic data in the midpoint-offset domain. Experiments for the recovery of 2D monochromatic data matrices and seismic lines represented as 3D volumes support the feasibility and potential of the new approach.}, keywords = {rank, optimization, seismic data interpolation}, month = {04}, organization = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications//Public/TechReport/2012/rajiv2012SEGFRM/rajiv2012SEGFRM.pdf} } @TECHREPORT{lebed08TRhgg, author = {Evgeniy Lebed and Felix J. Herrmann}, title = {A hitchhiker's guide to the galaxy of transform-domain sparsification}, institution = {UBC Earth and Ocean Sciences Department}, year = {2008}, number = {TR-2008-4}, abstract = {The ability to efficiently and sparsely represent seismic data is becoming an increasingly important problem in geophysics. Over the last decade many transforms such as wavelets, curvelets, contourlets, surfacelets, shearlets, and many other types of 'x-lets' have been developed to try to resolve this issue. In this abstract we compare the properties of four of these commonly used transforms, namely the shift-invariant wavelets, complex wavelets, curvelets and surfacelets. We also briefly explore the performance of these transforms for the problem of recovering seismic wavefields from incomplete measurements.}, keywords = {SLIM}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2008/lebed08TRhgg/lebed08TRhgg.pdf} } @TECHREPORT{tang09TRdtr, author = {Gang Tang and Reza Shahidi and Jianwei Ma}, title = {Design of two-dimensional randomized sampling schemes for curvelet-based sparsity-promoting seismic data recovery}, institution = {UBC Earth and Ocean Sciences Department}, year = {2009}, number = {TR-2009-03}, abstract = {The tasks of sampling, compression and reconstruction are very common and often necessary in seismic data processing due to the large size of seismic data. Curvelet-based Recovery by Sparsity-promoting Inversion, motivated by the newly developed theory of compressive sensing, is among the best recovery strategies for seismic data. The incomplete data input to this curvelet-based recovery is determined by randomized sampling of the original complete data. Unlike usual regular undersampling, randomized sampling can convert aliases to easy-to-eliminate noise, thus facilitating the process of reconstruction of the complete data from the incomplete data. Randomized sampling methods such as jittered sampling have been developed in the past that are suitable for curvelet-based recovery, however most have only been applied to sampling in one dimension. Considering that seismic datasets are usually higher dimensional and extremely large, in the present paper, we extend the 1D version of jittered sampling to two dimensions, both with underlying Cartesian and hexagonal grids. We also study separable and non-separable two dimensional jittered sampling, the former referring to the Kronecker product of two one-dimensional jittered samplings. 
These different categories of jittered sampling are compared against one another in terms of signal-to-noise ratio and visual quality, from which we find that jittered hexagonal sampling is better than jittered Cartesian sampling, while fully non-separable jittered sampling is better than separable sampling. Because in the image processing and computer graphics literature, sampling patterns with blue-noise spectra are found to be ideal to avoid aliasing, we also introduce two other randomized sampling methods, possessing sampling spectra with beneficial blue noise characteristics, Poisson Disk sampling and Farthest Point sampling. We compare these methods, and apply the introduced sampling methodologies to higher dimensional curvelet-based reconstruction. These sampling schemes are shown to lead to better results from CRSI compared to the other more traditional sampling protocols, e.g. regular subsampling.}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Journals/2009/tang09TRdtr/tang09TRdtr.pdf} } @TECHREPORT{vandenberg07TRipr, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {In pursuit of a root}, institution = {Department of Computer Science}, year = {2007}, month = {06}, number = {TR-2007-19}, address = {University of British Columbia, Vancouver}, abstract = {The basis pursuit technique is used to find a minimum one-norm solution of an underdetermined least-squares problem. Basis pursuit denoise fits the least-squares problem only approximately, and a single parameter determines a curve that traces the trade-off between the least-squares fit and the one-norm of the solution. We show that the function that describes this curve is convex and continuously differentiable over all points of interest. The dual solution of a least-squares problem with an explicit one-norm constraint gives function and derivative information needed for a root-finding method. As a result, we can compute arbitrary points on this curve. Numerical experiments demonstrate that our method, which relies on only matrix-vector operations, scales well to large problems.}, url = {http://www.optimization-online.org/DB_HTML/2007/06/1708.html} } @TECHREPORT{vanleeuwen2012SEGparallel, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {A parallel, object-oriented framework for frequency-domain wavefield imaging and inversion.}, institution = {Department of Earth and Ocean Sciences}, year = {2012}, number = {TR-2012-03}, address = {University of British Columbia, Vancouver}, abstract = {We present a parallel object-oriented matrix-free framework for frequency-domain seismic modeling, imaging and inversion. The key aspects of the framework are its modularity and level of abstraction, which allows us to write code that reflects the underlying mathematical structure and develop unit-tests that guarantee the fidelity of the code. By overloading standard linear-algebra operations, such as matrix-vector multiplications, we can use standard optimization packages to work with our code without any modification. This leads to a scalable testbed on which new methods can be rapidly prototyped and tested on medium-sized 2D problems. Although our current implementation uses (parallel) Matlab, all of these design principles can also be met by using lower-level languages which is important when we want to scale to realistic 3D problems. 
We present some numerical examples on synthetic data.}, keywords = {modeling, imaging, inversion, SEG}, month = {04}, organization = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2012/vanleeuwen2012SEGparallel/vanleeuwen2012SEGparallel.pdf} } @TECHREPORT{vanleeuwen2012smii, author = {Tristan van Leeuwen}, title = {A parallel matrix-free framework for frequency-domain seismic modelling, imaging and inversion in Matlab}, year = {2012}, abstract = {I present a parallel matrix-free framework for frequency-domain seismic modeling, imaging and inversion. The framework provides basic building blocks for designing and testing optimization-based formulations of both linear and non-linear seismic inverse problems. By overloading standard linear-algebra operations, such as matrix-vector multiplications, standard optimization packages can be used to work with the code without any modification. This leads to a scalable testbed on which new methods can be rapidly prototyped and tested on medium-sized 2D problems. I present some numerical examples on both linear and non-linear seismic inverse problems.}, keywords = {seismic imaging, optimization, Matlab, object-oriented programming}, month = {07}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2012/vanleeuwen2012smii/vanleeuwen2012smii.pdf} } @TECHREPORT{wang08TRbss, author = {Deli Wang and Rayan Saab and Ozgur Yilmaz and Felix J. Herrmann}, title = {Bayesian-signal separation by sparsity promotion: application to primary-multiple separation}, institution = {UBC Earth and Ocean Sciences Department}, year = {2008}, number = {TR-2008-1}, abstract = {Successful removal of coherent noise sources greatly determines the quality of seismic imaging. Major advances were made in this direction, e.g., Surface-Related Multiple Elimination (SRME) and interferometric ground-roll removal. Still, moderate phase, timing, amplitude errors and clutter in the predicted signal components can be detrimental. Adopting a Bayesian approach along with the assumption of approximate curvelet-domain independence of the to-be-separated signal components, we construct an iterative algorithm that takes the predictions produced by for example SRME as input and separates these components in a robust fashion. In addition, the proposed algorithm controls the energy mismatch between the separated and predicted components. Such a control, which was lacking in earlier curvelet-domain formulations, produces improved results for primary-multiple separation on both synthetic and real data.}, keywords = {signal separation, SLIM}, month = {01}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Journals/2008/wang08TRbss/paper_html/paper.html} } % This file was created with JabRef 2.9. % Encoding: MacRoman %-----2014-----% @UNPUBLISHED{kumar2014GEOPemc, author = {Rajiv Kumar and Curt Da Silva and Okan Akalin and Aleksandr Y. Aravkin and Hassan Mansour and Ben Recht and Felix J. Herrmann}, title = {Efficient matrix completion for seismic data reconstruction}, year = {2014}, month = {08}, institution = {UBC}, abstract = {Despite recent developments in improved acquisition, seismic data often remains undersampled along source and/or receiver coordinates, resulting in incomplete data for key applications such as migration and multiple prediction requiring densely sampled, alias-free wide azimuth data.
When seismic data is organized in monochromatic frequency slices, missing-trace interpolation can be cast into a matrix completion problem, where the low-rank structure of seismic data in the appropriate domain can be exploited to recover densely sampled data volumes from data with missing entries. Current approaches that exploit low-rank structure are based on repeated singular value decompositions, which become prohibitively expensive for large-scale problems unless the data is partitioned and processed in small windows. While windowing is computationally manageable, our theory and experiments show degraded results when the window sizes become too small. To overcome this problem, we carry out our interpolations for each frequency independently while working with the complete data in the midpoint-offset domain instead of windowing. For laterally varying geologies that are not too complex, working in the midpoint-offset domain leads to favorable rank minimization recovery because the singular values decay faster while sampling-related artifacts remain full rank. This combination of fast decay and full-rank artifacts agrees with the principles of the compressive sensing paradigm, which is based on exploiting (low-rank) structure, a sampling process that breaks this structure, and a rank-minimizing optimization that restores the signal's structure and interpolates the subsampled data. To make our proposed method computationally viable and practical, we introduce a factorization-based approach that avoids computing the singular values, and that therefore scales to large seismic data problems as long as the factors can be stored in memory. Tests on realistic two- and three-dimensional seismic data show that our method compares favorably, both in terms of computational speed and recovery quality, to existing curvelet-based and tensor-based techniques.}, keywords = {interpolation, low-rank, private}, note = {Submitted to Geophysics on August 8, 2014.}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2014/kumar2014GEOPemc/kumar2014GEOPemc.pdf} } @UNPUBLISHED{oghenekohwo2014GEOPfrt, author = {Felix Oghenekohwo and Haneet Wason and Ernie Esser and Felix J. Herrmann}, title = {Foregoing repetition in time-lapse seismic --- reaping benefits of randomized sampling and joint recovery}, year = {2014}, month = {06}, institution = {UBC}, abstract = {In the current paradigm of time-lapse seismic, guaranteeing repeatability in acquisition and processing of the baseline and monitor surveys ranks amongst the highest technical challenges we are faced with when recovering useful 4-D information. By using recent insights from the field of distributed compressive sensing, we show that under certain conditions, the constraint of survey repeatability can be relaxed as long as we jointly invert the different surveys from randomized samplings via a sparsity-promoting program that exploits shared information amongst the baseline and monitor surveys. Motivated by a series of stylized examples, which demonstrate the benefits of exploiting correlations between the vintages through joint recovery, we are able to compute high-fidelity time-lapse vintages and differences from randomly jittered simultaneous-source marine acquisitions. Results from both the stylized examples and realistic marine synthetics show that the recovery of the vintages and time-lapse signal improves when we bestow a certain degree of independence on the randomized acquisitions of the different surveys.
This suggests that by foregoing our insistence on strict repetition in time-lapse seismic, we will be opening fundamentally new opportunities to improve the quality and cost-effectiveness of time-lapse seismic acquisition. Our numerical experiments on a realistic synthetic confirm this enticing premise.}, keywords = {acquisition, time-lapse, marine, sampling, random, joint recovery method, private}, note = {Submitted to Geophysics on June 9, 2014.}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2014/oghenekohwo2014GEOPfrt/oghenekohwo2014GEOPfrt.html} } @UNPUBLISHED{vanLeeuwen2014pmpde, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {A penalty method for {PDE}-constrained optimization ({CONFIDENTIAL})}, year = {2014}, month = {04}, institution = {UBC}, abstract = {The invention relates to a partial-differential-equation (PDE) constrained optimization method and especially to a partial-differential-equation (PDE) constrained optimization method for geophysical prospecting.}, keywords = {FWI, optimization, patent, private}, note = {Patent filed on April 22, 2014. PCT International Application.}, url = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2014/vanLeeuwen2014pmpde/vanLeeuwen2014pmpde.pdf} } @UNPUBLISHED{lin2014SEGmdg, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Mitigating data gaps in the estimation of primaries by sparse inversion without data reconstruction}, year = {2014}, month = {04}, institution = {UBC}, abstract = {We propose to solve the Estimation of Primaries by Sparse Inversion problem from a seismic record with missing near-offsets and large holes without any explicit data reconstruction, by instead simulating the missing multiple contributions with terms involving auto-convolutions of the primary wavefield. Exclusion of the unknown data as an inversion variable from the REPSI process is desirable, since it eliminates a significant source of local minima that arises from attempting to invert for the unobserved traces using primary and multiple models that may be far away from the true solution. In this talk we investigate the necessary modifications to the Robust EPSI algorithm to account for the resulting non-linear modeling operator, and demonstrate that just a few auto-convolution terms are enough to satisfactorily mitigate the effects of data gaps during the inversion process.}, keywords = {EPSI, REPSI, multiples, inversion, algorithm}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/lin2014SEGmdg/lin2014SEGmdg.html} } @UNPUBLISHED{ghadermarzy2014SEGsti, author = {Navid Ghadermarzy and Ozgur Yilmaz and Felix J. Herrmann}, title = {Seismic trace interpolation with approximate message passing}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Approximate message passing (AMP) is a computationally effective algorithm for recovering high dimensional signals from a few compressed measurements. In this paper we use AMP to solve the seismic trace interpolation problem.
We also show that we can exploit the fast AMP algorithm to improve the recovery results of seismic trace interpolation in the curvelet domain, both in terms of convergence speed and recovery performance, by using AMP in the Fourier domain as a preprocessor for the L1 recovery in the curvelet domain.}, keywords = {interpolation, AMP}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/ghadermarzy2014SEGsti/ghadermarzy2014SEGsti.html} } @UNPUBLISHED{miao2014SEGrhss, author = {Lina Miao and Polina Zheglova and Felix J. Herrmann}, title = {Randomized {HSS} acceleration for full-wave-equation depth stepping migration}, year = {2014}, month = {04}, institution = {UBC}, abstract = {In this work we propose to use the spectral projector (Kenney and Laub, 1995) and randomized HSS technique (Chandrasekaran et al., 2006) to achieve a stable and affordable two-way wave equation depth stepping migration algorithm.}, keywords = {acoustic, randomized SVD, spectral projector, full wave equation migration, depth extrapolation}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/miao2014SEGrhss/miao2014SEGrhss.html} } @UNPUBLISHED{oghenekohwo2014SEGrsw, author = {Felix Oghenekohwo and Rajiv Kumar and Felix J. Herrmann}, title = {Randomized sampling without repetition in time-lapse surveys}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Vouching for higher levels of repeatability in acquisition and processing of time-lapse (4D) seismic data has become the standard with oil and gas contractor companies, with significant investment in the design of acquisition systems and processing algorithms that attempt to address some of the current 4D challenges, in particular, imaging weak 4D signals. Recent developments from the field of compressive sensing have shown the benefits of variants of randomized sampling in marine seismic acquisition and its impact on the future of seismic exploration. Following these developments, we show that the requirement for accurate survey repetition in time-lapse seismic data acquisition can be waived provided we solve a sparsity-promoting convex optimization program that makes use of the shared component between the baseline and monitor data. By setting up a framework for inversion of the stacked sections of time-lapse data, given the pre-stack data volumes, we are able to extract 4D signals with relatively high fidelity from significant subsamplings. Our formulation is applied to time-lapse data that has been acquired with different source/receiver geometries, paving the way for an efficient approach to dealing with time-lapse data acquired with initially poor repeatability levels, provided the survey geometry details are known afterwards.}, keywords = {acquisition, repetition, 4D, time-lapse, random}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/oghenekohwo2014SEGrsw/oghenekohwo2014SEGrsw.html} } @UNPUBLISHED{wason2014SEGrrt, author = {Haneet Wason and Felix Oghenekohwo and Felix J. Herrmann}, title = {Randomization and repeatability in time-lapse marine acquisition}, year = {2014}, month = {04}, institution = {UBC}, abstract = {We present an extension of our time-jittered simultaneous marine acquisition to time-lapse surveys where the requirement for repeatability in acquisition can be waived provided we know the acquisition geometry afterwards.
Our method, which does not require repetition, gives 4-D signals comparable to conventional methods where repeatability is key to their success.}, keywords = {marine, acquisition, time-lapse, deblending}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/wason2014SEGrrt/wason2014SEGrrt.html} } @UNPUBLISHED{wason2014SEGsss, author = {Haneet Wason and Rajiv Kumar and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {Source separation via {SVD}-free rank minimization in the hierarchical semi-separable representation}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Recent developments in matrix rank optimization have allowed for new computational approaches in the field of source separation or deblending. In this paper, we propose a source separation algorithm for blended marine acquisition, where two sources are deployed at different depths (over/under acquisition). The separation method incorporates the Hierarchical Semi-Separable structure (HSS) inside rank-regularized least-squares formulations. The proposed approach is suitable for large scale problems, since it avoids SVD computations and uses a low-rank factorized formulation instead. We illustrate the performance of the new HSS-based deblending approach by simulating an over/under blended acquisition, wherein uniformly random time delays (of < 1 second) are applied to one of the sources.}, keywords = {source separation, deblending, marine, acquisition, rank, HSS}, note = {(to be presented at the SEG)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Conferences/SEG/2014/wason2014SEGsss/wason2014SEGsss.html} } @article{tu2014fis, author = {Tu, Ning and Herrmann, Felix J.}, title = {Fast imaging with surface-related multiples by sparse inversion}, volume = {201}, number = {1}, pages = {304-317}, year = {2015}, doi = {10.1093/gji/ggv020}, abstract = {In marine exploration seismology, surface-related multiples are usually treated as noise mainly because subsequent processing steps, such as migration velocity analysis and imaging, require multiple-free data. Failure to remove these wavefield components from the data may lead to erroneous estimates for migration velocity or result in strong coherent artefacts that interfere with the imaged reflectors. However, multiples can carry complementary information compared to primaries, as they interact with the free surface and are therefore exposed more to the subsurface. Recent work has shown that when processed correctly multiples can improve seismic illumination. Given a sufficiently accurate background velocity model and an estimate for the source signature, we propose a new and computationally efficient linearized inversion procedure based on two-way wave equations, which produces accurate images of the subsurface from the total upgoing wavefield including surface-related multiples. Modelling of the surface-related multiples in the proposed method derives from the well-known surface-related multiple elimination method. We incur a minimal overhead from incorporating the multiples by having the wave-equation solver carry out the multiple predictions via the inclusion of an areal source instead of expensive dense matrix-matrix multiplications. By using subsampling techniques, we obtain high-quality true-amplitude least-squares migrated images at computational costs of roughly a single reverse-time migration (RTM) with all the data. These images are virtually free of coherent artefacts from multiples.
Proper inversion of the multiples would be computationally infeasible without using these techniques that significantly bring down the cost. By promoting sparsity in the curvelet domain and using rerandomization, our method gains improved robustness to errors in the background velocity model, and errors incurred in the linearization of the wave equation with respect to the model. We demonstrate the superior performance of the proposed method compared to the conventional RTM using realistic synthetic examples.}, URL = {http://gji.oxfordjournals.org/content/201/1/304.abstract}, eprint = {http://gji.oxfordjournals.org/content/201/1/304.full.pdf+html}, journal = {Geophysical Journal International} } @UNPUBLISHED{vanLeeuwen20143Dfds, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {{3D} frequency-domain seismic inversion with controlled sloppiness}, year = {2014}, month = {03}, abstract = {Seismic waveform inversion aims at obtaining detailed estimates of subsurface medium parameters, such as the spatial distribution of soundspeed, from multi-experiment seismic data. A formulation of this inverse problem in the frequency-domain leads to an optimization problem constrained by a Helmholtz equation with many right-hand-sides. Application of this technique to industry-scale problems faces several challenges: Firstly, we need to solve the Helmholtz equation for high wavenumbers over large computational domains. Secondly, the data consists of many independent experiments, leading to a large number of PDE-solves. This results in high computational complexity both in terms of memory and CPU time as well as i/o costs. Finally, the inverse problem is highly non-linear and a lot of art goes into preprocessing and regularization. Ideally, an inversion needs to be run several times with different initial guesses and/or tuning parameters. In this paper, we discuss the requirements of the various components (PDE-solver, optimization method, ...) when applied to large-scale 3D seismic waveform inversion and combine several existing approaches into a flexible inversion scheme for seismic waveform inversion. The scheme is based on the idea that in the early stages of the inversion we do not need all the data or very accurate PDE-solves. We base our method on an existing preconditioned Krylov solver (CARP-CG) and use ideas from stochastic optimization to formulate a gradient-based (Quasi-Newton) optimization algorithm that works with small subsets of the right-hand-sides and uses inexact PDE solves for the gradient calculations. We propose novel heuristics to adaptively control both the accuracy and the number of right-hand-sides.
We illustrate the algorithms on synthetic benchmark models for which significant computational gains can be made without being sensitive to noise and without losing accuracy of the inverted model.}, keywords = {waveform inversion, optimization}, note = {to appear in the SIAM Journal on Scientific Computing (SISC)}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/SIAM_Journal_on_Scientific_Computing/2014/vanLeeuwen20143Dfds/vanLeeuwen20143Dfds.pdf} } %-----2013-----% @UNPUBLISHED{ghadermarzy2013ncs, author = {Navid Ghadermarzy and Hassan Mansour and Ozgur Yilmaz}, title = {Non-convex compressed sensing using partial support information}, year = {2013}, institution = {UBC}, abstract = {In this paper we address the recovery conditions of weighted $\ell_p$ minimization for signal reconstruction from compressed sensing measurements when partial support information is available. We show that weighted $\ell_p$ minimization with 0 < p < 1 is stable and robust under weaker sufficient conditions compared to weighted $\ell_1$ minimization. Moreover, the sufficient recovery conditions of weighted $\ell_p$ are weaker than those of regular $\ell_p$ minimization if at least 50\% of the support estimate is accurate. We also review some algorithms which exist to solve the non-convex $\ell_p$ problem and illustrate our results with numerical experiments.}, keywords = {Compressed sensing, weighted $\ell_p$, nonconvex optimization, sparse reconstruction}, month = {11}, url = {http://arxiv.org/abs/1311.3773} } @UNPUBLISHED{aravkin2013SISCLR, author = {Aleksandr Y. Aravkin and Rajiv Kumar and Hassan Mansour and Ben Recht and Felix J. Herrmann}, title = {Fast methods for denoising matrix completion formulations, with application to robust seismic data interpolation}, year = {2013}, month = {05}, abstract = {Recent SVD-free matrix factorization formulations have enabled rank minimization for systems with millions of rows and columns, paving the way for matrix completion in extremely large-scale applications, such as seismic data interpolation. In this paper, we consider matrix completion formulations designed to hit a target data-fitting error level provided by the user, and propose an algorithm called LR-BPDN that is able to exploit factorized formulations to solve the corresponding optimization problem. Since practitioners typically have strong prior knowledge about target error level, this innovation makes it easy to apply the algorithm in practice, leaving only the factor rank to be determined. Within the established framework, we propose two extensions that are highly relevant to solving practical challenges of data interpolation. First, we propose a weighted extension that allows known subspace information to improve the results of matrix completion formulations. We show how this weighting can be used in the context of frequency continuation, an essential aspect of seismic data interpolation. Second, we propose matrix completion formulations that are robust to large measurement errors in the available data. We illustrate the advantages of LR-BPDN on the collaborative filtering problem using the MovieLens 1M, 10M, and Netflix 100M datasets.
Then, we use the new method, along with its robust and subspace re-weighted extensions, to obtain high-quality reconstructions for large scale seismic interpolation problems with real data, even in the presence of data contamination.}, keywords = {interpolation, denoising, robust, SVD-free}, url = {http://arxiv.org/abs/1302.4886} } % This file was created with JabRef 2.6. % Encoding: MacRoman % This file was created with JabRef 2.8.1. % Encoding: MacRoman @ARTICLE{vanLeeuwen2014GEOPcav, author = {Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {Comment on: “Application of the variable projection scheme for frequency-domain full-waveform inversion” (M. Li, J. Rickett, and A. Abubakar, Geophysics, 78, no. 6, R249–R257)}, year = {2014}, month = {05}, journal = {Geophysics}, volume = {79}, number = {3}, pages = {X11-X17}, keywords = {waveform inversion, variable projection}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2014/vanLeeuwen2014GEOPcav/vanLeeuwen2014GEOPcav.pdf}, doi = {10.1190/geo2013-0466.1}, note = {(discussion by Tristan van Leeuwen, Aleksandr Y. Aravkin, and Felix J. Herrmann)} } @ARTICLE{jumah2014GPdre, author = {Bander Jumah and Felix J. Herrmann}, title = {Dimensionality-reduced estimation of primaries by sparse inversion}, year = {2014}, month = {02}, journal = {Geophysical Prospecting}, keywords = {sparse inversion, factorization, primaries}, abstract = {Wave-equation based methods, such as the estimation of primaries by sparse inversion, have been successful in the mitigation of the adverse effects of surface-related multiples on seismic imaging and migration-velocity analysis. However, the reliance of these methods on multidimensional convolutions with fully sampled data exposes the ‘curse of dimensionality’, which leads to disproportionate growth in computational and storage demands when moving to realistic 3D field data. To remove this fundamental impediment, we propose a dimensionality-reduction technique where the ‘data matrix’ is approximated adaptively by a randomized low-rank factorization. Compared to conventional methods, which for each iteration need a pass through all of the data, possibly requiring on-the-fly interpolation, our randomized approach has the advantage that the total number of passes is reduced to only one to three. In addition, the low-rank matrix factorization leads to considerable reductions in storage and computational costs of the matrix multiplies required by the sparse inversion. Application of the proposed method to two-dimensional synthetic and real data shows that significant performance improvements in speed and memory use are achievable at a low computational up-front cost required by the low-rank factorization.}, url = {http://onlinelibrary.wiley.com/doi/10.1111/1365-2478.12113/abstract}, doi = {10.1111/1365-2478.12113}, note = {(article first published online: 21 FEB 2014)} } @ARTICLE{herrmann2013TLEffwi, author = {Felix J. Herrmann and Andrew J. Calvert and Ian Hanlon and Mostafa Javanmehri and Rajiv Kumar and Tristan van Leeuwen and Xiang Li and Brendan Smithyman and Eric Takam Takougang and Haneet Wason}, title = {Frugal full-waveform inversion: from theory to a practical algorithm}, year = {2013}, month = {09}, journal = {The Leading Edge}, volume = {32}, number = {9}, pages = {1082-1092}, abstract = {As conventional oil and gas fields are maturing, our profession is challenged to come up with the next-generation of more and more sophisticated exploration tools.
In exploration seismology this trend has led to the emergence of wave-equation based inversion technologies such as reverse-time migration and full-waveform inversion. While significant progress has been made in wave-equation based inversion, major challenges remain in the development of robust and computationally feasible workflows that give reliable results in geophysically challenging areas that may include ultra-low shear velocity zones or high-velocity salt. Moreover, sub-salt production carries risks that need mitigation, which raises the bar from creating sub-salt images to inverting for sub-salt overpressure.}, keywords = {waveform inversion, optimization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/The_Leading_Edge/2013/herrmann2013ffwi/herrmann2013ffwi.html}, doi = {10.1190/tle32091082.1} } @ARTICLE{vanLeeuwen2013GJImlm, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {Mitigating local minima in full-waveform inversion by expanding the search space}, year = {2013}, month = {10}, volume = {195}, pages = {661-667}, journal = {Geophysical Journal International}, abstract = {Wave equation based inversions, such as full-waveform inversion and reverse-time migration, are challenging because of their computational costs, memory requirements and reliance on accurate initial models. To confront these issues, we propose a novel formulation of wave equation based inversion based on a penalty method. In this formulation, the objective function consists of a data-misfit term and a penalty term, which measures how accurately the wavefields satisfy the wave equation. This new approach is a major departure from current formulations where forward and adjoint wavefields, which both satisfy the wave equation, are correlated to compute updates for the unknown model parameters. Instead, we carry out the inversions over two alternating steps during which we first estimate the wavefield everywhere, given the current model parameters, source and observed data, followed by a second step during which we update the model parameters, given the estimate for the wavefield everywhere and the source. Because the inversion involves both the synthetic wavefields and the medium parameters, its search space is enlarged so that it suffers less from local minima. Compared to other formulations that extend the search space of wave equation based inversion, our method differs in several aspects, namely (i) it avoids storage and updates of the synthetic wavefields because we calculate these explicitly by finding solutions that obey the wave equation and fit the observed data and (ii) no adjoint wavefields are required to update the model, instead our updates are calculated from these solutions directly, which leads to significant computational savings. We demonstrate the validity of our approach by carefully selected examples and discuss possible extensions and future research.}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalJournalInternational/2013/vanLeeuwen2013GJImlm/vanLeeuwen2013mlm.pdf}, doi = {10.1093/gji/ggt258}, eprint = {http://gji.oxfordjournals.org/content/early/2013/07/30/gji.ggt258.full.pdf+html} } @ARTICLE{mansour2013GEOPiwr, author = {Hassan Mansour and Felix J.
Herrmann and Ozgur Yilmaz}, title = {Improved wavefield reconstruction from randomized sampling via weighted one-norm minimization}, journal = {Geophysics}, year = {2013}, month = {08}, volume = {78}, number = {5}, pages = {V193-V206}, abstract = {Missing-trace interpolation aims to recover the gaps caused by physical obstacles or deliberate subsampling to control acquisition costs in otherwise regularly sampled seismic wavefields. Although transform-domain sparsity promotion has proven to be an effective tool to solve this recovery problem, current recovery techniques do not fully utilize a priori information derived from the locations of the transform-domain coefficients, especially when curvelet domain sparsity is exploited. We use recovery by weighted one-norm minimization, which exploits correlations between the locations of significant curvelet coefficients of different partitions, e.g., shot records, common-offset gathers, or frequency slices of the acquired data. We use these correlations to define a sequence of 2D curvelet-based recovery problems that exploit 3D continuity exhibited by seismic wavefields without relying on the highly redundant 3D curvelet transform. To test the performance of our weighted algorithm, we compared recoveries from different data sorting and partitioning scenarios for a seismic line from the Gulf of Suez. These tests demonstrated that our method is superior to standard $\ell_1$ minimization in terms of antialiasing capability, reconstruction quality and computational memory requirements.}, keywords = {trace interpolation, weighted one-norm minimization, compressed sensing, randomized sampling}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2013/mansour2013GEOPiwr/mansour2013GEOPiwr.pdf}, doi = {10.1190/geo2012-0383.1} } @ARTICLE{lin2013GEOPrepsi, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Robust estimation of primaries by sparse inversion via one-norm minimization}, journal = {Geophysics}, year = {2013}, month = {05}, volume = {78}, number = {3}, pages = {R133-R150}, abstract = {A recently proposed method called estimation of primaries by sparse inversion (EPSI) avoids the need for adaptive subtraction of approximate multiple predictions by directly inverting for the multiple-free subsurface impulse response as a collection of band-limited spikes. Although it can be shown that the correct primary impulse response is obtained through the sparsest possible solution, the original EPSI algorithm was not designed to take advantage of this result, and instead it relies on a multitude of inversion parameters, such as the level of sparsity per gradient update. We proposed and tested a new algorithm, named robust EPSI, in which we make obtaining the sparsest solution an explicit goal. Our approach remains a gradient-based approach like the original algorithm, but it is derived from a new biconvex optimization framework based on an extended basis-pursuit denoising formulation. Furthermore, because it is based on a general framework, robust EPSI can recover the impulse response in transform domains, such as sparsifying curvelet-based representations, without changing the underlying algorithm. We discovered that the sparsity-minimizing objective of our formulation enabled it to operate successfully on a variety of synthetic and field marine data sets without excessive tweaking of inversion parameters. 
We also found that recovering the solution in alternate sparsity domains can significantly improve the quality of the directly estimated primaries, especially for weaker late-arrival events. In addition, we found that robust EPSI produces a more artifact-free impulse response compared to the original algorithm.}, keywords = {multiples, optimization, sparsity, waveform inversion, pareto, biconvex, algorithm, EPSI}, doi = {10.1190/geo2012-0097.1}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2013/lin2013GEOPrepsi/lin2013GEOPrepsi.pdf} } @ARTICLE{shahidi2013GEOPROSars, author = {Reza Shahidi and Gang Tang and Jianwei Ma and Felix J. Herrmann}, title = {Application of randomized sampling schemes to curvelet-based sparsity-promoting seismic data recovery}, journal = {Geophysical Prospecting}, volume = {61}, number = {5}, pages = {973-997}, year = {2013}, month = {09}, abstract = {Reconstruction of seismic data is routinely used to improve the quality and resolution of seismic data from incomplete acquired seismic recordings. Curvelet-based Recovery by Sparsity-promoting Inversion, adapted from the recently-developed theory of compressive sensing, is one such kind of reconstruction, especially good for recovery of undersampled seismic data. Like traditional Fourier-based methods, it performs best when used in conjunction with randomized subsampling, which converts aliases from the usual regular periodic subsampling into easy-to-eliminate noise. By virtue of its ability to control gap size, along with the random and irregular nature of its sampling pattern, jittered (sub)sampling is one proven method that has been used successfully for the determination of geophone positions along a seismic line. In this paper, we extend jittered sampling to two-dimensional acquisition design, a more difficult problem, with both underlying Cartesian, and hexagonal grids. We also study what we term separable and non-separable two-dimensional jittered samplings. We find hexagonal jittered sampling performs better than Cartesian jittered sampling, while fully non-separable jittered sampling performs better than separable jittered sampling. Two other 2D randomized sampling methods, Poisson Disk sampling and Farthest Point sampling, both known to possess blue-noise spectra, are also shown to perform well.}, keywords = {Geophysical Prospecting, randomized sampling, curvelets}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalProspecting/2013/shahidi2013GEOPROSars/shahidi2013GEOPROSars.pdf}, doi = {10.1111/1365-2478.12050} } @ARTICLE{moghaddam2013GEOPnoa, author = {Peyman P. Moghaddam and Henk Keers and Felix J. Herrmann and Wim A. Mulder}, title = {A new optimization approach for source-encoding full-waveform inversion}, year = {2013}, month = {05}, journal = {Geophysics}, volume = {78}, number = {3}, pages = {R125-R132}, abstract = {Waveform inversion is the method of choice for determining highly heterogeneous subsurface structure. However, conventional waveform inversion requires that the wavefield for each source is computed separately. This makes it very expensive for realistic 3D seismic surveys. Source-encoding waveform inversion, in which the sources are modelled simultaneously, is considerably faster than conventional waveform inversion but suffers from artifacts. These artifacts can partly be removed by assigning random weights to the source wavefields. 
We found that the misfit function, and therefore also its gradient, for source-encoding waveform inversion is an unbiased random estimation of the misfit function used in conventional waveform inversion. We found a new method of source-encoding waveform inversion which takes into account the random nature of the gradients used in the optimization. In this new method, the gradient at each iteration is a weighted average of past gradients such that the most recent gradients have the largest weights with exponential decay. This way we damped the random fluctuations of the gradient by incorporating information from the previous iterations. We compare this new method with existing source-encoding waveform inversion methods as well as conventional waveform inversion and found that the model misfit reduction is faster and smoother than those of existing source-encoding waveform inversion methods, and it approaches the model misfit reduction obtained in conventional waveform inversion.}, keywords = {Geophysics, FWI, optimization, source encoding}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2013/moghaddam2013GEOPnoa/moghaddam2013GEOPnoa.pdf}, doi = {10.1190/GEO2012-0090.1} } @ARTICLE{vanderneut2013GJIirs, author = {Joost {van der Neut} and Felix J. Herrmann}, title = {Interferometric redatuming by sparse inversion}, journal = {Geophysical Journal International}, year = {2013}, month = {02}, volume = {192}, pages = {666-670}, abstract = {Assuming that exact transmission responses are known between the surface and a particular depth level in the subsurface, seismic sources can be effectively mapped to that level by a process called interferometric redatuming. After redatuming, the obtained wavefields can be used for imaging below this particular depth level. Interferometric redatuming consists of two steps, namely (i) the decomposition of the observed wavefields into up- and down-going constituents and (ii) a multidimensional deconvolution of the up- and downgoing wavefields. While this method works in theory, sensitivity to noise and artifacts due to incomplete acquisition call for a different formulation. In this letter, we demonstrate the benefits of formulating the two steps that undergird interferometric redatuming in terms of a transform-domain sparsity-promoting program. By exploiting compressibility of seismic wavefields in the curvelet domain, we not only become robust with respect to noise but we are also able to remove certain artifacts while preserving the frequency content. These improvements lead to a better image of the target from the redatumed data.}, keywords = {Controlled source seismology, interferometry, inverse theory}, url = {http://gji.oxfordjournals.org/content/192/2/666} } @ARTICLE{berg2008SJSCpareto, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {Probing the {Pareto} frontier for basis pursuit solutions}, journal = {SIAM Journal on Scientific Computing}, year = {2008}, volume = {31}, pages = {890-912}, number = {2}, month = {01}, abstract = {The basis pursuit problem seeks a minimum one-norm solution of an underdetermined least-squares problem. Basis pursuit denoise (BPDN) fits the least-squares problem only approximately, and a single parameter determines a curve that traces the optimal trade-off between the least-squares fit and the one-norm of the solution. 
We prove that this curve is convex and continuously differentiable over all points of interest, and show that it gives an explicit relationship to two other optimization problems closely related to BPDN. We describe a root-finding algorithm for finding arbitrary points on this curve; the algorithm is suitable for problems that are large scale and for those that are in the complex domain. At each iteration, a spectral gradient-projection method approximately minimizes a least-squares problem with an explicit one-norm constraint. Only matrix-vector operations are required. The primal-dual solution of this problem gives function and derivative information needed for the root-finding method. Numerical experiments on a comprehensive set of test problems demonstrate that the method scales well to large problems.}, keywords = {basis pursuit, convex program, duality, Newton{\textquoteright}s method, one-norm regularization, projected gradient, root-finding, sparse solutions, optimization}, doi = {10.1137/080714488}, publisher = {SIAM}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/SIAM_Journal_on_Scientific_Computing/2008/vanderberg08SIAMptp/vanderberg08SIAMptp.pdf} } @ARTICLE{vandenberg2010IEEEter, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {Theoretical and empirical results for recovery from multiple measurements}, journal = {IEEE Transactions on Information Theory}, year = {2010}, month = {05}, volume = {56}, number = {5}, pages = {2516-2527}, abstract = {The joint-sparse recovery problem aims to recover, from sets of compressed measurements, unknown sparse matrices with nonzero entries restricted to a subset of rows. This is an extension of the single-measurement-vector (SMV) problem widely studied in compressed sensing. We analyze the recovery properties for two types of recovery algorithms. First, we show that recovery using sum-of-norm minimization cannot exceed the uniform recovery rate of sequential SMV using L1 minimization, and that there are problems that can be solved with one approach but not with the other. Second, we analyze the performance of the ReMBo algorithm [M. Mishali and Y. Eldar, IEEE Trans. Sig. Proc., 56 (2008)] in combination with L1 minimization, and show how recovery improves as more measurements are taken. From this analysis it follows that having more measurements than number of nonzero rows does not improve the potential theoretical recovery rate.}, keywords = {convex optimization, joint sparsity, multiple channels, sparse recovery}, doi = {10.1109/TIT.2010.2043876}, url = {http://www.math.ucdavis.edu/%7Empf/2010-joint-sparsity.html} } @ARTICLE{vandenberg08gsv, author = {Ewout {van den Berg} and Mark Schmidt and Michael P. Friedlander and K. Murphy}, title = {Group sparsity via linear-time projection}, year = {2008}, number = {TR-2008-09}, month = {06}, abstract = {We present an efficient spectral projected-gradient algorithm for optimization subject to a group one-norm constraint. Our approach is based on a novel linear-time algorithm for Euclidean projection onto the one- and group one-norm constraints. Numerical experiments on large data sets suggest that the proposed method is substantially more efficient and scalable than existing methods.}, institution = {UBC - Department of Computer Science}, keywords = {SLIM, optimization}, url = {http://www.cs.ubc.ca/~ewout78/papers/TR-2008-09.pdf} } @ARTICLE{vandenberg2009ACMstf, author = {Ewout {van den Berg} and Michael P. Friedlander and Gilles Hennenfent and Felix J. 
Herrmann and Rayan Saab and Ozgur Yilmaz}, title = {Sparco: a testing framework for sparse reconstruction}, year = {2009}, month = {02}, journal = {{ACM} Transactions on Mathematical Software}, volume = {35}, number = {4}, pages = {1-16}, abstract = {Sparco is a framework for testing and benchmarking algorithms for sparse reconstruction. It includes a large collection of sparse reconstruction problems drawn from the imaging, compressed sensing, and geophysics literature. Sparco is also a framework for implementing new test problems and can be used as a tool for reproducible research. Sparco is implemented entirely in Matlab, and is released as open-source software under the GNU Public License.}, keywords = {compressed sensing, sparse recovery, linear operators}, url = {http://doi.acm.org/10.1145/1462173.1462178} } @ARTICLE{aravkin2012IPNuisance, author = {Aleksandr Y. Aravkin and Tristan {van Leeuwen}}, title = {Estimating nuisance parameters in inverse problems}, journal = {Inverse Problems}, year = {2012}, volume = {28}, number = {11}, month = {10}, pages={115016}, abstract = {Many inverse problems include nuisance parameters which, while not of direct interest, are required to recover primary parameters. Structure present in these problems allows efficient optimization strategies - a well known example is variable projection, where nonlinear least squares problems which are linear in some parameters can be very efficiently optimized. In this paper, we extend the idea of projecting out a subset of the variables to a broad class of maximum likelihood (ML) and maximum a posteriori likelihood (MAP) problems with nuisance parameters, such as variance or degrees of freedom. As a result, we are able to incorporate nuisance parameter estimation into large-scale constrained and unconstrained inverse problem formulations. We apply the approach to a variety of problems, including estimation of unknown variance parameters in the Gaussian model, degree of freedom (d.o.f.) parameter estimation in the context of robust inverse problems, automatic calibration, and optimal experimental design. Using numerical examples, we demonstrate improvement in recovery of primary parameters for several large-scale inverse problems. The proposed approach is compatible with a wide variety of algorithms and formulations, and its implementation requires only minor modifications to existing algorithms.}, keywords = {full waveform inversion, students t, variance}, doi = {10.1088/0266-5611/28/11/115016}, url = {http://arxiv.org/abs/1206.6532}, url2 = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/InverseProblems/2012/aravkin2012IPNuisance/aravkin2012IPNuisance.pdf}, url3 = {http://iopscience.iop.org/0266-5611/28/11/115016/} } @ARTICLE{Aravkin11TRridr, author = {Aleksandr Y. Aravkin and Michael P. Friedlander and Felix J. Herrmann and Tristan van Leeuwen}, title = {Robust inversion, dimensionality reduction, and randomized sampling}, journal = {Mathematical Programming}, year = {2012}, volume = {134}, pages = {101-125}, number = {1}, month = {08}, abstract = {We consider a class of inverse problems in which the forward model is the solution operator to linear ODEs or PDEs. This class admits several dimensionality-reduction techniques based on data averaging or sampling, which are especially useful for large-scale problems. We survey these approaches and their connection to stochastic optimization. 
The data-averaging approach is only viable, however, for a least-squares misfit, which is sensitive to outliers in the data and artifacts unexplained by the forward model. This motivates us to propose a robust formulation based on the Student's t-distribution of the error. We demonstrate how the corresponding penalty function, together with the sampling approach, can obtain good results for a large-scale seismic inverse problem with 50 \% corrupted data.}, keywords = {inverse problems, seismic inversion, stochastic optimization, robust estimation, optimization, FWI}, doi = {10.1007/s10107-012-0571-6}, url = {http://www.springerlink.com/content/35rwr101h5736340/}, url2 = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/MathematicalProgramming/aravkin2012MPrid/aravkin2012MPrid.pdf} } @ARTICLE{bernabe2004JGRpas, author = {Y. Bernab{\'e} and U. Mok and B. Evans and Felix J. Herrmann}, title = {Permeability and storativity of binary mixtures of high- and low-porosity materials}, journal = {Journal of Geophysical Research}, year = {2004}, volume = {109}, pages = {B12207}, month = {10}, abstract = {As a first step toward determining the mixing laws for the transport properties of rocks, we prepared binary mixtures of high- and low-permeability materials by isostatically hot-pressing mixtures of fine powders of calcite and quartz. The resulting rocks were marbles containing varying concentrations of dispersed quartz grains. Pores were present throughout the rock, but the largest ones were preferentially associated with the quartz particles, leading us to characterize the material as being composed of two phases, one with high permeability and the second with low permeability. We measured the permeability and storativity of these materials using the oscillating flow technique, while systematically varying the effective pressure and the period and amplitude of the input fluid oscillation. Control measurements performed using the steady state flow and pulse decay techniques agreed well with the oscillating flow tests. The hydraulic properties of the marbles were highly sensitive to the volume fraction of the high-permeability phase (directly related to the quartz content). Below a critical quartz content, slightly less than 20 wt \%, the high-permeability volume elements were disconnected, and the overall permeability was low. Above the critical quartz content the high-permeability volume elements formed throughgoing paths, and permeability increased sharply. We numerically simulated fluid flow through binary materials and found that permeability approximately obeys a percolation-based mixing law, consistent with the measured permeability of the calcite-quartz aggregates.}, keywords = {permeability, porosity, SLIM, modeling}, doi = {10.1029/2004JB00311}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/JournalOfGeophysicalResearch/2004/bernabe04JGRpas/bernabe04JGRpas.pdf} } @ARTICLE{mansour2012IEEETITrcs, author = {Michael P. Friedlander and Hassan Mansour and Rayan Saab and Ozgur Yilmaz}, title = {Recovering compressively sampled signals using partial support information}, journal = {IEEE Trans. on Information Theory}, year = {2012}, volume = {58}, pages = {1122-1134}, number = {2}, month = {02}, abstract = {We study recovery conditions of weighted $\ell_1$ minimization for signal reconstruction from compressed sensing measurements when partial support information is available. 
We show that if at least 50\% of the (partial) support information is accurate, then weighted $\ell_1$ minimization is stable and robust under weaker sufficient conditions than the analogous conditions for standard $\ell_1$ minimization. Moreover, weighted $\ell_1$ minimization provides better upper bounds on the reconstruction error in terms of the measurement noise and the compressibility of the signal to be recovered. We illustrate our results with extensive numerical experiments on synthetic data and real audio and video signals.}, address = {University of British Columbia, Vancouver}, institution = {Department of Computer Science}, keywords = {compressive sensing}, doi = {10.1109/TIT.2011.2167214}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/IEEETransInformationTheory/2012/mansour2012IEEETITrcs/mansour2012IEEETITrcs.pdf} } @ARTICLE{vandenberg2011SIAMsol, author = {Ewout {van den Berg} and Michael P. Friedlander}, title = {Sparse optimization with least-squares constraints}, journal = {SIAM Journal on Optimization}, year = {2011}, month = {11}, volume = {21}, number = {4}, pages = {1201–1229}, abstract = {The use of convex optimization for the recovery of sparse signals from incomplete or compressed data is now common practice. Motivated by the success of basis pursuit in recovering sparse vectors, new formulations have been proposed that take advantage of different types of sparsity. In this paper we propose an efficient algorithm for solving a general class of sparsifying formulations. For several common types of sparsity we provide applications, along with details on how to apply the algorithm, and experimental results.}, keywords = {basis pursuit, compressed sensing, convex program, duality, group sparsity, matrix completion, Newton’s method, root-finding, sparse solutions}, doi = {10.1137/100785028}, url = {http://www.math.ucdavis.edu/%7Empf/2010-sparse-optimization-with-least-squares.html} } @ARTICLE{Friedlander11TRhdm, author = {Michael P. Friedlander and Mark Schmidt}, title = {Hybrid deterministic-stochastic methods for data fitting}, journal = {SIAM Journal on Scientific Computing}, year = {2012}, volume = {34}, pages = {A1380-A1405}, number = {3}, month = {01}, abstract = {Many structured data-fitting applications require the solution of an optimization problem involving a sum over a potentially large number of measurements. Incremental gradient algorithms (both deterministic and randomized) offer inexpensive iterations by sampling only subsets of the terms in the sum. These methods can make great progress initially, but often slow as they approach a solution. In contrast, full gradient methods achieve steady convergence at the expense of evaluating the full objective and gradient on each iteration. We explore hybrid methods that exhibit the benefits of both approaches. Rate of convergence analysis and numerical experiments illustrate the potential for the approach.}, keywords = {optimization}, doi = {10.1137/110830629}, publisher = {Department of Computer Science}, url = {http://www.cs.ubc.ca/%7Empf/2011-hybrid-for-data-fitting.html} } @ARTICLE{friedlander2011CoRRhybrid, author = {Michael P. Friedlander and Mark Schmidt}, title = {Hybrid deterministic-stochastic methods for data fitting}, journal = {CoRR}, year = {2011}, month = {04}, abstract = {Many structured data-fitting applications require the solution of an optimization problem involving a sum over a potentially large number of measurements. 
Incremental gradient algorithms (both deterministic and randomized) offer inexpensive iterations by sampling only subsets of the terms in the sum. These methods can make great progress initially, but often slow as they approach a solution. In contrast, full gradient methods achieve steady convergence at the expense of evaluating the full objective and gradient on each iteration. We explore hybrid methods that exhibit the benefits of both approaches. Rate of convergence analysis and numerical experiments illustrate the potential for the approach.}, keywords = {optimization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/CoRR/2011/friedlander11hybrid.pdf} } @ARTICLE{friedlander2007SJOero, author = {Michael P. Friedlander and P. Tseng}, title = {Exact regularization of convex programs}, journal = {SIAM Journal on Optimization}, year = {2007}, volume = {18}, pages = {1326-1350}, number = {4}, month = {05}, abstract = {The regularization of a convex program is exact if all solutions of the regularized problem are also solutions of the original problem for all values of the regularization parameter below some positive threshold. For a general convex program, we show that the regularization is exact if and only if a certain selection problem has a Lagrange multiplier. Moreover, the regularization parameter threshold is inversely related to the Lagrange multiplier. We use this result to generalize an exact regularization result of Ferris and Mangasarian [Appl. Math. Optim., 23(1991), pp. 266{\textendash}273] involving a linearized selection problem. We also use it to derive necessary and sufficient conditions for exact penalization, similar to those obtained by Bertsekas [Math. Programming, 9(1975), pp. 87{\textendash}99] and by Bertsekas, Nedi\'{c}, and Ozdaglar [Convex Analysis and Optimization, Athena Scientific, Belmont, MA, 2003]. When the regularization is not exact, we derive error bounds on the distance from the regularized solution to the original solution set. We also show that existence of a {\textquoteleft}{\textquoteleft}weak sharp minimum{\textquoteright}{\textquoteright} is in some sense close to being necessary for exact regularization. We illustrate the main result with numerical experiments on the l1 regularization of benchmark (degenerate) linear programs and semidefinite/second-order cone programs. The experiments demonstrate the usefulness of l1 regularization in finding sparse solutions.}, keywords = {SLIM, Optimization}, doi = {10.1137/060675320}, url = {http://www.cs.ubc.ca/~mpf/2007-exact-regularization.html} } @ARTICLE{friedlander2007TASdtd, author = {Michael P. Friedlander and M. A. Saunders}, title = {Discussion: the {Dantzig} selector: statistical estimation when p is much larger than n}, journal = {The Annals of Statistics}, year = {2007}, volume = {35}, pages = {2385-2391}, number = {6}, month = {03}, keywords = {dantzig, SLIM, statistics}, doi = {10.1214/009053607000000479}, url = {http://www.cs.ubc.ca/~mpf/2007-discussion-of-the-dantzig-selector.html} } @ARTICLE{haber10TRemp, author = {Eldad Haber and Matthias Chung and Felix J. Herrmann}, title = {An effective method for parameter estimation with {PDE} constraints with multiple right hand sides}, journal = {SIAM Journal on Optimization}, year = {2012}, volume = {22}, number = {3}, month = {07}, abstract = {Often, parameter estimation problems of parameter-dependent PDEs involve multiple right-hand sides. 
The computational cost and memory requirements of such problems increase linearly with the number of right-hand sides. For many applications this is the main bottleneck of the computation. In this paper we show that problems with multiple right-hand sides can be reformulated as stochastic programming problems by combining the right-hand sides into a few {\textquotedblleft}simultaneous{\textquotedblright} sources. This effectively reduces the cost of the forward problem and results in problems that are much cheaper to solve. We discuss two solution methodologies: namely sample average approximation and stochastic approximation. To illustrate the effectiveness of our approach we present two model problems, direct current resistivity and seismic tomography.}, keywords = {SLIM, FWI, optimization}, url = {http://dx.doi.org/10.1137/11081126X} } @ARTICLE{hennenfent2008GEOPnii, author = {Gilles Hennenfent and Ewout {van den Berg} and Michael P. Friedlander and Felix J. Herrmann}, title = {New insights into one-norm solvers from the {Pareto} curve}, journal = {Geophysics}, year = {2008}, month = {07}, volume = {73}, number = {4}, pages = {A23-A26}, abstract = {Geophysical inverse problems typically involve a trade off between data misfit and some prior. Pareto curves trace the optimal trade off between these two competing aims. These curves are commonly used in problems with two-norm priors where they are plotted on a log-log scale and are known as L-curves. For other priors, such as the sparsity-promoting one norm, Pareto curves remain relatively unexplored. We show how these curves lead to new insights in one-norm regularization. First, we confirm the theoretical properties of smoothness and convexity of these curves from a stylized and a geophysical example. Second, we exploit these crucial properties to approximate the Pareto curve for a large-scale problem. Third, we show how Pareto curves provide an objective criterion to gauge how different one-norm solvers advance towards the solution.}, keywords = {Pareto, SLIM, Geophysics, optimization, acquisition, processing}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/hennenfent08GEOnii/hennenfent08GEOnii.pdf}, doi = {10.1190/1.2944169} } @ARTICLE{hennenfent2010GEOPnct, author = {Gilles Hennenfent and Lloyd Fenelon and Felix J. Herrmann}, title = {Nonequispaced curvelet transform for seismic data reconstruction: a sparsity-promoting approach}, journal = {Geophysics}, year = {2010}, volume = {75}, pages = {WB203-WB210}, number = {6}, month = {12}, abstract = {We extend our earlier work on the nonequispaced fast discrete curvelet transform (NFDCT) and introduce a second generation of the transform. This new generation differs from the previous one by the approach taken to compute accurate curvelet coefficients from irregularly sampled data. The first generation relies on accurate Fourier coefficients obtained by an l2-regularized inversion of the nonequispaced fast Fourier transform (FFT) whereas the second is based on a direct l1-regularized inversion of the operator that links curvelet coefficients to irregular data. Also, by construction the second generation NFDCT is lossless unlike the first generation NFDCT. This property is particularly attractive for processing irregularly sampled seismic data in the curvelet domain and bringing them back to their irregular recording locations with high fidelity. 
Secondly, we combine the second generation NFDCT with the standard fast discrete curvelet transform (FDCT) to form a new curvelet-based method, coined nonequispaced curvelet reconstruction with sparsity-promoting inversion (NCRSI) for the regularization and interpolation of irregularly sampled data. We demonstrate that for a pure regularization problem the reconstruction is very accurate. The signal-to-reconstruction error ratio in our example is above 40 dB. We also conduct combined interpolation and regularization experiments. The reconstructions for synthetic data are accurate, particularly when the recording locations are optimally jittered. The reconstruction in our real data example shows amplitudes along the main wavefronts smoothly varying with limited acquisition imprint.}, keywords = {curvelet transforms, data acquisition, geophysical techniques, seismology, SLIM, processing}, doi = {10.1190/1.3494032}, publisher = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2010/hennenfent2010GEOPnct/hennenfent2010GEOPnct.pdf} } @ARTICLE{hennenfent2008GEOPsdw, author = {Gilles Hennenfent and Felix J. Herrmann}, title = {Simply denoise: wavefield reconstruction via jittered undersampling}, journal = {Geophysics}, year = {2008}, volume = {73}, pages = {V19-V28}, number = {3}, month = {05}, abstract = {In this paper, we present a new discrete undersampling scheme designed to favor wavefield reconstruction by sparsity-promoting inversion with transform elements that are localized in the Fourier domain. Our work is motivated by empirical observations in the seismic community, corroborated by recent results from compressive sampling, which indicate favorable (wavefield) reconstructions from random as opposed to regular undersampling. As predicted by theory, random undersampling renders coherent aliases into harmless incoherent random noise, effectively turning the interpolation problem into a much simpler denoising problem. A practical requirement of wavefield reconstruction with localized sparsifying transforms is the control on the maximum gap size. Unfortunately, random undersampling does not provide such a control and the main purpose of this paper is to introduce a sampling scheme, coined jittered undersampling, that shares the benefits of random sampling, while offering control on the maximum gap size. Our contribution of jittered sub-Nyquist sampling proves to be key in the formulation of a versatile wavefield sparsity-promoting recovery scheme that follows the principles of compressive sampling. After studying the behavior of the jittered undersampling scheme in the Fourier domain, its performance is studied for curvelet recovery by sparsity-promoting inversion (CRSI). Our findings on synthetic and real seismic data indicate an improvement of several decibels over recovery from regularly-undersampled data for the same amount of data collected.}, html_version = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/hennenfent08GEOsdw/paper_html/paper.html}, keywords = {sampling, Geophysics, SLIM, acquisition, processing, optimization, compressive sensing}, doi = {10.1190/1.2841038}, publisher = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/hennenfent08GEOsdw/hennenfent08GEOsdw.pdf} } @ARTICLE{hennenfent2006CiSEsdn, author = {Gilles Hennenfent and Felix J. 
Herrmann}, title = {Seismic denoising with nonuniformly sampled curvelets}, journal = {Computing in Science \& Engineering}, year = {2006}, volume = {8}, number = {3}, pages = {16-25}, month = {05}, abstract = {The authors present an extension of the fast discrete curvelet transform (FDCT) to nonuniformly sampled data. This extension not only restores curvelet compression rates for nonuniformly sampled data but also removes noise and maps the data to a regular grid.}, keywords = {CiSE, processing}, doi = {10.1109/MCSE.2006.49}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/CiSE/2006/hennenfent06CiSEsdn/hennenfent06CiSEsdn.pdf} } @ARTICLE{herrmann2012IIsi, author = {Felix J. Herrmann}, title = {Seismic advances}, journal = {International Innovation}, year = {2013}, pages = {46-49}, month = {01}, abstract = {Current seismic exploration techniques are hampered by bottlenecks in data sampling and processing due to challenges in data collection, demand for more data and the increasing need to study highly complex geological settings. Professor Felix J. Herrmann's group is developing novel techniques to overcome these barriers, which could greatly benefit the hydrocarbon industry.}, keywords = {seismic exploration techniques, compressive sensing, wave-equation-based data mining, dynamic nonlinear optimization}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/InternationalInnovation/2012/herrmann2012IIsi/herrmann2012IIsi.pdf} } @ARTICLE{herrmann2010GEOPrsg, author = {Felix J. Herrmann}, title = {Randomized sampling and sparsity: getting more information from fewer samples}, journal = {Geophysics}, year = {2010}, volume = {75}, pages = {WB173-WB187}, number = {6}, month = {12}, abstract = {Many seismic exploration techniques rely on the collection of massive data volumes that are subsequently mined for information during processing. Although this approach has been extremely successful in the past, current efforts toward higher-resolution images in increasingly complicated regions of the earth continue to reveal fundamental shortcomings in our workflows. Chiefly among these is the so-called {\textquotedblleft}curse of dimensionality{\textquotedblright} exemplified by Nyquist{\textquoteright}s sampling criterion, which disproportionately strains current acquisition and processing systems as the size and desired resolution of our survey areas continue to increase. We offer an alternative sampling method leveraging recent insights from compressive sensing toward seismic acquisition and processing for data that are traditionally considered to be undersampled. The main outcome of this approach is a new technology where acquisition and processing related costs are no longer determined by overly stringent sampling criteria, such as Nyquist. At the heart of our approach lies randomized incoherent sampling that breaks subsampling related interferences by turning them into harmless noise, which we subsequently remove by promoting transform-domain sparsity. Now, costs no longer grow significantly with resolution and dimensionality of the survey area, but instead depend only on transform-domain sparsity. Our contribution is twofold. First, we demonstrate by means of carefully designed numerical experiments that compressive sensing can successfully be adapted to seismic exploration. Second, we show that accurate recovery can be accomplished for compressively sampled data volumes with sizes that exceed the size of conventional transform-domain data volumes by only a small factor. 
Because compressive sensing combines transformation and encoding by a single linear encoding step, this technology is directly applicable to acquisition and to dimensionality reduction during processing. In either case, sampling, storage, and processing costs scale with transform-domain sparsity. We illustrate this principle by means of a number of case studies.}, keywords = {data acquisition, geophysical techniques, Nyquist criterion, sampling methods, seismology, SLIM, acquisition, compressive sensing, optimization}, doi = {10.1190/1.3506147}, publisher = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2010/herrmann2010GEOPrsg/herrmann2010GEOPrsg.pdf} } @ARTICLE{herrmann2005ICAEsdb, author = {Felix J. Herrmann}, title = {Seismic deconvolution by atomic decomposition: a parametric approach with sparseness constraints}, journal = {Integrated Computer-Aided Engineering}, year = {2005}, volume = {12}, pages = {69-90}, number = {1}, month = {01}, abstract = {In this paper an alternative approach to the blind seismic deconvolution problem is presented that aims for two goals, namely recovering the location and relative strength of seismic reflectors, possibly with super-localization, as well as obtaining detailed parametric characterizations for the reflectors. We hope to accomplish these goals by decomposing seismic data into a redundant dictionary of parameterized waveforms designed to closely match the properties of reflection events associated with sedimentary records. In particular, our method allows for highly intermittent non-Gaussian records yielding a reflectivity that can no longer be described by a stationary random process or by a spike train. Instead, we propose a reflector parameterization that not only recovers the reflector{\textquoteright}s location and relative strength but which also captures reflector attributes such as its local scaling, sharpness and instantaneous phase-delay. The first set of parameters delineates the stratigraphy whereas the second provides information on the lithology. As a consequence of the redundant parameterization, finding the matching waveforms from the dictionary involves the solution of an ill-posed problem. Two complementary sparseness-imposing methods, Matching and Basis Pursuit, are compared for our dictionary and applied to seismic data.}, address = {Amsterdam, The Netherlands}, issn = {1069-2509}, keywords = {deconvolution, SLIM, processing, modelling}, publisher = {IOS Press}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/IntegratedComputerAidedEngineering/2005/herrmann2005ICAEsdb/herrmann2005ICAEsdb.pdf} } @ARTICLE{herrmann2004GJIssa, author = {Felix J. Herrmann and Y. Bernab\'e}, title = {Seismic singularities at upper-mantle phase transitions: a site percolation model}, journal = {Geophysical Journal International}, year = {2004}, volume = {159}, pages = {949-960}, number = {3}, month = {12}, abstract = {Mineralogical phase transitions are usually invoked to account for the sharpness of globally observed upper-mantle seismic discontinuities. We propose a percolation-based model for the elastic properties of the phase mixture in the coexistence regions associated with these transitions. The major consequence of the model is that the elastic moduli (but not the density) display a singularity at the percolation threshold of the high-pressure phase. 
This model not only explains the sharp but continuous change in seismic velocities across the phase transition, but also predicts its abruptness and scale invariance, which are characterized by a non-integral scale exponent. Using the receiver-function approach and new, powerful signal-processing techniques, we quantitatively determine the singularity exponent from recordings of converted seismic waves at two Australian stations (CAN and WRAB). Using the estimated values, we construct velocity{\textendash}depth profiles across the singularities and verify that the calculated converted waveforms match the observations under CAN. Finally, we point out a series of additional predictions that may provide new insights into the physics and fine structure of the upper-mantle transition zone.}, keywords = {percolation, SLIM, modelling}, doi = {10.1111/j.1365-246X.2004.02464.x}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalJournalInternational/2004/herrmann2004GJIssa/herrmann2004GJIssa.pdf} } @ARTICLE{herrmann2007GJInlp, author = {Felix J. Herrmann and U. Boeniger and D. J. Verschuur}, title = {Non-linear primary-multiple separation with directional curvelet frames}, journal = {Geophysical Journal International}, year = {2007}, volume = {170}, pages = {781-799}, number = {2}, month = {08}, abstract = {Predictive multiple suppression methods consist of two main steps: a prediction step, during which multiples are predicted from seismic data, and a primary-multiple separation step, during which the predicted multiples are {\textquoteright}matched{\textquoteright} with the true multiples in the data and subsequently removed. This second separation step, which we will call the estimation step, is crucial in practice: an incorrect separation will cause residual multiple energy in the result or may lead to a distortion of the primaries, or both. To reduce these adverse effects, a new transformed-domain method is proposed where primaries and multiples are separated rather than matched. This separation is carried out on the basis of differences in the multiscale and multidirectional characteristics of these two signal components. Our method uses the curvelet transform, which maps multidimensional data volumes into almost orthogonal localized multidimensional prototype waveforms that vary in directional and spatio-temporal content. Primaries-only and multiples-only signal components are recovered from the total data volume by a non-linear optimization scheme that is stable under noisy input data. During the optimization, the two signal components are separated by enhancing sparseness (through weighted l1-norms) in the transformed domain subject to fitting the observed data as the sum of the separated components to within a user-defined tolerance level. Whenever, during the optimization, the estimates for the primaries in the transformed domain correlate with the predictions for the multiples, the recovery of the coefficients for the estimated primaries will be suppressed while for regions where the correlation is small the method seeks the sparsest set of coefficients that represent the estimation for the primaries. Our algorithm does not seek a matched filter and as such it differs fundamentally from traditional adaptive subtraction methods. The method derives its stability from the sparseness obtained by a non-parametric (i.e. not depending on a parametrized physical model) multiscale and multidirectional overcomplete signal representation. 
This sparsity serves as prior information and allows for a Bayesian interpretation of our method during which the log-likelihood function is minimized while the two signal components are assumed to be given by a superposition of prototype waveforms, drawn independently from a probability function that is weighted by the predicted primaries and multiples. In this paper, the predictions are based on the data-driven surface-related multiple elimination method. Synthetic and field data examples show a clean separation leading to a considerable improvement in multiple suppression compared to the conventional method of adaptive matched filtering. This improved separation translates into an improved stack.}, keywords = {signal separation, SLIM, processing}, doi = {10.1111/j.1365-246X.2007.03360.x}, url = { https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalJournalInternational/2007/herrmann07nlp/herrmann07nlp.pdf } } @ARTICLE{herrmann2009GEOPcbm, author = {Felix J. Herrmann and Cody R. Brown and Yogi A. Erlangga and Peyman P. Moghaddam}, title = {Curvelet-based migration preconditioning and scaling}, journal = {Geophysics}, year = {2009}, volume = {74}, pages = {A41}, month = {09}, abstract = {The extremely large size of typical seismic imaging problems has been one of the major stumbling blocks for iterative techniques to attain accurate migration amplitudes. These iterative methods are important because they complement theoretical approaches that are hampered by difficulties to control problems such as finite-acquisition aperture, source-receiver frequency response, and directivity. To solve these problems, we apply preconditioning, which significantly improves convergence of least-squares migration. We discuss different levels of preconditioning that range from corrections for the order of the migration operator to corrections for spherical spreading, and position and reflector-dip dependent amplitude errors. While the first two corrections correspond to simple scalings in the Fourier and physical domain, the third correction requires phase-space (space spanned by location and dip) scaling, which we carry out with curvelets. We show that our combined preconditioner leads to a significant improvement of the convergence of least-squares {\textquoteleft}wave-equation{\textquoteright} migration on a line from the SEG AA{\textquoteright} salt model.}, keywords = {migration, SLIM, imaging}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/herrmann08cmp-r/herrmann08cmp-r.pdf} } @ARTICLE{herrmann2009GEOPcsf, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin}, title = {Compressive simultaneous full-waveform simulation}, journal = {Geophysics}, year = {2009}, volume = {74}, pages = {A35}, month = {08}, abstract = {The fact that computational complexity of wavefield simulation is proportional to the size of the discretized model and acquisition geometry, and not to the complexity of the simulated wavefield, is a major impediment within seismic imaging. By turning simulation into a compressive sensing problem{\textendash}-where simulated data is recovered from a relatively small number of independent simultaneous sources{\textendash}-we remove this impediment by showing that compressively sampling a simulation is equivalent to compressively sampling the sources, followed by solving a reduced system. As in compressive sensing, this allows for a reduction in sampling rate and hence in simulation costs. 
We demonstrate this principle for the time-harmonic Helmholtz solver. The solution is computed by inverting the reduced system, followed by a recovery of the full wavefield with a sparsity promoting program. Depending on the wavefield{\textquoteright}s sparsity, this approach can lead to significant cost reductions, in particular when combined with the implicit preconditioned Helmholtz solver, which is known to converge even for decreasing mesh sizes and increasing angular frequencies. These properties make our scheme a viable alternative to explicit time-domain finite-differences.}, keywords = {full-waveform, SLIM, modelling, compressive sensing}, url = {https://www.slim.eos.ubc.ca/Publications/Public/TechReport/2009/herrmann2009GEOPcsf/herrmann2009GEOPcsf.pdf} } @ARTICLE{Herrmann11TRfcd, author = {Felix J. Herrmann and Michael P. Friedlander and Ozgur Yilmaz}, title = {Fighting the curse of dimensionality: compressive sensing in exploration seismology}, journal = {Signal Processing Magazine, IEEE}, year = {2012}, volume = {29}, pages = {88-100}, number = {3}, month = {05}, abstract = {Many seismic exploration techniques rely on the collection of massive data volumes that are mined for information during processing. This approach has been extremely successful, but current efforts toward higher resolution images in increasingly complicated regions of Earth continue to reveal fundamental shortcomings in our typical workflows. The "curse" of dimensionality is the main roadblock and is exemplified by Nyquist's sampling criterion, which disproportionately strains current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase.}, issn = {1053-5888}, keywords = {Earth, Nyquist sampling criterion, dimensionality curse, higher-resolution images, massive data volumes, seismic exploration techniques, strains current acquisition system, strains current processing system, geographic information systems, seismology}, doi = {10.1109/MSP.2012.2185859}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/IEEESignalProcessingMagazine/2012/Herrmann11TRfcd/Herrmann11TRfcd.pdf} } @ARTICLE{herrmann2008GJInps, author = {Felix J. Herrmann and Gilles Hennenfent}, title = {Non-parametric seismic data recovery with curvelet frames}, journal = {Geophysical Journal International}, year = {2008}, volume = {173}, pages = {233-248}, month = {04}, abstract = {Seismic data recovery from data with missing traces on otherwise regular acquisition grids forms a crucial step in the seismic processing flow. For instance, unsuccessful recovery leads to imaging artifacts and to erroneous predictions for the multiples, adversely affecting the performance of multiple elimination. A non-parametric transform-based recovery method is presented that exploits the compression of seismic data volumes by recently developed curvelet frames. The elements of this transform are multidimensional and directional and locally resemble wavefronts present in the data, which leads to a compressible representation for seismic data. This compression enables us to formulate a new curvelet-based seismic data recovery algorithm through sparsity-promoting inversion. The concept of sparsity-promoting inversion is in itself not new to geophysics. 
However, the recent insights from the field of {\textquoteleft}compressed sensing{\textquoteright} are new since they clearly identify the three main ingredients that go into a successful formulation of a recovery problem, namely a sparsifying transform, a sampling strategy that subdues coherent aliases and a sparsity-promoting program that recovers the largest entries of the curvelet-domain vector while explaining the measurements. These concepts are illustrated with a stylized experiment that stresses the importance of the degree of compression by the sparsifying transform. With these findings, a curvelet-based recovery algorithm is developed, which recovers seismic wavefields from seismic data volumes with large percentages of traces missing. During this construction, we benefit from the three main ingredients of compressive sampling, namely the curvelet compression of seismic data, the existence of a favorable sampling scheme and the formulation of a large-scale sparsity-promoting solver based on a cooling method. The recovery performs well on synthetic as well as real data and performs better by virtue of the sparsifying property of curvelets. Our results are applicable to other areas such as global seismology.}, keywords = {curvelet transform, reconstruction, SLIM, acquisition}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalJournalInternational/2008/herrmann2008GJInps.pdf}, doi = {10.1111/j.1365-246X.2007.03698.x} } @ARTICLE{herrmann11GPelsqIm, author = {Felix J. Herrmann and Xiang Li}, title = {Efficient least-squares imaging with sparsity promotion and compressive sensing}, journal = {Geophysical Prospecting}, year = {2012}, volume = {60}, pages = {696-712}, number = {4}, month = {07}, abstract = {Seismic imaging is a linearized inversion problem relying on the minimization of a least-squares misfit functional as a function of the medium perturbation. The success of this procedure hinges on our ability to handle large systems of equations---whose size grows exponentially with the demand for higher resolution images in more and more complicated areas---and our ability to invert these systems given a limited amount of computational resources. To overcome this "curse of dimensionality" in problem size and computational complexity, we propose a combination of randomized dimensionality-reduction and divide-and-conquer techniques. This approach allows us to take advantage of sophisticated sparsity-promoting solvers that work on a series of smaller subproblems each involving a small randomized subset of data. These subsets correspond to artificial simultaneous-source experiments made of random superpositions of sequential-source experiments. By changing these subsets after each subproblem is solved, we are able to attain an inversion quality that is competitive while requiring fewer computational, and possibly, fewer acquisition resources. Application of this concept to a controlled series of experiments showed the validity of our approach and the relationship between its efficiency---by reducing the number of sources and hence the number of wave-equation solves---and the image quality. 
Application of our dimensionality-reduction methodology with sparsity promotion to a complicated synthetic with well-log constrained structure also yields excellent results underlining the importance of sparsity promotion.}, address = {University of British Columbia, Vancouver}, keywords = {SLIM, imaging, optimization, compressive sensing}, doi = {10.1111/j.1365-2478.2011.01041.x}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/GeophysicalProspecting/2012/herrmann11GPelsqIm/herrmann11GPelsqIm.pdf}, url2 = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2478.2011.01041.x/full} } @ARTICLE{herrmann2008ACHAsac, author = {Felix J. Herrmann and Peyman P. Moghaddam and Chris Stolk}, title = {Sparsity- and continuity-promoting seismic image recovery with curvelet frames}, journal = {Applied and Computational Harmonic Analysis}, year = {2008}, volume = {24}, pages = {150-173}, number = {2}, month = {03}, abstract = {A nonlinear singularity-preserving solution to seismic image recovery with sparseness and continuity constraints is proposed. We observe that curvelets, as a directional frame expansion, lead to sparsity of seismic images and exhibit invariance under the normal operator of the linearized imaging problem. Based on this observation we derive a method for stable recovery of the migration amplitudes from noisy data. The method corrects the amplitudes during a post-processing step after migration, such that the main additional cost is one application of the normal operator, i.e. a modeling followed by a migration. Asymptotically this normal operator corresponds to a pseudodifferential operator, for which a convenient diagonal approximation in the curvelet domain is derived, including a bound for its error and a method for the estimation of the diagonal from a compound operator consisting of discrete implementations for the scattering operator and its adjoint, the migration operator. The solution is formulated as a nonlinear optimization problem where sparsity in the curvelet domain as well as continuity along the imaged reflectors are jointly promoted. To enhance sparsity, the $\ell_1$-norm on the curvelet coefficients is minimized, while continuity is promoted by minimizing an anisotropic diffusion norm on the image. The performance of the recovery scheme is evaluated with a time-reversed {\textquoteleft}wave-equation{\textquoteright} migration code on synthetic datasets, including the complex SEG/EAGE AA salt model.}, keywords = {curvelet transform, imaging, SLIM, processing}, doi = {10.1016/j.acha.2007.06.007}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/ACHA/2008/herrmann2008ACHAsac/herrmann2008ACHAsac.pdf} } @ARTICLE{herrmann2008GEOPcbs, author = {Felix J. Herrmann and Deli Wang and Gilles Hennenfent and Peyman P. Moghaddam}, title = {Curvelet-based seismic data processing: a multiscale and nonlinear approach}, journal = {Geophysics}, year = {2008}, volume = {73}, pages = {A1-A5}, number = {1}, month = {03}, abstract = {Mitigating missing data, multiples, and erroneous migration amplitudes are key factors that determine image quality. Curvelets, little {\textquoteleft}{\textquoteleft}plane waves,{\textquoteright}{\textquoteright} complete with oscillations in one direction and smoothness in the other directions, sparsify seismic data, a property we leverage explicitly with sparsity promotion. With this principle, we recover seismic data with high fidelity from a small subset (20\%) of randomly selected traces. 
Similarly, sparsity leads to a natural decorrelation and hence to a robust curvelet-domain primary-multiple separation for North Sea data. Finally, sparsity helps to recover migration amplitudes from noisy data. With these examples, we show that exploiting the curvelet{\textquoteright}s ability to sparsify wavefrontlike features is powerful, and our results are a clear indication of the broad applicability of this transform to exploration seismology. {\copyright}2008 Society of Exploration Geophysicists}, keywords = {curvelet transform, SLIM, acquisition, processing}, doi = {10.1190/1.2799517}, publisher = {SEG}, url = { https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/herrmann08GEOcbs/herrmann08GEOcbs.pdf } } @ARTICLE{herrmann2008GEOPacd, author = {Felix J. Herrmann and Deli Wang and D. J. Verschuur}, title = {Adaptive curvelet-domain primary-multiple separation}, journal = {Geophysics}, year = {2008}, volume = {73}, pages = {A17-A21}, number = {3}, month = {08}, abstract = {In many exploration areas, successful separation of primaries and multiples greatly determines the quality of seismic imaging. Despite major advances made by surface-related multiple elimination (SRME), amplitude errors in the predicted multiples remain a problem. When these errors vary for each type of multiple in different ways (as a function of offset, time, and dip), they pose a serious challenge for conventional least-squares matching and for the recently introduced separation by curvelet-domain thresholding. We propose a data-adaptive method that corrects amplitude errors, which vary smoothly as a function of location, scale (frequency band), and angle. With this method, the amplitudes can be corrected by an elementwise curvelet-domain scaling of the predicted multiples. We show that this scaling leads to successful estimation of primaries, despite amplitude, sign, timing, and phase errors in the predicted multiples. Our results on synthetic and real data show distinct improvements over conventional least-squares matching in terms of better suppression of multiple energy and high-frequency clutter and better recovery of estimated primaries. {\copyright}2008 Society of Exploration Geophysicists}, keywords = {Geophysics, SLIM, processing}, doi = {10.1190/1.2904986}, publisher = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/herrmann08GEOacd/herrmann08GEOacd.pdf } } @ARTICLE{herrmann2011RECORDERcsse1, author = {Felix J. Herrmann and Haneet Wason and Tim T.Y. Lin}, title = {Compressive sensing in seismic exploration: an outlook on a new paradigm}, journal = {CSEG Recorder}, year = {2011}, volume = {36}, pages = {19-33}, number = {4}, month = {04}, abstract = {Many seismic exploration techniques rely on the collection of massive data volumes that are subsequently mined for information during processing. While this approach has been extremely successful in the past, current efforts toward higher resolution images in increasingly complicated regions of the Earth continue to reveal fundamental shortcomings in our workflows. Chiefly amongst these is the so-called "curse of dimensionality" exemplified by Nyquist's sampling criterion, which disproportionately strains current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. 
We offer an alternative sampling method leveraging recent insights from compressive sensing towards seismic acquisition and processing for data that, from a traditional point of view, are considered to be undersampled. The main outcome of this approach is a new technology where acquisition and processing related costs are decoupled from the stringent Nyquist sampling criterion. At the heart of our approach lies randomized incoherent sampling that breaks subsampling-related interferences by turning them into harmless noise, which we subsequently remove by promoting sparsity in a transform-domain. Acquisition schemes designed to fit into this regime no longer grow significantly in cost with increasing resolution and dimensionality of the survey area, but instead their cost ideally depends only on transform-domain sparsity of the expected data. Our contribution is split into two parts.}, url = {http://209.91.124.56/publications/recorder/2011/06jun/Jun2011-Compressive-Sensing-in-Seismic-Expl.pdf}, html_version = {http://csegrecorder.com/articles/view/compressive-sensing-in-seismic-exploration-an-outlook-on-a-new-paradig} } @ARTICLE{kumar2010TNPecr, author = {Vishal Kumar and Jounada Oueity and Ron Clowes and Felix J. Herrmann}, title = {Enhancing crustal reflection data through curvelet denoising}, journal = {Tectonophysics}, year = {2011}, volume = {508}, pages = {106-116}, number = {1-4}, month = {07}, abstract = {Suppression of incoherent noise, which is present in the seismic signal and may often lead to ambiguous interpretation, is a key step in processing associated with crustal reflection data. In this paper, we make use of the parsimonious representation of seismic data in the curvelet domain to perform the noise attenuation while preserving the coherent energy and its amplitude information. Curvelets are a recently developed mathematical transform that has as one of its properties minimal overlap between seismic signal and noise in the transform domain, thereby facilitating signal-noise separation. The problem is cast as an inverse problem and the results are obtained by updating the solution at each iteration. We demonstrate the effectiveness of this procedure at removing noise on both synthetic shot gathers and a synthetic stacked seismic section. We then apply curvelet denoising to deep crustal seismic reflection data where the signal-to-noise ratio is low. The reflection data were recorded along Lithoprobe's SNORCLE Line 1 across Paleoproterozoic-Archean domains in Canada's Northwest Territories. After initial processing, we apply the iterative curvelet denoising to both pre-stack shot gathers and post-stack data. Ground roll, random noise and much of the anomalous vertical energy are removed from the pre-stack shot gathers, to the extent that crustal reflections, including those from the Moho, are clearly seen on individual gathers. Denoised stacked data show a series of dipping reflections in the lower crust that extend into the Moho. The Moho itself is relatively flat and characterized by a sharp, narrow band of reflections. Comparing the results for the stacked data with those from F-X deconvolution, curvelet denoising outperforms the latter by attenuating incoherent noise with minimal harm to the signal. Because curvelet denoising retains amplitude information, it provides opportunities for further studies of seismic sections through attribute analyses. 
Curvelet denoising provides an important new tool in the processing toolbox for crustal seismic reflection data.}, keywords = {SLIM, processing}, doi = {10.1016/j.tecto.2010.07.01}, url = {http://www.sciencedirect.com/science/article/pii/S0040195110003227} } @ARTICLE{vanLeeuwen2010IJGswi, author = {Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann}, title = {Seismic waveform inversion by stochastic optimization}, journal = {International Journal of Geophysics}, year = {2011}, volume = {2011}, month = {12}, abstract = {We explore the use of stochastic optimization methods for seismic waveform inversion. The basic principle of such methods, which goes back to the 1950s, is to randomly draw a batch of realizations of a given misfit function. The ultimate goal of such an approach is to dramatically reduce the computational cost involved in evaluating the misfit. Following earlier work, we introduce the stochasticity into the waveform inversion problem in a rigorous way via a technique called randomized trace estimation. We then review theoretical results that underlie recent developments in the use of stochastic methods for waveform inversion. We present numerical experiments to illustrate the behavior of different types of stochastic optimization methods and investigate the sensitivity to the batch size and the noise level in the data. We find that it is possible to reproduce results that are qualitatively similar to the solution of the full problem with modest batch sizes, even on noisy data. Each iteration of the corresponding stochastic methods requires an order of magnitude fewer PDE solves than a comparable deterministic method applied to the full problem, which may lead to an order of magnitude speedup for waveform inversion in practice.}, keywords = {SLIM, FWI, optimization}, note = {Article ID: 689041, 18 pages}, doi = {10.1155/2011/689041}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/InternationJournalOfGeophysics/2011/vanLeeuwen10IJGswi/vanLeeuwen10IJGswi.pdf} } @ARTICLE{VanLeeuwen11TRfwiwse, author = {Tristan van Leeuwen and Felix J. Herrmann}, title = {Fast waveform inversion without source encoding}, journal = {Geophysical Prospecting}, year = {2013}, month = {06}, volume = {61}, pages = {10-19}, abstract = {Randomized source encoding has recently been proposed as a way to dramatically reduce the costs of full waveform inversion. The main idea is to replace all sequential sources by a small number of simultaneous sources. This introduces random crosstalk in the model updates and special stochastic optimization strategies are required to deal with this. Two problems arise with this approach: i) source encoding can only be applied to fixed-spread acquisition setups, and ii) stochastic optimization methods tend to converge very slowly, relying on averaging to get rid of the cross-talk. Although the slow convergence is partly offset by the low iteration cost, we show that conventional optimization strategies are bound to outperform stochastic methods in the long run. In this paper we argue that we do not need randomized source encoding to reap the benefits of stochastic optimization and we review an optimization strategy that combines the benefits of both conventional and stochastic optimization. The method uses a gradually increasing batch of sources. Thus, iterations are very cheap initially and this allows the method to make fast progress in the beginning.
As the batch size grows, the method behaves like conventional optimization, allowing for fast convergence. Numerical examples suggest that the stochastic and hybrid methods perform equally well with and without source encoding and that the hybrid method outperforms both conventional and stochastic optimization. The method does not rely on source encoding techniques and can thus be applied to non-fixed-spread data.}, keywords = {SLIM, FWI, optimization}, doi = {10.1111/j.1365-2478.2012.01096.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2478.2012.01096.x/abstract}, note = {Article first published online: 10 JULY 2012} } @ARTICLE{Li11TRfrfwi, author = {Xiang Li and Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann}, title = {Fast randomized full-waveform inversion with compressive sensing}, journal = {Geophysics}, year = {2012}, volume = {77}, pages = {A13-A17}, number = {3}, month = {05}, abstract = {Wave-equation based seismic inversion can be formulated as a nonlinear inverse problem where the medium properties are obtained via minimization of a least-squares misfit functional. The demand for higher resolution models in more geologically complex areas drives the need to develop techniques that explore the special structure of full-waveform inversion to reduce the computational burden and to regularize the inverse problem. We meet these goals by using ideas from compressive sensing and stochastic optimization to design a novel Gauss-Newton method, where the updates are computed from random subsets of the data via curvelet-domain sparsity promotion. Application of this idea to a realistic synthetic shows improved results compared to quasi-Newton methods, which require passes through all data. Two different subset sampling strategies are considered: randomized source encoding, and drawing sequential shots firing at random source locations from marine data with missing near and far offsets. In both cases, we obtain excellent inversion results compared to conventional methods at reduced computational costs.}, keywords = {SLIM, FWI, compressive sensing, optimization}, doi = {10.1190/geo2011-0410.1}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2012/Li11TRfrfwi/Li11TRfrfwi.pdf} } @ARTICLE{lin2007GEOPcwe, author = {Tim T.Y. Lin and Felix J. Herrmann}, title = {Compressed wavefield extrapolation}, journal = {Geophysics}, year = {2007}, volume = {72}, pages = {SM77-SM93}, number = {5}, month = {08}, abstract = {An explicit algorithm for the extrapolation of one-way wavefields is proposed that combines recent developments in information theory and theoretical signal processing with the physics of wave propagation. Because of excessive memory requirements, explicit formulations for wave propagation have proven to be a challenge in 3D. By using ideas from compressed sensing, we are able to formulate the (inverse) wavefield extrapolation problem on small subsets of the data volume, thereby reducing the size of the operators. Compressed sensing entails a new paradigm for signal recovery that provides conditions under which signals can be recovered from incomplete samplings by nonlinear recovery methods that promote sparsity of the to-be-recovered signal. According to this theory, signals can be successfully recovered when the measurement basis is incoherent with the representation in which the wavefield is sparse.
In this new approach, the eigenfunctions of the Helmholtz operator are recognized as a basis that is incoherent with curvelets that are known to compress seismic wavefields. By casting the wavefield extrapolation problem in this framework, wavefields can be successfully extrapolated in the modal domain, despite evanescent wave modes. The degree to which the wavefield can be recovered depends on the number of missing (evanescent) wavemodes and on the complexity of the wavefield. A proof of principle for the compressed sensing method is given for inverse wavefield extrapolation in 2D, together with a pathway to 3D in which the multiscale and multiangular properties of curvelets, in relation to the Helmholtz operator, are exploited. The results show that our method is stable, has reduced dip limitations, and handles evanescent waves in inverse extrapolation. {\copyright}2007 Society of Exploration Geophysicists}, keywords = {SLIM, wave propagation, modelling}, doi = {10.1190/1.2750716}, publisher = {SEG}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2007/lin07cwe/lin07cwe.pdf} } @ARTICLE{Mansour11TRssma, author = {Hassan Mansour and Haneet Wason and Tim T.Y. Lin and Felix J. Herrmann}, title = {Randomized marine acquisition with compressive sampling matrices}, journal = {Geophysical Prospecting}, year = {2012}, volume = {60}, pages = {648-662}, number = {4}, month = {07}, abstract = {Seismic data acquisition in marine environments is a costly process that calls for the adoption of simultaneous-source or randomized acquisition - an emerging technology that is stimulating both geophysical research and commercial efforts. Simultaneous marine acquisition calls for the development of a new set of design principles and post-processing tools. In this paper, we discuss the properties of a specific class of randomized simultaneous acquisition matrices and demonstrate that sparsity-promoting recovery improves the quality of reconstructed seismic data volumes. We propose a practical randomized marine acquisition scheme where the sequential sources fire airguns at only randomly time-dithered instances. We demonstrate that the recovery using sparse approximation from random time-dithering with a single source approaches the recovery from simultaneous-source acquisition with multiple sources. Established findings from the field of compressive sensing indicate that the choice of the sparsifying transform that is incoherent with the compressive sampling matrix can significantly impact the reconstruction quality. Leveraging these findings, we then demonstrate that the compressive sampling matrix resulting from our proposed sampling scheme is incoherent with the curvelet transform. The combined measurement matrix exhibits better isometry properties than other transform bases such as a non-localized multidimensional Fourier transform.
We illustrate our results with simulations of "ideal" simultaneous-source marine acquisition, which dithers both in time and space, compared with periodic and randomized time-dithering.}, keywords = {curvelet transform, Fourier, marine acquisition}, doi = {10.1111/j.1365-2478.2012.01075.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2478.2012.01075.x/abstract} } @ARTICLE{saab2008ACHAsrb, author = {Rayan Saab and Ozgur Yilmaz}, title = {Sparse recovery by non-convex optimization - instance optimality}, journal = {Applied and Computational Harmonic Analysis}, year = {2010}, volume = {29}, pages = {30-48}, number = {1}, month = {07}, abstract = {In this note, we address the theoretical properties of $\Delta_p$, a class of compressed sensing decoders that rely on $\ell^p$ minimization with $p \in (0,1)$ to recover estimates of sparse and compressible signals from incomplete and inaccurate measurements. In particular, we extend the results of Cand\`es, Romberg and Tao [3] and Wojtaszczyk [30] regarding the decoder $\Delta_1$, based on $\ell^1$ minimization, to $\Delta_p$ with $p \in (0,1)$. Our results are two-fold. First, we show that under certain sufficient conditions that are weaker than the analogous sufficient conditions for $\Delta_1$ the decoders $\Delta_p$ are robust to noise and stable in the sense that they are $(2,p)$ instance optimal. Second, we extend the results of Wojtaszczyk to show that, like $\Delta_1$, the decoders $\Delta_p$ are $(2,2)$ instance optimal in probability provided the measurement matrix is drawn from an appropriate distribution. While the extension of the results of [3] to the setting where $p \in (0,1)$ is straightforward, the extension of the instance optimality in probability result of [30] is non-trivial. In particular, we need to prove that the $LQ_1$ property, introduced in [30], and shown to hold for Gaussian matrices and matrices whose columns are drawn uniformly from the sphere, generalizes to an $LQ_p$ property for the same classes of matrices. Our proof is based on a result by Gordon and Kalton [18] about the Banach-Mazur distances of p-convex bodies to their convex hulls.}, keywords = {non-convex, compressive sensing}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/ACHA/2010/saab2008ACHAsrb/saab2008ACHAsrb.pdf} } @ARTICLE{wang2008GEOPbws, author = {Deli Wang and Rayan Saab and Ozgur Yilmaz and Felix J. Herrmann}, title = {Bayesian wavefield separation by transform-domain sparsity promotion}, journal = {Geophysics}, year = {2008}, volume = {73}, pages = {1-6}, number = {5}, month = {07}, abstract = {Successful removal of coherent noise sources greatly determines the quality of seismic imaging. Major advances were made in this direction, e.g., Surface-Related Multiple Elimination (SRME) and interferometric ground-roll removal. Still, moderate phase, timing, amplitude errors and clutter in the predicted signal components can be detrimental. Adopting a Bayesian approach along with the assumption of approximate curvelet-domain independence of the to-be-separated signal components, we construct an iterative algorithm that takes the predictions produced by, for example, SRME as input and separates these components in a robust fashion. In addition, the proposed algorithm controls the energy mismatch between the separated and predicted components.
Such a control, which was lacking in earlier curvelet-domain formulations, produces improved results for primary-multiple separation on both synthetic and real data.}, keywords = {curvelet transform, SLIM, Geophysics, processing, optimization}, doi = {10.1190/1.2952571}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/wang08GEObws/wang08GEObws.pdf} } % This file was created with JabRef 2.9. % Encoding: MacRoman @MANUAL{hennenfent08MNrap, title = {Repro: a {Python} package for automating reproducible research in scientific computing}, author = {Gilles Hennenfent and Sean Ross-Ross}, year = {2008}, abstract = {Repro is a Python package for automating reproducible research in scientific computing. Repro works in combination with SCons, a next-generation build tool. The package is freely available over the Internet. Downloading and installation instructions are provided in this guide. The repro package is documented in various ways (many comments in source code, this guide{\textendash}written using repro itself!{\textendash}and a reference guide). In this user{\textquoteright}s guide, we present a few pedagogical examples that use Matlab, Python, Seismic Unix (SU), and Madagascar. We also include demo papers. These papers are written in LaTeX and compiled using repro. The figures they contain are automatically generated from the source codes provided. In that sense, the demo papers are a model of self-contained documents that are fully reproducible. The repro package is largely inspired by some parts of Madagascar, a geophysical software package for reproducible research. However, the repro package is intended for a broad audience coming from a wide spectrum of interest areas.}, keywords = {SLIM}, month = {08}, url = {http://repro.sourceforge.net/Site/Home.html} } @MANUAL{rossross07MNsda, title = {{SLIMpy} development and programming interface for seismic processing}, author = {Sean Ross-Ross and Henryk Modzelewski and Cody R. Brown and Felix J. Herrmann}, year = {2007}, abstract = {Inverse problems in (exploration) seismology are known for their large to very large scale. For instance, certain sparsity-promoting inversion techniques involve vectors that easily exceed unknowns while seismic imaging involves the construction and application of matrix-free discretized operators where single matrix-vector evaluations may require hours, days or even weeks on large compute clusters. For these reasons, software development in this field has remained the domain of highly technical codes programmed in low-level languages with little eye for easy development, code reuse and integration with (nonlinear) programs that solve inverse problems. Following ideas from the Symes{\textquoteright} Rice Vector Library and Bartlett{\textquoteright}s C++ object-oriented interface, Thyra, and Reduction/Transformation operators (both part of the Trilinos software package), we developed a software-development environment based on overloading. This environment provides a pathway from in-core prototype development to out-of-core and MPI {\textquoteright}production{\textquoteright} code with a high level of code reuse. This code reuse is accomplished by integrating the out-of-core and MPI functionality into the dynamic object-oriented programming language Python.
This integration is implemented through operator overloading and allows for the development of a coordinate-free solver framework that (i) promotes code reuse; (ii) analyses the statements in an abstract syntax tree; and (iii) generates executable statements. In the current implementation, we developed an interface to generate executable statements for the out-of-core unix-pipe based (seismic) processing package RSF-Madagascar (rsf.sf.net). The modular design allows for interfaces to other seismic processing packages and to in-core Python packages such as numpy. So far, the implementation overloads linear operators and element-wise reduction/transformation operators. We are planning extensions towards nonlinear operators and integration with existing (parallel) solver frameworks such as Trilinos.}, keywords = {SLIM, software}, url = {https://www.slim.eos.ubc.ca/Software/SLIM/SLIMpy/} } @MANUAL{rossross08MNsai, title = {{SLIMPy}: a python interface for unix-pipe based coordinate-free scientific computing}, author = {Sean Ross-Ross and Henryk Modzelewski and Felix J. Herrmann}, year = {2008}, abstract = {SLIMpy is a Python interface that exposes the functionality of seismic data processing packages, such as MADAGASCAR, through operator overloading. SLIMpy provides a concrete coordinate-free implementation of classes for out-of-core linear (implicit matrix-vector), and element-wise operations, including calculation of norms and other basic vector operations. The library is intended to provide the user with an abstract scripting language to program iterative algorithms from numerical linear algebra. These algorithms require repeated evaluation of operators that were initially designed to be run as part of batch-oriented processing flows. The current implementation supports a plugin for Madagascar{\textquoteright}s out-of-core UNIX pipe-based applications and is extendable to pipe-based collections of programs such as Seismic Unix, SEPLib, and FreeUSP. To optimize performance, SLIMpy uses an Abstract Syntax Tree that parses the algorithm and optimizes the pipes.}, month = {07}, url = {https://www.slim.eos.ubc.ca/Software/SLIM/SLIMpy/} } @MANUAL{vandenberg07MNsat, title = {{SPARCO}: a toolbox for testing sparse reconstruction algorithms}, author = {Ewout {van den Berg} and Michael P. Friedlander}, year = {2007}, abstract = {Sparco is a suite of problems for testing and benchmarking algorithms for sparse signal reconstruction. It is also an environment for creating new test problems, and a suite of standard linear operators is provided from which new problems can be assembled. Sparco is implemented entirely in Matlab and is self-contained. (A few optional test problems are based on the CurveLab toolbox, which can be installed separately.) At the core of the sparse recovery problem is the linear system $Ax+r=b$, where $A$ is an $m$-by-$n$ linear operator and the $m$-vector $b$ is the observed signal. The goal is to find a sparse $n$-vector $x$ such that $r$ is small in norm.}, keywords = {SLIM}, month = {10}, url = {http://www.cs.ubc.ca/labs/scl/sparco/} } % This file was created with JabRef 2.9. % Encoding: ISO8859_1 @PHDTHESIS{hennenfent08phd, author = {Gilles Hennenfent}, title = {Sampling and reconstruction of seismic wavefields in the curvelet domain}, school = {The University of British Columbia}, year = {2008}, type = {phd}, address = {Vancouver, BC Canada}, abstract = {Wavefield reconstruction is a crucial step in the seismic processing flow.
For instance, unsuccessful interpolation leads to erroneous multiple predictions that adversely affect the performance of multiple elimination, and to imaging artifacts. We present a new non-parametric transform-based reconstruction method that exploits the compression of seismic data by the recently developed curvelet transform. The elements of this transform, called curvelets, are multi-dimensional, multi-scale, and multi-directional. They locally resemble wavefronts present in the data, which leads to a compressible representation for seismic data. This compression enables us to formulate a new curvelet-based seismic data recovery algorithm through sparsity-promoting inversion (CRSI). The concept of sparsity-promoting inversion is in itself not new to geophysics. However, the recent insights from the field of {\textquoteleft}{\textquoteleft}compressed sensing{\textquoteright}{\textquoteright} are new since they clearly identify the three main ingredients that go into a successful formulation of a reconstruction problem, namely a sparsifying transform, a sub-Nyquist sampling strategy that subdues coherent aliases in the sparsifying domain, and a data-consistent sparsity-promoting program. After a brief overview of the curvelet transform and our seismic-oriented extension to the fast discrete curvelet transform, we detail the CRSI formulation and illustrate its performance on synthetic and real datasets. Then, we introduce a sub-Nyquist sampling scheme, termed jittered undersampling, and show that, for the same amount of data acquired, jittered data are best interpolated using CRSI compared to regularly or randomly undersampled data. We also discuss the large-scale one-norm solver involved in CRSI. Finally, we extend the CRSI formulation to other geophysical applications and present results on multiple removal and migration-amplitude recovery.}, keywords = {curvelet transform, reconstruction, SLIM}, month = {05}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2008/hennenfent08phd.pdf} } @PHDTHESIS{moghaddam10phd, author = {Peyman P. Moghaddam}, title = {Curvelet-based migration amplitude recovery}, school = {The University of British Columbia}, year = {2010}, type = {phd}, address = {Vancouver, BC Canada}, abstract = {Migration can accurately locate reflectors in the earth but in most cases fails to correctly resolve their amplitude. This might lead to misinterpretation of the nature of the reflector. In this thesis, I introduce a method to accurately recover the amplitude of the seismic reflector. This method relies on a new transform-based recovery that exploits the representation of seismic images by the recently developed curvelet transform. The elements of this transform, called curvelets, are multi-dimensional, multi-scale, and multi-directional. They also remain approximately invariant under the imaging operator. I exploit these properties of the curvelets to introduce a method called Curvelet Match Filtering (CMF) for recovering the seismic amplitude in the presence of noise in both the migrated image and the data. I detail the method and illustrate its performance on a synthetic dataset. I also extend the CMF formulation to other geophysical applications and present results on multiple removal. In addition, I investigate preconditioning of the migration, which results in a rapid convergence rate for the iterative migration method.}, month = {05}, url = {https://www.slim.eos.ubc.ca/Publications/Public/Thesis/2010/moghaddam10phd.pdf} } % This file was created with JabRef 2.6.
% Encoding: MacRoman @CONFERENCE{erlangga08SINBADimf, author = {Yogi A. Erlangga and K. Vuik and K. Oosterlee and D. Riyanti and R. Nabben}, title = {Iterative methods for 2{D}/3{D} {Helmholtz} operator}, booktitle = {SINBAD 2008}, year = {2008}, abstract = {We present an iterative method for solving the 2D/3D Helmholtz equation. The method is mainly based on a Krylov method, preconditioned by a special operator which represents a damped Helmholtz operator. The discretization of the preconditioning operator is then solved by one multigrid sweep. It can be shown that while the spectrum is bounded above by one, the smallest eigenvalue of the preconditioned system is of order $k^{-1}$. In this situation, the convergence of a Krylov method will be proportional to the frequency of the problem. Further convergence acceleration can be achieved if eigenvalues of order $k^{-1}$ are projected from the spectrum. This can be done by a projection operator, similar to but more stable than deflation. This projection operator has been the core of a new multilevel method, called multilevel Krylov method, proposed by Erlangga and Nabben only recently. Putting the preconditioned Helmholtz operator in this setting, a convergence which is independent of frequency can be obtained.}, keywords = {Presentation, SINBAD, SLIM}, url = {http://slim.eos.ubc.ca/SINBAD2008/Program_files/SINBAD2008_Erlangga_Ite.pdf} } @ARTICLE{berkhout97eom, author = {A. J. Berkhout and D. J. Verschuur}, title = {Estimation of multiple scattering by iterative inversion, {Part} {I}: {Theoretical} considerations}, journal = {Geophysics}, year = {1997}, volume = {62}, pages = {1586-1595}, number = {5}, abstract = {A review has been given of the surface-related multiple problem by making use of the so-called feedback model. From the resulting equations it has been concluded that the proposed solution does not require any properties of the subsurface. However, source-detector and reflectivity properties of the surface need be specified. Those properties have been quantified in a surface operator and this operator is estimated as part of the multiple removal problem. The surface-related multiple removal algorithm has been formulated in terms of a Neumann series and in terms of an iterative equation. The Neumann formulation requires a nonlinear optimization process for the surface operator; while the iterative formulation needs a number of linear optimizations. The iterative formulation also has the advantage that it can be integrated easily with another multiple removal method. An algorithm for the removal of internal multiples has been proposed as well. This algorithm is an extension of the surface-related method. Removal of internal multiples requires knowledge of the macro velocity model between the surface and the upper boundary of the multiple generating layer. In part II (also published in this issue) the success of the proposed algorithms has been demonstrated on numerical experiments and field data examples. {\copyright}1997 Society of Exploration Geophysicists}, bdsk-url-1 = {http://library.seg.org/doi/abs/10.1190/1.1444261}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1444261}, date-added = {2008-05-07 18:38:50 -0700}, date-modified = {2008-08-14 14:46:15 -0700}, doi = {10.1190/1.1444261}, issue = {5}, keywords = {SRME}, publisher = {SEG}, url = {http://library.seg.org/doi/abs/10.1190/1.1444261} } @BOOK{biondo063ds, author = {B. L. 
Biondi}, title = {3-{D} seismic imaging}, publisher = {SEG}, year = {2006}, number = {14}, series = {Investigations in Geophysics}, date-added = {2008-05-08 15:25:18 -0700}, date-modified = {2008-05-20 19:45:00 -0700}, issue = {14}, keywords = {imaging} } @ARTICLE{cordoba78wpa, author = {A. C\'ordoba and C. Fefferman}, title = {Wave packets and {Fourier} integral operators}, journal = {Communications in Partial Differential Equations}, year = {1978}, volume = {3}, pages = {979-1005}, number = {11}, bdsk-url-1 = {http://dx.doi.org/10.1080/03605307808820083}, date-added = {2008-05-07 11:53:23 -0700}, date-modified = {2008-05-20 11:48:08 -0700}, doi = {10.1080/03605307808820083}, issue = {11}, keywords = {wave packets, FIO}, publisher = {Taylor \& Francis} } @PHDTHESIS{candes98rta, author = {E. J. Cand\`es}, title = {Ridgelets: theory and applications}, school = {Stanford University}, year = {1998}, address = {Stanford, CA}, bdsk-url-1 = {http://www-stat.stanford.edu/%7Ecandes/papers/Thesis.ps.gz}, date-added = {2008-05-27 18:24:11 -0700}, date-modified = {2008-05-27 18:26:14 -0700}, keywords = {ridgelet transform} } @ARTICLE{candes05tcr, author = {E. J. Cand\`es and L. Demanet}, title = {The curvelet representation of wave propagators is optimally sparse}, journal = {Communications on Pure and Applied Mathematics}, year = {2005}, volume = {58}, pages = {1472-1528}, number = {11}, abstract = {This paper argues that curvelets provide a powerful tool for representing very general linear symmetric systems of hyperbolic differential equations. Curvelets are a recently developed multiscale system [10, 7] in which the elements are highly anisotropic at fine scales, with effective support shaped according to the parabolic scaling principle width ≈ length^2 at fine scales. We prove that for a wide class of linear hyperbolic differential equations, the curvelet representation of the solution operator is both optimally sparse and well organized. * It is sparse in the sense that the matrix entries decay nearly exponentially fast (i.e. faster than any negative polynomial), * and well-organized in the sense that the very few nonnegligible entries occur near a few shifted diagonals. Indeed, we show that the wave group maps each curvelet onto a sum of curvelet-like waveforms whose locations and orientations are obtained by following the different Hamiltonian flows---hence the diagonal shifts in the curvelet representation. A physical interpretation of this result is that curvelets may be viewed as coherent waveforms with enough frequency localization so that they behave like waves but at the same time, with enough spatial localization so that they simultaneously behave like particles.}, bdsk-url-1 = {http://www-stat.stanford.edu/%7Ecandes/papers/CurveletsWaves.pdf}, date-added = {2008-05-07 11:10:43 -0700}, date-modified = {2008-08-14 14:57:23 -0700}, doi = {10.1002/cpa.20078}, issue = {11}, keywords = {curvelet transform, FIO}, pdf = {http://www-stat.stanford.edu/%7Ecandes/papers/CurveletsWaves.pdf} } @ARTICLE{candes06fdc, author = {E. J. Cand\`es and L. Demanet and D. L. Donoho and L. Ying}, title = {Fast discrete curvelet transforms}, journal = {Multiscale Modeling and Simulation}, year = {2006}, volume = {5}, pages = {861-899}, number = {3}, abstract = {This paper describes two digital implementations of a new mathematical transform, namely, the second generation curvelet transform [12, 10] in two and three dimensions. 
The first digital transformation is based on unequally-spaced fast Fourier transforms (USFFT) while the second is based on the wrapping of specially selected Fourier samples. The two implementations essentially differ by the choice of spatial grid used to translate curvelets at each scale and angle. Both digital transformations return a table of digital curvelet coefficients indexed by a scale parameter, an orientation parameter, and a spatial location parameter. And both implementations are fast in the sense that they run in O(n^2 log n) flops for n by n Cartesian arrays; in addition, they are also invertible, with rapid inversion algorithms of about the same complexity. Our digital transformations improve upon earlier implementations---based upon the first generation of curvelets---in the sense that they are conceptually simpler, faster and far less redundant. The software CurveLab, which implements both transforms presented in this paper, is available at http://www.curvelet.org.}, bdsk-url-1 = {http://dx.doi.org/10.1137/05064182X}, bdsk-url-2 = {http://www-stat.stanford.edu/%7Ecandes/papers/FDCT.pdf}, date-added = {2008-05-06 19:34:41 -0700}, date-modified = {2008-08-14 14:58:30 -0700}, doi = {10.1137/05064182X}, issue = {3}, keywords = {curvelet transform}, pdf = {http://www-stat.stanford.edu/%7Ecandes/papers/FDCT.pdf}, publisher = {SIAM} } @INCOLLECTION{candes00cas, author = {E. J. Cand\`es and D. L. Donoho}, title = {Curvelets: a surprisingly effective nonadaptive representation of objects with edges}, booktitle = {Curve and surface fitting}, publisher = {Vanderbilt University Press}, year = {2000}, editor = {A. Cohen and C. Rabut and L. L. Schumaker}, pages = {105-120}, address = {Nashville, TN}, abstract = {It is widely believed that to efficiently represent an otherwise smooth object with discontinuities along edges, one must use an adaptive representation that in some sense `tracks' the shape of the discontinuity set. This folk-belief --- some would say folk-theorem --- is incorrect. At the very least, the possible quantitative advantage of such adaptation is vastly smaller than commonly believed. We have recently constructed a tight frame of curvelets which provides stable, efficient, and near-optimal representation of otherwise smooth objects having discontinuities along smooth curves. By applying naive thresholding to the curvelet transform of such an object, one can form m-term approximations with rate of L2 approximation rivaling the rate obtainable by complex adaptive schemes which attempt to `track' the discontinuity set. In this article we explain the basic issues of efficient m-term approximation, the construction of efficient adaptive representation, the construction of the curvelet frame, and a crude analysis of the performance of curvelet schemes.}, bdsk-url-1 = {http://www-stat.stanford.edu/%7Ecandes/papers/Curvelet-SMStyle.pdf}, date-added = {2008-05-26 17:48:55 -0700}, date-modified = {2008-08-14 15:26:58 -0700}, keywords = {curvelet transform} } @ARTICLE{candes05cct, author = {E. J. Cand\`es and D. L. Donoho}, title = {Continuous curvelet transform: {I.} {Resolution} of the wavefront set}, journal = {Applied and Computational Harmonic Analysis}, year = {2005}, volume = {19}, pages = {162-197}, number = {2}, month = {September}, bdsk-url-1 = {http://dx.doi.org/10.1016/j.acha.2005.02.003}, date-added = {2008-05-26 18:21:22 -0700}, date-modified = {2008-05-26 18:23:57 -0700}, issue = {2}, keywords = {curvelet transform} } @ARTICLE{candes05cct1, author = {E. J.
Cand\`es and D. L. Donoho}, title = {Continuous curvelet transform: {II.} {Discretization} and frames}, journal = {Applied and Computational Harmonic Analysis}, year = {2005}, volume = {19}, pages = {198-222}, number = {2}, month = {September}, bdsk-url-1 = {http://dx.doi.org/10.1016/j.acha.2005.02.004}, date-added = {2008-05-26 18:23:17 -0700}, date-modified = {2008-05-26 18:24:36 -0700}, issue = {2}, keywords = {curvelet transform} } @ARTICLE{candes04ntf, author = {E. J. Cand\`es and D. L. Donoho}, title = {New tight frames of curvelets and optimal representations of objects with piecewise-{C}$^2$ singularities}, journal = {Communications on Pure and Applied Mathematics}, year = {2004}, volume = {57}, pages = {219-266}, number = {2}, bdsk-url-1 = {http://dx.doi.org/10.1002/cpa.10116}, bdsk-url-2 = {http://www-stat.stanford.edu/%7Ecandes/papers/CurveEdges.pdf}, date-added = {2008-05-07 11:47:59 -0700}, date-modified = {2008-08-14 14:46:59 -0700}, doi = {10.1002/cpa.10116}, issue = {2}, keywords = {curvelet transform}, pdf = {http://www-stat.stanford.edu/%7Ecandes/papers/CurveEdges.pdf} } @ARTICLE{chauris08sdm, author = {H. Chauris and T. Nguyen}, title = {Seismic demigration/migration in the curvelet domain}, journal = {Geophysics}, year = {2008}, volume = {73}, pages = {S35-S46}, number = {2}, abstract = {Curvelets can represent local plane waves. They efficiently decompose seismic images and possibly imaging operators. We study how curvelets are distorted after demigration followed by migration in a different velocity model. We show that for small local velocity perturbations, the demigration/migration is reduced to a simple morphing of the initial curvelet. The derivation of the expected curvature of the curvelets shows that it is easier to sparsify the demigration/migration operator than the migration operator. An application on a 2D synthetic data set, generated in a smooth heterogeneous velocity model and with a complex reflectivity, demonstrates the usefulness of curvelets to predict what a migrated image would become in a locally different velocity model without the need for remigrating the full input data set. Curvelets are thus well suited to study the sensitivity of a prestack depth-migrated image with respect to the heterogeneous velocity model used for migration. {\copyright}2008 Society of Exploration Geophysicists}, bdsk-url-1 = {http://library.seg.org/doi/abs/10.1190/1.2831933}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.2831933}, date-added = {2008-05-07 14:48:33 -0700}, date-modified = {2008-08-14 14:59:04 -0700}, doi = {10.1190/1.2831933}, issue = {2}, keywords = {curvelet transform, imaging}, pdf = {http://library.seg.org/doi/abs/10.1190/1.2831933}, publisher = {SEG} } @BOOK{claerbout92esa, author = {J. F. Claerbout}, title = {Earth soundings analysis: processing versus inversion}, publisher = {Blackwell Scientific Publications}, year = {1992}, address = {Boston}, bdsk-url-1 = {http://sepwww.stanford.edu/sep/prof/pvi.pdf}, date-added = {2008-05-06 19:27:28 -0700}, date-modified = {2008-05-07 11:44:19 -0700}, keywords = {PEF}, pdf = {http://sepwww.stanford.edu/sep/prof/pvi.pdf} } @ARTICLE{claerbout71tau, author = {J. F. 
Claerbout}, title = {Toward a unified theory of reflector mapping}, journal = {Geophysics}, year = {1971}, volume = {36}, pages = {467-481}, number = {3}, abstract = {Schemes for seismic mapping of reflectors in the presence of an arbitrary velocity model, dipping and curved reflectors, diffractions, ghosts, surface elevation variations, and multiple reflections are reviewed and reduced to a single formula involving up and downgoing waves. The mapping formula may be implemented without undue complexity by means of difference approximations to the relativistic Schroedinger equation. {\copyright}1971 Society of Exploration Geophysicists}, bdsk-url-1 = {http://library.seg.org/doi/abs/10.1190/1.1440185}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1440185}, date-added = {2008-05-08 14:59:36 -0700}, date-modified = {2008-08-14 14:59:35 -0700}, doi = {10.1190/1.1440185}, issue = {3}, keywords = {WEM, imaging}, pdf = {http://library.seg.org/doi/abs/10.1190/1.1440185}, publisher = {SEG} } @ARTICLE{daubechies04ait, author = {I. Daubechies and M. Defrise and C. {De Mol}}, title = {An iterative thresholding algorithm for linear inverse problems with a sparsity constraint}, journal = {Communications on Pure and Applied Mathematics}, year = {2004}, volume = {57}, pages = {1413-1457}, number = {11}, abstract = {We consider linear inverse problems where the solution is assumed to have a sparse expansion on an arbitrary preassigned orthonormal basis. We prove that replacing the usual quadratic regularizing penalties by weighted $\ell^p$-penalties on the coefficients of such expansions, with $1 \le p \le 2$, still regularizes the problem. Use of such $\ell^p$-penalized problems with $p < 2$ is often advocated when one expects the underlying ideal noiseless solution to have a sparse expansion with respect to the basis under consideration. To compute the corresponding regularized solutions, we analyze an iterative algorithm that amounts to a Landweber iteration with thresholding (or nonlinear shrinkage) applied at each iteration step. We prove that this algorithm converges in norm. {\copyright} 2004 Wiley Periodicals, Inc.}, bdsk-url-1 = {http://dx.doi.org/10.1002/cpa.20042}, date-added = {2008-05-20 13:58:17 -0700}, date-modified = {2008-08-14 15:01:17 -0700}, issue = {11}, pdf = {http://dx.doi.org/10.1002/cpa.20042}, refer1 = {10.1002/cpa.20042} } @ARTICLE{do2002can, author = {M. N. Do and M. Vetterli}, title = {Contourlets: a new directional multiresolution image representation}, journal = {Proceedings. 2002 International Conference on Image Processing.}, year = {2002}, volume = {1}, abstract = {We propose a new scheme, named contourlet, that provides a flexible multiresolution, local and directional image expansion. The contourlet transform is realized efficiently via a double iterated filter bank structure. Furthermore, it can be designed to satisfy the anisotropy scaling relation for curves, and thus offers a fast and structured curvelet-like decomposition. As a result, the contourlet transform provides a sparse representation for two-dimensional piecewise smooth signals resembling images. Finally, we show some numerical experiments demonstrating the potential of contourlets in several image processing tasks.}, bdsk-url-1 = {http://dx.doi.org/10.1109/ICIP.2002.1038034}, date-added = {2008-05-07 11:58:00 -0700}, date-modified = {2008-08-14 15:01:55 -0700}, doi = {10.1109/ICIP.2002.1038034}, keywords = {contourlet transform} } @TECHREPORT{donoho99dct, author = {D. L. Donoho and M. R.
Duncan}, title = {Digital curvelet transform: strategy, implementation, and experiments}, institution = {Stanford Statistics Department}, year = {1999}, month = {November}, bdsk-url-1 = {http://citeseer.ist.psu.edu/rd/44392127,300178,1,0.25,Download/http://citeseer.ist.psu.edu/cache/papers/cs/15527/http:zSzzSzwww-stat.stanford.eduzSz~donohozSzReportszSz1999zSzDCvT.pdf/donoho99digital.pdf}, date-added = {2008-05-26 17:33:51 -0700}, date-modified = {2008-05-26 17:35:32 -0700}, keywords = {curvelet transform} } @ARTICLE{douma07los, author = {H. Douma and M. V. de Hoop}, title = {Leading-order seismic imaging using curvelets}, journal = {Geophysics}, year = {2007}, volume = {72}, pages = {S231-S248}, number = {6}, abstract = {Curvelets are plausible candidates for simultaneous compression of seismic data, their images, and the imaging operator itself. We show that with curvelets, the leading-order approximation (in angular frequency, horizontal wavenumber, and migrated location) to common-offset (CO) Kirchhoff depth migration becomes a simple transformation of coordinates of curvelets in the data, combined with amplitude scaling. This transformation is calculated using map migration, which employs the local slopes from the curvelet decomposition of the data. Because the data can be compressed using curvelets, the transformation needs to be calculated for relatively few curvelets only. Numerical examples for homogeneous media show that using the leading-order approximation only provides a good approximation to CO migration for moderate propagation times. As the traveltime increases and rays diverge beyond the spatial support of a curvelet, however, the leading-order approximation is no longer accurate enough. This shows the need for correction beyond leading order, even for homogeneous media. {\copyright}2007 Society of Exploration Geophysicists}, bdsk-url-1 = {http://library.seg.org/doi/abs/10.1190/1.2785047}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.2785047}, date-added = {2008-05-07 14:35:47 -0700}, date-modified = {2008-08-14 15:02:25 -0700}, doi = {10.1190/1.2785047}, issue = {6}, keywords = {curvelet transform, imaging}, pdf = {http://library.seg.org/doi/abs/10.1190/1.2785047}, publisher = {SEG} } @INCOLLECTION{feichtinger94tap, author = {H. G. Feichtinger and K. Grochenig}, title = {Theory and practice of irregular sampling}, booktitle = {Wavelets: mathematics and applications}, publisher = {CRC Press}, year = {1994}, editor = {J. J. Benedetto and M. Frazier}, series = {Studies in Advanced Mathematics}, chapter = {8}, pages = {305-363}, address = {Boca Raton, FL}, bdsk-url-1 = {http://www.univie.ac.at/nuhag-php/bibtex/open_files/fegr94_fgthpra.pdf}, date-added = {2008-05-20 17:10:18 -0700}, date-modified = {2008-05-20 17:24:38 -0700}, keywords = {sampling}, pdf = {http://www.univie.ac.at/nuhag-php/bibtex/open_files/fegr94_fgthpra.pdf} } @MISC{fenelon08msc, author = {Lloyd Fenelon}, title = {Nonequispaced discrete curvelet transform for seismic data reconstruction}, howpublished = {BSc thesis, Ecole Nationale Superieure De Physique de Strasbourg}, month = {August}, year = {2008}, abstract = {Physical constraints during seismic acquisitions lead to incomplete seismic datasets. Curvelet Reconstruction with Sparsity promoting Inversion (CRSI) is one of the most efficient interpolation methods available to recover complete datasets from data with missing traces. The method uses in its definition the curvelet transform, which is well suited to process seismic data.
However, its main shortcoming is that it cannot provide an accurate result if the data are acquired at irregular positions. This comes from the curvelet transform implementation, which cannot handle this type of data. In this thesis, the implementation of the curvelet transform is modified so that CRSI can give a better representation of seismic data for high-quality seismic imaging.}, bdsk-url-1 = {http://slim.eos.ubc.ca/Publications/Public/Theses/2008/fenelon08msc.pdf}, date-added = {2008-09-03 16:18:08 -0700}, date-modified = {2008-09-03 16:25:10 -0700}, keywords = {SLIM, BSc}, pdf = {http://slim.eos.ubc.ca/Publications/Public/Theses/2008/fenelon08msc.pdf} } @MISC{fomel07mos, author = {S. Fomel and P. Sava}, title = {{MADAGASCAR}: open-source software package for geophysical data processing and reproducible numerical experiments}, year = {2007}, abstract = {Madagascar is an open-source software package for geophysical data analysis and reproducible numerical experiments. Its mission is to provide a convenient and powerful environment and a convenient technology transfer tool for researchers working with digital image and data processing. The technology developed using the Madagascar project management system is transferred in the form of recorded processing histories, which become "computational recipes" to be verified, exchanged, and modified by users of the system.}, bdsk-url-1 = {http://rsf.sf.net}, date-added = {2008-06-26 15:31:10 -0700}, date-modified = {2008-08-14 15:31:44 -0700}, keywords = {software}, url = {http://rsf.sf.net} } @ARTICLE{guo07osm, author = {K. Guo and D. Labate}, title = {Optimally sparse multidimensional representation using shearlets}, journal = {SIAM Journal on Mathematical Analysis}, year = {2007}, volume = {39}, pages = {298-318}, number = {1}, bdsk-url-1 = {http://www4.ncsu.edu/~dlabate/shear_GL.pdf}, bdsk-url-2 = {http://dx.doi.org/10.1137/060649781}, date-added = {2008-05-07 12:03:03 -0700}, date-modified = {2008-05-08 10:28:30 -0700}, doi = {10.1137/060649781}, issue = {1}, keywords = {shearlet transform}, pdf = {http://www4.ncsu.edu/~dlabate/shear_GL.pdf}, publisher = {SIAM} } @ARTICLE{hampson86ivs, author = {D. Hampson}, title = {Inverse velocity stacking for multiple elimination}, journal = {Journal of the Canadian Society of Exploration Geophysicists}, year = {1986}, volume = {22}, pages = {44-45}, number = {1}, bdsk-url-1 = {http://209.91.124.56/publications/journal/1986_12/1986_Hampson_D_inverse_velocity_stacking.pdf}, date-added = {2008-05-06 19:09:45 -0700}, date-modified = {2008-05-07 11:44:52 -0700}, issue = {1}, keywords = {Radon transform}, pdf = {http://209.91.124.56/publications/journal/1986_12/1986_Hampson_D_inverse_velocity_stacking.pdf}, publisher = {CSEG} } @ARTICLE{hindriks00ro3, author = {K. Hindriks and A. J. W. Duijndam}, title = {Reconstruction of {3-D} seismic signals irregularly sampled along two spatial coordinates}, journal = {Geophysics}, year = {2000}, volume = {65}, pages = {253-263}, number = {1}, abstract = {Seismic signals are often irregularly sampled along spatial coordinates, leading to suboptimal processing and imaging results. Least-squares estimation of Fourier components is used for the reconstruction of band-limited seismic signals that are irregularly sampled along two spatial coordinates. A simple and efficient diagonal weighting scheme, based on the areas surrounding the spatial samples, takes the properties of the noise (signal outside the bandwidth) into account in an approximate sense.
Diagonal stabilization based on the energies of the signal and the noise ensures robust estimation. Reconstruction by temporal frequency component allows the specification of varying bandwidth in two dimensions, depending on the minimum apparent velocity. This parameterization improves the reconstruction capability for lower temporal frequencies. The shape of the spatial aperture affects the method of sampling the Fourier domain. Taking into account this property, a larger bandwidth can be recovered. The properties of the least-squares estimator allow a very efficient implementation which, when using a conjugate gradient algorithm, requires a modest number of 2-D fast Fourier transforms per temporal frequency. The method shows significant improvement over the conventionally used binning and stacking method on both synthetic and real data. The method can be applied to any subset of seismic data with two varying spatial coordinates. {\copyright}2000 Society of Exploration Geophysicists}, bdsk-url-1 = {http://link.aip.org/link/?GPY/65/253/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1444716}, date-added = {2008-05-20 16:12:37 -0700}, date-modified = {2008-08-14 15:05:01 -0700}, doi = {10.1190/1.1444716}, issue = {1}, keywords = {reconstruction}, pdf = {http://link.aip.org/link/?GPY/65/253/1}, publisher = {SEG} } @PHDTHESIS{kunis06nff, author = {S. Kunis}, title = {Nonequispaced {FFT}: generalisation and inversion}, school = {L\"ubeck University}, year = {2006}, bdsk-url-1 = {http://www.analysis.uni-osnabrueck.de/kunis/paper/KunisDiss.pdf}, date-added = {2008-05-07 18:51:16 -0700}, date-modified = {2008-05-20 11:49:04 -0700}, keywords = {NFFT}, pdf = {http://www.analysis.uni-osnabrueck.de/kunis/paper/KunisDiss.pdf} } @ARTICLE{lu07mdf, author = {Y. M. Lu and M. N. Do}, title = {Multidimensional directional filter banks and surfacelets}, journal = {IEEE Transactions on Image Processing}, year = {2007}, volume = {16}, pages = {918-931}, number = {4}, month = {April}, abstract = {In 1992, Bamberger and Smith proposed the directional filter bank (DFB) for an efficient directional decomposition of 2-D signals. Due to the nonseparable nature of the system, extending the DFB to higher dimensions while still retaining its attractive features is a challenging and previously unsolved problem. We propose a new family of filter banks, named NDFB, that can achieve the directional decomposition of arbitrary N-dimensional ($N \ge 2$) signals with a simple and efficient tree-structured construction. In 3-D, the ideal passbands of the proposed NDFB are rectangular-based pyramids radiating out from the origin at different orientations and tiling the entire frequency space. The proposed NDFB achieves perfect reconstruction via an iterated filter bank with a redundancy factor of N in N-D. The angular resolution of the proposed NDFB can be iteratively refined by invoking more levels of decomposition through a simple expansion rule.
By combining the NDFB with a new multiscale pyramid, we propose the surfacelet transform, which can be used to efficiently capture and represent surface-like singularities in multidimensional data}, bdsk-url-1 = {http://dx.doi.org/10.1109/TIP.2007.891785}, date-added = {2008-05-07 12:19:48 -0700}, date-modified = {2008-08-14 15:05:31 -0700}, doi = {10.1109/TIP.2007.891785}, issn = {1057-7149}, issue = {4}, keywords = {surfacelet transform}, publisher = {IEEE} } @BOOK{mallat99awt, title = {A wavelet tour of signal processing, second edition}, publisher = {Academic Press}, year = {1999}, author = {S. Mallat}, month = {September}, date-added = {2008-05-22 16:32:31 -0700}, date-modified = {2008-05-22 16:33:57 -0700}, howpublished = {Hardcover}, isbn = {012466606X}, keywords = {wavelet transform} } @CONFERENCE{morton98fsr, author = {S. A. Morton and C. C. Ober}, title = {Faster shot-record depth migrations using phase encoding}, booktitle = {SEG Technical Program Expanded Abstracts}, year = {1998}, volume = {17}, number = {1}, pages = {1131-1134}, publisher = {SEG}, bdsk-url-1 = {http://link.aip.org/link/?SGA/17/1131/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1820088}, date-added = {2008-05-27 16:44:01 -0700}, date-modified = {2008-05-27 16:45:21 -0700}, doi = {10.1190/1.1820088}, issue = {1}, pdf = {http://link.aip.org/link/?SGA/17/1131/1} } @ARTICLE{paige82lsq, author = {C. C. Paige and M. A. Saunders}, title = {{LSQR}: an algorithm for sparse linear equations and sparse least squares}, journal = {Transactions on Mathematical Software}, year = {1982}, volume = {8}, pages = {43-71}, number = {1}, address = {New York, NY, USA}, bdsk-url-1 = {http://doi.acm.org/10.1145/355984.355989}, date-added = {2008-05-20 14:00:44 -0700}, date-modified = {2008-05-20 19:47:37 -0700}, doi = {http://doi.acm.org/10.1145/355984.355989}, issn = {0098-3500}, issue = {1}, keywords = {LSQR}, publisher = {ACM} } @INCOLLECTION{potts01mst, author = {D. Potts and G. Steidl and M. Tasche}, title = {Fast {Fourier} transforms for nonequispaced data: a tutorial}, booktitle = {Modern sampling theory: mathematics and applications}, publisher = {Birkhauser}, year = {2001}, editor = {J. J. Benedetto and P. Ferreira}, chapter = {12}, pages = {249-274}, abstract = {In this section, we consider approximate methods for the fast computiation of multivariate discrete Fourier transforms for nonequispaced data (NDFT) in the time domain and in the frequency domain. In particular, we are interested in the approximation error as function of arithmetic complexity of the algorithm. We discuss the robustness of NDFT-algorithms with respect to roundoff errors and apply NDFT-algorithms for the fast computation of Bessel transforms.}, bdsk-url-1 = {http://www.tu-chemnitz.de/~potts/paper/ndft.pdf}, date-added = {2008-05-07 18:44:29 -0700}, date-modified = {2008-08-14 15:28:37 -0700}, keywords = {NFFT}, pdf = {http://www.tu-chemnitz.de/~potts/paper/ndft.pdf} } @ARTICLE{romero00peo, author = {L. A. Romero and D. C. Ghiglia and C. C. Ober and S. A. Morton}, title = {Phase encoding of shot records in prestack migration}, journal = {Geophysics}, year = {2000}, volume = {65}, pages = {426-436}, number = {2}, abstract = {Frequency-domain shot-record migration can produce higher quality images than Kirchhoff migration but typically at a greater cost. The computing cost of shot-record migration is the product of the number of shots in the survey and the expense of each individual migration. 
Many attempts to reduce this cost have focused on the speed of the individual migrations, trying to achieve a better trade-off between accuracy and speed. Another approach is to reduce the number of migrations. We investigate the simultaneous migration of shot records using frequency-domain shot-record migration algorithms. The difficulty with this approach is the production of so-called crossterms between unrelated shot and receiver wavefields, which generate unwanted artifacts or noise in the final image. To reduce these artifacts and obtain an image comparable in quality to the single-shot-per-migration result, we have introduced a process called phase encoding, which shifts or disperses these crossterms. The process of phase encoding thus allows one to trade S/N ratio for the speed of migrating the entire survey. Several encoding functions and two application strategies have been tested. The first strategy, combining multiple shots per migration and using each shot only once, reduces computation in direct relation to the number of shots combined. The second strategy, performing multiple migrations of all the shots in the survey, provides a means to reduce the crossterm noise by stacking the resulting images. The additional noise in both strategies may be tolerated if it is no stronger than the inherent seismic noise in the migrated image and if the final image is achieved with less cost. {\copyright}2000 Society of Exploration Geophysicists}, bdsk-url-1 = {http://library.seg.org/doi/abs/10.1190/1.1444737}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1444737}, date-added = {2008-05-27 16:42:50 -0700}, date-modified = {2008-08-14 15:07:08 -0700}, doi = {10.1190/1.1444737}, issue = {2}, pdf = {http://library.seg.org/doi/abs/10.1190/1.1444737}, publisher = {SEG} } @ARTICLE{sacchi98iae, author = {M. D. Sacchi and T. J. Ulrych and C. J. Walker}, title = {Interpolation and extrapolation using a high-resolution discrete {Fourier} transform}, journal = {IEEE Transactions on Signal Processing}, year = {1998}, volume = {46}, pages = {31-38}, number = {1}, abstract = {We present an iterative nonparametric approach to spectral estimation that is particularly suitable for estimation of line spectra. This approach minimizes a cost function derived from Bayes' theorem. The method is suitable for line spectra since a ``long tailed'' distribution is used to model the prior distribution of spectral amplitudes. An important aspect of this method is that since the data themselves are used as constraints, phase information can also be recovered and used to extend the data outside the original window. The objective function is formulated in terms of hyperparameters that control the degree of fit and spectral resolution. Noise rejection can also be achieved by truncating the number of iterations. Spectral resolution and extrapolation length are controlled by a single parameter. When this parameter is large compared with the spectral powers, the algorithm leads to zero extrapolation of the data, and the estimated Fourier transform yields the periodogram. When the data are sampled at a constant rate, the algorithm uses one Levinson recursion per iteration. For irregular sampling (unevenly sampled and/or gapped data), the algorithm uses one Cholesky decomposition per iteration.
The performance of the algorithm is illustrated with three different problems that frequently arise in geophysical data processing: 1) harmonic retrieval from a time series contaminated with noise; 2) linear event detection from a finite aperture array of receivers [which, in fact, is an extension of 1)], 3) interpolation/extrapolation of gapped data. The performance of the algorithm as a spectral estimator is tested with the Kay and Marple data set. It is shown that the achieved resolution is comparable with parametric methods but with more accurate representation of the relative power in the spectral lines.}, bdsk-url-1 = {http://saig.physics.ualberta.ca/s/sites/default/files/upload/articles/Sacchi_Ulrych_Walker_IEEE_98.pdf}, date-added = {2008-05-06 19:18:50 -0700}, date-modified = {2008-08-14 15:08:37 -0700}, doi = {10.1109/78.651165}, issue = {1}, keywords = {Fourier transform, reconstruction}, pdf = {http://saig.physics.ualberta.ca/s/sites/default/files/upload/articles/Sacchi_Ulrych_Walker_IEEE_98.pdf}, publisher = {IEEE} } @PHDTHESIS{schonewille00phd, author = {M. A. Schonewille}, title = {Fourier reconstruction of irregularly sampled seismic data}, school = {Delft University of Technology}, year = {2000}, address = {Delft, The Netherlands}, month = {November}, date-added = {2008-05-06 19:03:35 -0700}, date-modified = {2008-05-09 14:43:57 -0700}, keywords = {Fourier transform, reconstruction}, rating = {0}, read = {Yes} } @ARTICLE{smith98ahs, author = {H. Smith}, title = {A {Hardy} space for {Fourier} integral operators}, journal = {Journal of Geometric Analysis}, year = {1998}, volume = {8}, pages = {629-653}, number = {4}, date-added = {2008-05-07 12:25:03 -0700}, date-modified = {2008-08-14 15:09:47 -0700}, issue = {4}, keywords = {FIO} } @BOOK{snieder93giu, author = {R. Snieder}, title = {Global inversions using normal mode and long-period surface waves}, publisher = {Chapman and Hall}, year = {1993}, date-added = {2008-05-20 17:16:42 -0700}, date-modified = {2008-05-20 17:19:44 -0700}, keywords = {sampling} } @ARTICLE{spitz91sti, author = {S. Spitz}, title = {Seismic trace interpolation in the {FX} domain}, journal = {Geophysics}, year = {1991}, volume = {56}, pages = {785-794}, number = {6}, abstract = {Interpolation of seismic traces is an effective means of improving migration when the data set exhibits spatial aliasing. A major difficulty of standard interpolation methods is that they depend on the degree of reliability with which the various geological events can be separated. In this respect, a multichannel interpolation method is described which requires neither a priori knowledge of the directions of lateral coherence of the events, nor estimation of these directions. The method is based on the fact that linear events present in a section made of equally spaced traces may be interpolated exactly, regardless of the original spatial interval, without any attempt to determine their true dips. The predictability of linear events in the f-x domain allows the missing traces to be expressed as the output of a linear system, the input of which consists of the recorded traces. The interpolation operator is obtained by solving a set of linear equations whose coefficients depend only on the spectrum of the spatial prediction filter defined by the recorded traces. Synthetic examples show that this method is insensitive to random noise and that it correctly handles curvatures and lateral amplitude variations. 
Assessment of the method with a real data set shows that the interpolation yields an improved migrated section. {\copyright}1991 Society of Exploration Geophysicists}, bdsk-url-1 = {http://dx.doi.org/10.1190/1.1443096}, date-added = {2008-05-06 19:29:12 -0700}, date-modified = {2008-08-14 15:18:16 -0700}, doi = {10.1190/1.1443096}, issue = {6}, keywords = {PEF}, publisher = {SEG} } @ARTICLE{starck02tct, author = {J.-L. Starck and E. J. Cand\`es and D. L. Donoho}, title = {The curvelet transform for image denoising}, journal = {IEEE Transactions on Image Processing}, year = {2002}, volume = {11}, pages = {670-684}, number = {6}, month = {June}, abstract = {We describe approximate digital implementations of two new mathematical transforms, namely, the ridgelet transform and the curvelet transform. Our implementations offer exact reconstruction, stability against perturbations, ease of implementation, and low computational complexity. A central tool is Fourier-domain computation of an approximate digital Radon transform. We introduce a very simple interpolation in the Fourier space which takes Cartesian samples and yields samples on a rectopolar grid, which is a pseudo-polar sampling set based on a concentric squares geometry. Despite the crudeness of our interpolation, the visual performance is surprisingly good. Our ridgelet transform applies to the Radon transform a special overcomplete wavelet pyramid whose wavelets have compact support in the frequency domain. Our curvelet transform uses our ridgelet transform as a component step, and implements curvelet subbands using a filter bank of \`a trous wavelet filters. Our philosophy throughout is that transforms should be overcomplete, rather than critically sampled. We apply these digital transforms to the denoising of some standard images embedded in white noise. In the tests reported here, simple thresholding of the curvelet coefficients is very competitive with ``state of the art'' techniques based on wavelets, including thresholding of decimated or undecimated wavelet transforms and also including tree-based Bayesian posterior mean methods. Moreover, the curvelet reconstructions exhibit higher perceptual quality than wavelet-based reconstructions, offering visually sharper images and, in particular, higher quality recovery of edges and of faint linear and curvilinear features. Existing theory for curvelet and ridgelet transforms suggests that these new approaches can outperform wavelet methods in certain image reconstruction problems. The empirical results reported here are in encouraging agreement.}, bdsk-url-1 = {http://dx.doi.org/10.1109/TIP.2002.1014998}, bdsk-url-2 = {http://ieeexplore.ieee.org/iel5/83/21845/01014998.pdf}, date-added = {2008-05-26 17:38:14 -0700}, date-modified = {2008-08-14 15:19:16 -0700}, doi = {10.1109/TIP.2002.1014998}, issn = {1057-7149}, issue = {6}, keywords = {curvelet transform}, publisher = {IEEE} } @ARTICLE{symes07rtm, author = {W. W. Symes}, title = {Reverse time migration with optimal checkpointing}, journal = {Geophysics}, year = {2007}, volume = {72}, pages = {SM213-SM221}, number = {5}, abstract = {Reverse time migration (RTM) requires that fields computed in forward time be accessed in reverse order. Such out-of-order access, to recursively computed fields, requires that some part of the recursion history be stored (checkpointed), with the remainder computed by repeating parts of the forward computation.
Optimal checkpointing algorithms choose checkpoints in such a way that the total storage is minimized for a prescribed level of excess computation, or vice versa. Optimal checkpointing dramatically reduces the storage required by RTM, compared to that needed for nonoptimal implementations, at the price of a small increase in computation. This paper describes optimal checkpointing in a form which applies both to RTM and other applications of the adjoint state method, such as construction of velocity updates from prestack wave equation migration. {\copyright}2007 Society of Exploration Geophysicists}, bdsk-url-1 = {http://link.aip.org/link/?GPY/72/SM213/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.2742686}, date-added = {2008-05-08 14:42:11 -0700}, date-modified = {2008-08-14 15:19:43 -0700}, doi = {10.1190/1.2742686}, issue = {5}, keywords = {RTM, imaging}, pdf = {http://link.aip.org/link/?GPY/72/SM213/1}, publisher = {SEG} } @ARTICLE{thorson85vsa, author = {J. R. Thorson and J. F. Claerbout}, title = {Velocity-stack and slant-stack stochastic inversion}, journal = {Geophysics}, year = {1985}, volume = {50}, pages = {2727-2741}, number = {12}, abstract = {Normal moveout (NMO) and stacking, an important step in analysis of reflection seismic data, involves summation of seismic data over paths represented by a family of hyperbolic curves. This summation process is a linear transformation and maps the data into what might be called a velocity space: a two-dimensional set of points indexed by time and velocity. Examination of data in velocity space is used for analysis of subsurface velocities and filtering of undesired coherent events (e.g., multiples), but the filtering step is useful only if an approximate inverse to the NMO and stack operation is available. One way to effect velocity filtering is to use the operator $L^T$ (defined as NMO and stacking) and its adjoint $L$ as a transform pair, but this leads to unacceptable filtered output. Designing a better estimated inverse to $L$ than $L^T$ is a generalization of the inversion problem of computerized tomography: deconvolving out the point-spread function after back projection. The inversion process is complicated by missing data, because surface seismic data are recorded only within a finite spatial aperture on the Earth's surface. Our approach to solving the problem of an ill-conditioned or nonunique inverse $L^{-1}$, brought on by missing data, is to design a stochastic inverse to $L$. Starting from a maximum a posteriori (MAP) estimator, a system of equations can be set up in which a priori information is incorporated into a sparseness measure: the output of the stochastic inverse is forced to be locally focused, in order to obtain the best possible resolution in velocity space. The size of the resulting nonlinear system of equations is immense, but using a few iterations with a gradient descent algorithm is adequate to obtain a reasonable solution. This theory may also be applied to other large, sparse linear operators. The stochastic inverse of the slant-stack operator (a particular form of the Radon transform) can be developed in a parallel manner, and will yield an accurate slant-stack inverse pair. {\copyright}1985 Society of Exploration Geophysicists}, bdsk-url-1 = {http://dx.doi.org/10.1190/1.1441893}, date-added = {2008-05-06 19:06:15 -0700}, date-modified = {2008-08-14 15:20:19 -0700}, doi = {10.1190/1.1441893}, issue = {12}, keywords = {Radon transform}, publisher = {SEG} } @ARTICLE{trad03lvo, author = {D. Trad and T. J. Ulrych and M. D.
Sacchi}, title = {Latest views of the sparse {Radon} transform}, journal = {Geophysics}, year = {2003}, volume = {68}, pages = {386-399}, number = {1}, abstract = {The Radon transform (RT) suffers from the typical problems of loss of resolution and aliasing that arise as a consequence of incomplete information, including limited aperture and discretization. Sparseness in the Radon domain is a valid and useful criterion for supplying this missing information, equivalent somehow to assuming smooth amplitude variation in the transition between known and unknown (missing) data. Applying this constraint while honoring the data can become a serious challenge for routine seismic processing because of the very limited processing time available, in general, per common midpoint. To develop methods that are robust, easy to use, and flexible to adapt to different problems, we have to pay attention to a variety of algorithms, operator design, and estimation of the hyperparameters that are responsible for the regularization of the solution. In this paper, we discuss fast implementations for several varieties of RT in the time and frequency domains. An iterative conjugate gradient algorithm with fast Fourier transform multiplication is used in all cases. To preserve the important property of iterative subspace methods of regularizing the solution by the number of iterations, the model weights are incorporated into the operators. This turns out to be of particular importance, and it can be understood in terms of the singular vectors of the weighted transform. The iterative algorithm is stopped according to a general cross validation criterion for subspaces. We apply this idea to several known implementations and compare results in order to better understand differences between, and merits of, these algorithms. {\copyright}2003 Society of Exploration Geophysicists}, bdsk-url-1 = {http://link.aip.org/link/?GPY/68/386/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1543224}, date-added = {2008-05-07 19:03:39 -0700}, date-modified = {2008-08-14 15:20:56 -0700}, doi = {10.1190/1.1543224}, issue = {1}, keywords = {Radon transform}, pdf = {http://link.aip.org/link/?GPY/68/386/1}, publisher = {SEG} } @ARTICLE{verschuur97eom, author = {D. J. Verschuur and A. J. Berkhout}, title = {Estimation of multiple scattering by iterative inversion, {Part} {II}: {Practical} aspects and examples}, journal = {Geophysics}, year = {1997}, volume = {62}, pages = {1596-1611}, number = {5}, abstract = {A surface-related multiple-elimination method can be formulated as an iterative procedure: the output of one iteration step is used as input for the next iteration step (part I of this paper). In this paper (part II) it is shown that the procedure can be made very efficient if a good initial estimate of the multiple-free data set can be provided in the first iteration, and in many situations, the Radon-based multiple-elimination method may provide such an estimate. It is also shown that for each iteration, the inverse source wavelet can be accurately estimated by a linear (least-squares) inversion process. Optionally, source and detector variations and directivity effects can be included, although the examples are given without these options. The iterative multiple elimination process, together with the source wavelet estimation, are illustrated with numerical experiments as well as with field data examples.
The results show that the surface-related multiple-elimination process is very effective in time gates where the moveout properties of primaries and multiples are very similar (generally deep data), as well as for situations with a complex multiple-generating system. {\copyright}1997 Society of Exploration Geophysicists}, bdsk-url-1 = {http://link.aip.org/link/?GPY/62/1596/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1444262}, date-added = {2008-05-07 18:40:45 -0700}, date-modified = {2008-08-14 15:21:18 -0700}, doi = {10.1190/1.1444262}, issue = {5}, keywords = {SRME}, pdf = {http://link.aip.org/link/?GPY/62/1596/1}, publisher = {SEG} } @ARTICLE{xu05aft, author = {S. Xu and Y. Zhang and D. Pham and G. Lambar\'{e}}, title = {Antileakage {Fourier} transform for seismic data regularization}, journal = {Geophysics}, year = {2005}, volume = {70}, pages = {V87-V95}, number = {4}, abstract = {Seismic data regularization, which spatially transforms irregularly sampled acquired data to regularly sampled data, is a long-standing problem in seismic data processing. Data regularization can be implemented using Fourier theory by using a method that estimates the spatial frequency content on an irregularly sampled grid. The data can then be reconstructed on any desired grid. Difficulties arise from the nonorthogonality of the global Fourier basis functions on an irregular grid, which results in the problem of ``spectral leakage'': energy from one Fourier coefficient leaks onto others. We investigate the nonorthogonality of the Fourier basis on an irregularly sampled grid and propose a technique called ``antileakage Fourier transform'' to overcome the spectral leakage. In the antileakage Fourier transform, we first solve for the most energetic Fourier coefficient, assuming that it causes the most severe leakage. To attenuate all aliases and the leakage of this component onto other Fourier coefficients, the data component corresponding to this most energetic Fourier coefficient is subtracted from the original input on the irregular grid. We then use this new input to solve for the next Fourier coefficient, repeating the procedure until all Fourier coefficients are estimated. This procedure is equivalent to ``reorthogonalizing'' the global Fourier basis on an irregularly sampled grid. We demonstrate the robustness and effectiveness of this technique with successful applications to both synthetic and real data examples. {\copyright}2005 Society of Exploration Geophysicists}, bdsk-url-1 = {http://link.aip.org/link/?GPY/70/V87/1}, bdsk-url-2 = {http://dx.doi.org/10.1190/1.1993713}, date-added = {2008-05-09 17:43:47 -0700}, date-modified = {2008-08-14 15:21:45 -0700}, doi = {10.1190/1.1993713}, issue = {4}, keywords = {Fourier transform, reconstruction}, pdf = {http://link.aip.org/link/?GPY/70/V87/1}, publisher = {SEG} } @ARTICLE{ying053dd, author = {L. Ying and L. Demanet and E. J. Cand\`es}, title = {3-{D} discrete curvelet transform}, journal = {Proceedings SPIE wavelets XI, San Diego}, year = {2005}, volume = {5914}, pages = {344-354}, month = {January}, abstract = {In this paper, we present the first 3D discrete curvelet transform. This transform is an extension to the 2D transform described in Cand\`es et al. The resulting curvelet frame preserves the important properties, such as parabolic scaling, tightness and sparse representation for singularities of codimension one. We describe three different implementations: in-core, out-of-core and MPI-based parallel implementations.
Numerical results verify the desired properties of the 3D curvelets and demonstrate the efficiency of our implementations.}, bdsk-url-1 = {http://dx.doi.org/10.1117/12.616205}, date-added = {2008-05-07 14:14:59 -0700}, date-modified = {2008-08-14 15:21:59 -0700}, doi = {10.1117/12.616205}, keywords = {curvelet transform} } @PHDTHESIS{zwartjes05phd, author = {P. M. Zwartjes}, title = {Fourier reconstruction with sparse inversion}, school = {Delft University of Technology}, year = {2005}, address = {Delft, The Netherlands}, month = {December}, date-added = {2008-05-06 18:58:35 -0700}, date-modified = {2008-05-09 14:44:04 -0700}, keywords = {Fourier transform, reconstruction}, rating = {0}, read = {Yes} }