@conference {herrmann2022EAGEcvm, title = {Capturing velocity-model uncertainty and two-phase flow with Fourier Neural Operators}, booktitle = {EAGE Annual Conference Proceedings}, year = {2022}, note = {(EAGE, Madrid)}, month = {06}, pages = {AI in Geoscience and Geophysics: Current Trends and Future Prospects (Dedicated Session)}, keywords = {CCS, EAGE, Fourier neural operators, JRM, seismic imaging, Uncertainty quantification}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2022/herrmann2022EAGEcvm/index.html}, author = {Ali Siahkoohi and Thomas J. Grady II and Abhinav Prakash Gahlot and Huseyin Tuna Erdinc and Felix J. Herrmann} } @conference {louboutin2022EAGEewi, title = {Enabling wave-based inversion on GPUs with randomized trace estimation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2022}, note = {(EAGE, Madrid)}, month = {06}, pages = {Seismic Wave Modelling and Least Square Migration 2 session}, abstract = {By building on recent advances in the use of randomized trace estimation to drastically reduce the memory footprint of adjoint-state methods, we present and validate an imaging approach that can be executed exclusively on accelerators. Results obtained on field-realistic synthetic datasets, which include salt and anisotropy, show that our method produces high-fidelity images. 
These findings open the enticing perspective of 3D wave-based inversion technology with a memory footprint that matches the hardware and that runs exclusively on clusters of GPUs without the undesirable need to offload certain tasks to CPUs.}, keywords = {EAGE, Image Volumes, inversion, RTM, SEAM, stochastic, TTI}, doi = {10.3997/2214-4609.202210531}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2022/louboutin2022eageewi/louboutinp.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2022/louboutin2022eageewi/Jun-9-3-15-mloubout.pdf}, software = {https://github.com/slimgroup/TimeProbeSeismic.jl}, author = {Mathias Louboutin and Felix J. Herrmann} } @conference {siahkoohi2022EAGEweb, title = {Wave-equation based inversion with amortized variational Bayesian inference}, booktitle = {EAGE Annual Conference Proceedings}, year = {2022}, note = {(EAGE, Madrid)}, month = {06}, pages = {Session 2: Velocity model building and imaging (different domains)}, abstract = {Solving inverse problems involving measurement noise and modeling errors requires regularization in order to avoid data overfit. Geophysical inverse problems, in which the Earth{\textquoteright}s highly heterogeneous structure is unknown, present a challenge in encoding prior knowledge through analytical expressions. Our main contribution is a generative-model-based regularization approach, robust to out-of-distribution data, which exploits the prior knowledge embedded in existing data and model pairs. Utilizing an amortized variational inference objective, a conditional normalizing flow (NF) is pretrained on pairs of low- and high-fidelity migrated images in order to achieve a low-fidelity approximation to the seismic imaging posterior distribution for previously unseen data. The NF is used after pretraining to reparameterize the unknown seismic image in an inversion scheme involving physics-guided data misfit and a Gaussian prior on the NF latent variable. 
Solving this optimization problem with respect to the latent variable enables us to leverage the benefits of data-driven conditional priors whilst being informed by physics and data. The numerical experiments demonstrate that the proposed inversion scheme produces seismic images with limited artifacts when dealing with noisy and out-of-distribution data.}, keywords = {conditional priors, EAGE, Normalizing flows, seismic imaging}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2022/siahkoohi2022EAGEweb/abstract.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2022/siahkoohi2022EAGEweb/Mon-Herrmann.pdf}, software = {https://github.com/slimgroup/ConditionalNFs4Imaging.jl}, author = {Ali Siahkoohi and Rafael Orozco and Gabrio Rizzuti and Felix J. Herrmann} } @conference {siahkoohi2020EAGEdlb, title = {A deep-learning based Bayesian approach to seismic imaging and uncertainty quantification}, booktitle = {EAGE Annual Conference Proceedings}, year = {2020}, note = {Accepted in EAGE}, month = {1}, abstract = {Uncertainty quantification is essential when dealing with ill-conditioned inverse problems due to the inherent nonuniqueness of the solution. Bayesian approaches allow us to determine how likely an estimation of the unknown parameters is via formulating the posterior distribution. Unfortunately, it is often not possible to formulate a prior distribution that precisely encodes our prior knowledge about the unknown. Furthermore, adherence to handcrafted priors may greatly bias the outcome of the Bayesian analysis. To address this issue, we propose to use the functional form of a randomly initialized convolutional neural network as an implicit structured prior, which is shown to promote natural images and excludes images with unnatural noise. 
In order to incorporate the model uncertainty into the final estimate, we sample the posterior distribution using stochastic gradient Langevin dynamics and perform Bayesian model averaging on the obtained samples. Our synthetic numerical experiment verifies that deep priors combined with Bayesian model averaging are able to partially circumvent imaging artifacts and reduce the risk of overfitting in the presence of extreme noise. Finally, we present pointwise variance of the estimates as a measure of uncertainty, which coincides with regions that are difficult to image.}, keywords = {deep learning, EAGE, seismic imaging, stochastic gradient Langevin dynamics, Uncertainty quantification}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2020/siahkoohi2020EAGEdlb/siahkoohi2020EAGEdlb.html}, author = {Ali Siahkoohi and Gabrio Rizzuti and Felix J. Herrmann} } @conference {rizzuti2020EAGEtwri, title = {Time-domain wavefield reconstruction inversion for large-scale seismics}, booktitle = {EAGE Annual Conference Proceedings}, year = {2020}, note = {Accepted in EAGE, withdrawn due to logistical challenge}, month = {1}, abstract = {Wavefield reconstruction inversion is an imaging technique akin to full-waveform inversion, albeit based on a relaxed version of the wave equation. This relaxation aims to beat the multimodality typical of full-waveform inversion. However it prevents the use of time-marching solvers for the augmented equation and, as a consequence, cannot be straightforwardly employed to large 3D problems. In this work, we formulate a dual version of wavefield reconstruction inversion amenable to explicit time-domain solvers, yielding a robust and scalable inversion technique.}, keywords = {3D, EAGE, Full-waveform inversion, Time-domain}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2020/rizzuti2020EAGEtwri/rizzuti2020EAGEtwri.html}, author = {Gabrio Rizzuti and Mathias Louboutin and Rongrong Wang and Felix J. 
Herrmann} } @conference {herrmann2019EAGEHPCaii, title = {Accelerating ideation and innovation cheaply in the Cloud{\textendash}-the power of abstraction, collaboration and reproducibility}, booktitle = {4th EAGE Workshop on High-performance Computing}, year = {2019}, note = {(EAGE HPC Workshop, Dubai)}, month = {10}, keywords = {cloud, devito, EAGE, HPC, JUDI}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGEHPC/2019/herrmann2019EAGEHPCaii/herrmann2019EAGEHPCaii_pres.pdf}, author = {Felix J. Herrmann and Charles Jones and Gerard Gorman and Jan H{\"u}ckelheim and Keegan Lensink and Paul H. J. Kelly and Navjot Kukreja and Henryk Modzelewski and Michael Lange and Mathias Louboutin and Fabio Luporini and James Selvages and Philipp A. Witte} } @conference {rizzuti2019EAGElis, title = {Learned iterative solvers for the Helmholtz equation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2019}, note = {(EAGE, Copenhagen)}, month = {06}, abstract = {We propose a {\textquoteleft}learned{\textquoteright} iterative solver for the Helmholtz equation, by combining traditional Krylov-based solvers with machine learning. The method is, in principle, able to circumvent the shortcomings of classical iterative solvers, and has clear advantages over purely data-driven approaches. We demonstrate the effectiveness of this approach under a 1.5-D assumption, when adequate a priori information about the velocity distribution is known.}, keywords = {EAGE, Helmholtz, Iterative, machine learning}, doi = {10.3997/2214-4609.201901542}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2019/rizzuti2019EAGElis/rizzuti2019EAGElis.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2019/rizzuti2019EAGElis/rizzuti2019EAGElis_pres.pdf}, author = {Gabrio Rizzuti and Ali Siahkoohi and Felix J.
Herrmann} } @conference {alfaraj2018EAGEasa, title = {Automatic statics and residual statics correction with low-rank approximation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2018}, note = {(EAGE, Copenhagen)}, month = {06}, abstract = {Seismic data, mainly on land, suffers from long-wavelength statics due to the laterally varying and heterogeneous nature of the near-surface weathering layers. We propose an automatic, data-driven and computationally efficient statics correction method based on low-rank approximation to correct for such statics. The method does not require a model to estimate static time shifts, which is the case for other static correction methods; rather it applies the appropriate static corrections on the data such that it becomes low rank in a certain domain. As of now, the method is applicable to data that has been corrected for elevation statics. Due to the near-surface irregularities and due to approximations used by static correction methods that lead to not fully correcting for statics, an iterative residual statics correction becomes necessary. Our proposed method corrects for residual statics without the necessity of the surface consistency assumption and a multi-iterate process. Additional benefits of the method include artifacts and noise suppression. We demonstrate the successful application of our method on several synthetic data examples.}, keywords = {EAGE, land, Rank, residual statics, statics}, doi = {10.3997/2214-4609.201801107}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/alfaraj2018EAGEasa/alfaraj2018EAGEasa.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/alfaraj2018EAGEasa/alfaraj2018EAGEasa_pres.pdf}, author = {Ali M. Alfaraj and Rajiv Kumar and Felix J. 
Herrmann} } @conference {kumar2018EAGEcsb, title = {Compressed sensing based land simultaneous acquisition using encoded sweeps}, booktitle = {EAGE Annual Conference Proceedings}, year = {2018}, note = {(EAGE, Copenhagen)}, month = {06}, abstract = {Simultaneous shooting methods using encoded sweeps can enhance the productivity of land acquisition in situations where deployment of many vibrators and larger receiver spread is not possible in the field due to obstructions or permit limitations. However, the existing framework requires shooting the full sequence of encoded sweeps on each shot point to reconstruct the complete frequency bandwidth Green{\textquoteright}s function. Although this simultaneous shooting method reduces the sweeping time vs conventional sequential shooting, the gain in efficiency is limited. To further reduce the sweeping time, we propose to acquire randomly selected subsets of the encoded sweeps sequences followed by a rank-minimization based joint source separation and spectral interpolation framework to reconstruct the full bandwidth deblended Green{\textquoteright}s function. We demonstrate the advantages of proposed sampling and reconstruction framework using a synthetic seismic line simulated using SEG-SEAM Phase II land velocity and density model.}, keywords = {compressed sensing, EAGE, land, SEAM, source separation, spectral interpolation}, doi = {10.3997/2214-4609.201800643}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/kumar2018EAGEcsb/kumar2018EAGEcsb.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/kumar2018EAGEcsb/kumar2018EAGEcsb_pres.pdf}, author = {Rajiv Kumar and Shashin Sharan and Nick Moldoveanu and Felix J. 
Herrmann} } @conference {kadu2018EAGEdfwi, title = {Decentralized full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2018}, note = {(EAGE, Copenhagen)}, month = {06}, abstract = {With the advent of efficient seismic data acquisition, we are having a surplus of seismic data, which is improving the imaging of the earth using full-waveform inversion. However, such inversion suffers from many issues, including (i) substantial network waiting time due to repeated communications of function and gradient values in the distributed environment, and (ii) requirement of the sophisticated optimizer to solve an optimization problem involving non-smooth regularizers. To circumvent these issues, we propose a decentralized full-waveform inversion, a scheme where connected agents in a network optimize their objectives locally while being in consensus. The proposed formulation can be solved using the ADMM method efficiently. We demonstrate using the standard Marmousi model that such a scheme can decouple the regularization from data fitting and reduce the network waiting time.}, keywords = {EAGE}, doi = {10.3997/2214-4609.201801230}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/kadu2018EAGEdfwi/kadu2018EAGEdfwi.pdf}, author = {Ajinkya Kadu and Rajiv Kumar} } @conference {siahkoohi2018EAGEsdr, title = {Seismic data reconstruction with Generative Adversarial Networks}, booktitle = {EAGE Annual Conference Proceedings}, year = {2018}, note = {(EAGE, Copenhagen)}, month = {06}, abstract = {A main challenge in seismic imaging is acquiring densely sampled data. Compressed Sensing has provided theoretical foundations upon which desired sampling rate can be achieved by applying a sparsity promoting algorithm on sub-sampled data. The key point in successful recovery is to deploy a randomized sampling scheme. 
In this paper, we propose a novel deep learning-based method for fast and accurate reconstruction of heavily under-sampled seismic data, regardless of type of sampling. A neural network learns to do reconstruction directly from data via an adversarial process. Once trained, the reconstruction can be done by just feeding the frequency slice with missing data into the neural network. This adaptive nonlinear model makes the algorithm extremely flexible, applicable to data with arbitrarily type of sampling. With the assumption that we have access to training data, the quality of reconstructed slice is superior even for extremely low sampling rate (as low as 10\%) due to the data-driven nature of the method.}, keywords = {data reconstruction, EAGE, generative adversarial networks, machine learning}, doi = {10.3997/2214-4609.201801393}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/siahkoohi2018EAGEsdr/siahkoohi2018EAGEsdr.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2018/siahkoohi2018EAGEsdr/siahkoohi2018EAGEsdr_pres.pdf}, author = {Ali Siahkoohi and Rajiv Kumar and Felix J. Herrmann} } @conference {louboutin2017EAGEdns, title = {Data normalization strategies for full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Amplitude mismatch is an inherent problem in seismic inversion. Most of the source estimation techniques are associated with amplitude uncertainty due to incomplete representation of the physics or estimation method parameters. Rewriting the inversion problem in an amplitude free formulation allows to mitigate the amplitude ambiguity and help the inversion process to converge. 
We present in this work two different strategies to lessen amplitude effects in seismic inversion, derive the corresponding update directions and show how we handle scaling error correctly in both the objective function and the gradient.}, keywords = {EAGE, FWI, inversion, nonlinear, normalization}, doi = {10.3997/2214-4609.201700720}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/louboutin2017EAGEdns/louboutin2017EAGEdns.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/louboutin2017EAGEdns/louboutin2017EAGEdns_poster.pdf}, author = {Mathias Louboutin and Llu{\'\i}s Guasch and Felix J. Herrmann} } @conference {kumar2017EAGEdha, title = {Denoising high-amplitude cross-flow noise using curvelet-based stable principle component pursuit}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Removal of high-amplitude cross-flow noise in marine towed-streamer acquisition is of great interest because cross-flow noise hinders the success of subsequent processing (e.g. EPSI) and migration. However, the removal of cross-flow noise is a challenging process because cross-flow noise dominates steep angles and low-frequency components of the signal. As a result, applying a simple high-pass filter can result in a loss of coherent diving waves and reflected energy. We propose a stable curvelet-based principle-component pursuit approach that does not suffer from this shortcoming because it uses angle- and scale-adaptivity of the curvelet transform in combination with the low-rank property of cross-flow noise. As long as the cross-flow noise exhibits low-rank in the curvelet domain, our method successfully separates this signal component from the diving waves and seismic reflectivity, which is well-known to be sparse in the curvelet domain. 
Experimental results on a common-shot gather extracted from a coil shooting survey in the Gulf of Mexico show the potential of our approach.}, keywords = {coil data, cross-flow noise, curvelet, denoising, EAGE, SPCP}, doi = {10.3997/2214-4609.201701055}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/kumar2017EAGEdha/kumar2017EAGEdha.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/kumar2017EAGEdha/kumar2017EAGEdha_poster.pdf}, author = {Rajiv Kumar and Nick Moldoveanu and Felix J. Herrmann} } @conference {louboutin2017EAGEess, title = {Extending the search space of time-domain adjoint-state FWI with randomized implicit time shifts}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Adjoint-state full-waveform inversion aims to obtain subsurface properties such as velocity, density or anisotropy parameters, from surface recorded data. As with any (non-stochastic) gradient based optimization procedure, the solution of this inversion procedure is to a large extent determined by the quality of the starting model. If this starting model is too far from the true model, these derivative-based optimizations will likely end up in local minima and erroneous inversion results. In certain cases, extension of the search space, e.g. by making the wavefields or focused matched sources additional unknowns, has removed some of these non-uniqueness issues but these rely on time-harmonic formulations. 
Here, we follow a different approach by combining an implicit extension of the velocity model, time compression techniques and recent results on stochastic sampling in non-smooth/non-convex optimization}, keywords = {cycle skipping, EAGE, FWI, inversion, nonconvex, time domain}, doi = {10.3997/2214-4609.201700831}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/louboutin2017EAGEess/louboutin2017EAGEess.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/louboutin2017EAGEess/louboutin2017EAGEess_pres.pdf}, author = {Mathias Louboutin and Felix J. Herrmann} } @conference {oghenekohwo2017EAGEitl, title = {Improved time-lapse data repeatability with randomized sampling and distributed compressive sensing}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Recently, new ideas on randomized sampling for time-lapse seismic acquisition have been proposed to address some of the challenges of replicating time-lapse surveys. These ideas, which stem from distributed compressed sensing (DCS) led to the birth of a joint recovery model (JRM) for processing time-lapse data (noise-free) acquired from non-replicated acquisition geometries. However, when the earth does not change{\textendash}-i.e. no time-lapse{\textemdash}the recovered vintages from two non-replicated surveys should show high repeatability measured in terms of normalized RMS, which is a standard metric for quantifying time-lapse data repeatability. Under this assumption of no time-lapse change, we demonstrate improved repeatability (with JRM) of the recovered data from non-replicated random samplings, first with noisy data and secondly in situations where there are calibration errors i.e. 
where the acquisition parameters such as source/receiver coordinates are not precise.}, keywords = {calibration, Compressive Sensing, EAGE, noise, repeatability, time lapse}, doi = {10.3997/2214-4609.201701389}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/oghenekohwo2017EAGEitl/oghenekohwo2017EAGEitl.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/oghenekohwo2017EAGEitl/oghenekohwo2017EAGEitl_poster.pdf}, author = {Felix Oghenekohwo and Felix J. Herrmann} } @conference {alfaraj2017EAGEswr, title = {Shear wave reconstruction from low cost randomized acquisition}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Shear waves travel in the subsurface at a lower speed compared with compressional waves. Therefore, much finer spatial sampling is required to properly record the shear waves. This leads to higher acquisition costs which are typically avoided by designing surveys geared towards only compressional waves imaging. We propose using randomly under-sampled ocean bottom acquisition for recording both compressional and shear waves. The recorded multicomponent data is then interpolated using an SVD-free low rank interpolation scheme that is feasible for large scale seismic data volumes to obtain finely sampled data. Following that, we perform elastic wavefield decomposition at the ocean bottom to recover accurate up- and down-going S-waves. 
Synthetic data results indicate that using randomized under-sampled acquisition, we can recover accurate S-waves with an economical cost compared with conventional acquisition designs.}, keywords = {EAGE, interpolation, randomized acquisition, rank minimization, shear waves}, doi = {10.3997/2214-4609.201700594}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/alfaraj2017EAGEswr/alfaraj2017EAGEswr.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/alfaraj2017EAGEswr/alfaraj2017EAGEswr_pres.pdf}, author = {Ali M. Alfaraj and Rajiv Kumar and Felix J. Herrmann} } @conference {witte2017EAGEspl, title = {Sparsity-promoting least-squares migration with the linearized inverse scattering imaging condition}, booktitle = {EAGE Annual Conference Proceedings}, year = {2017}, note = {(EAGE, Paris)}, month = {06}, abstract = {Reverse-time migration (RTM) with the conventional cross-correlation imaging condition suffers from low-frequency artifacts that result from backscattered energy in the background velocity models. This problem translates to least-squares reverse-time migration (LS-RTM), where these artifacts slow down the convergence, as many of the initial iterations are spent on removing them. In RTM, this problem has been successfully addressed by the introduction of the so-called inverse scattering imaging condition, which naturally removes these artifacts. In this work, we derive the corresponding linearized forward operator of the inverse scattering imaging operator and incorporate this forward/adjoint operator pair into a sparsity-promoting (SPLS-RTM) workflow. We demonstrate on a challenging salt model, that LS-RTM with the inverse scattering imaging condition is far less prone to low-frequency artifacts than the conventional cross-correlation imaging condition, improves the convergence and does not require any type of additional image filters within the inversion. 
Through source subsampling and sparsity promotion, we reduce the computational cost in terms of PDE solves to a number comparable to conventional RTM, making our workflow applicable to large-scale problems.}, keywords = {EAGE, imaging condition, least-squares migration, linearized Bregman}, doi = {10.3997/2214-4609.201701125}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/witte2017EAGEspl/witte2017EAGEspl.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2017/witte2017EAGEspl/witte2017EAGEspl_poster.pdf}, author = {Philipp A. Witte and Mengmeng Yang and Felix J. Herrmann} } @conference {vanleeuwen2015EAGEafs, title = {Affordable full subsurface image volume{\textendash}-an application to WEMVA}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE Workshop on Wave Equation based Migration Velocity Analysis, Madrid)}, month = {06}, abstract = {Common image gathers are used in building velocity models, inverting for anisotropy parameters, and analyzing reservoir attributes. In this paper, we offer a new perspective on image gathers, where we glean information from the image volume via efficient matrix-vector products. The proposed formulation make the computation of full subsurface image volume feasible. We illustrate how this matrix-vector product can be used to construct objective functions for automatic MVA.}, keywords = {EAGE, MVA, randomized trace estimation, wave-equation}, doi = {10.3997/2214-4609.201413498}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/vanleeuwen2015EAGEafs/vanleeuwen2015EAGEafs.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/vanleeuwen2015EAGEafs/vanleeuwen2015EAGEafs_pres.pdf}, author = {Tristan van Leeuwen and Rajiv Kumar and Felix J. 
Herrmann} } @conference {wason2015EAGEcsm, title = {Compressed sensing in 4-D marine{\textendash}-recovery of dense time-lapse data from subsampled data without repetition}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {We present an extension of our time-jittered marine acquisition for time-lapse surveys by working on more realistic field acquisition scenarios by incorporating irregular spatial grids without insisting on repeatability between the surveys. Since we are always subsampled in both the baseline and monitor surveys, we are interested in recovering the densely sampled baseline and monitor, and then the (complete) 4-D difference from subsampled/incomplete baseline and monitor data.}, keywords = {EAGE, NFFT, off-the-grid, simultaneous acquisition, time-lapse}, doi = {10.3997/2214-4609.201413088}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/wason2015EAGEcsm/wason2015EAGEcsm.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/wason2015EAGEcsm/wason2015EAGEcsm_poster.pdf}, author = {Haneet Wason and Felix Oghenekohwo and Felix J. Herrmann} } @conference {smithyman2015EAGEcwi, title = {Constrained waveform inversion of colocated VSP and surface seismic data}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {Constrained Full-Waveform Inversion (FWI) is applied to produce a high-resolution velocity model from both Vertical Seismic Profiling (VSP) and surface seismic data. The case study comes from the Permian Basin in Texas, USA. This dataset motivates and tests several new developments in methodology that enable recovery of model results that sit within multiple a priori constraint sets. 
These constraints are imposed through a Projected Quasi-Newton (PQN) approach, wherein the projection set is the intersection of physical property bounds and anisotropic wavenumber filtering. This enables the method to recover geologically-reasonable models while preserving the fast model convergence offered by a quasi-Newton optimization scheme like l-BFGS. In the Permian Basin example, low-frequency data from both arrays are inverted together and regularized by this projection approach. Careful choice of the constraint sets is possible without requiring tradeoff parameters as in a quadratic penalty approach to regularization. Multiple 2D FWI results are combined to produce an interpolated 3D model that is consistent with the models from migration velocity analysis and VSP processing, while offering improved resolution and illumination of features from both datasets.}, keywords = {EAGE, VSP, waveform inversion}, doi = {10.3997/2214-4609.201412906}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/smithyman2015EAGEcwi/smithyman2015EAGEcwi.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/smithyman2015EAGEcwi/smithyman2015EAGEcwi_pres.pdf}, author = {Brendan R. Smithyman and Bas Peters and Felix J. Herrmann} } @conference {herrmann2015EAGEfom, title = {Fast "online" migration with Compressive Sensing}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {We present a novel adaptation of a recently developed relatively simple iterative algorithm to solve large-scale sparsity-promoting optimization problems. Our algorithm is particularly suitable to large-scale geophysical inversion problems, such as sparse least-squares reverse-time migration or Kirchhoff migration since it allows for a tradeoff between parallel computations, memory allocation, and turnaround times, by working on subsets of the data with different sizes. 
Comparison of the proposed method for sparse least-squares imaging shows a performance that rivals and even exceeds the performance of state-of-the art one-norm solvers that are able to carry out least-squares migration at the cost of a single migration with all data.}, keywords = {EAGE, LSRTM}, doi = {10.3997/2214-4609.201412942}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/herrmann2015EAGEfom/herrmann2015EAGEfom.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/herrmann2015EAGEfom/herrmann2015EAGEfom_pres.pdf}, author = {Felix J. Herrmann and Ning Tu and Ernie Esser} } @conference {esser2015EAGElcs, title = {A lifted $\ell_1/\ell_2$ constraint for sparse blind deconvolution}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {We propose a modification to a sparsity constraint based on the ratio of $\ell_1$ and $\ell_2$ norms for solving blind seismic deconvolution problems in which the data consist of linear convolutions of different sparse reflectivities with the same source wavelet. We also extend the approach to the Estimation of Primaries by Sparse Inversion (EPSI) model, which includes surface related multiples. Minimizing the ratio of $\ell_1$ and $\ell_2$ norms has been previously shown to promote sparsity in a variety of applications including blind deconvolution. Most existing implementations are heuristic or require smoothing the $\ell_1/\ell_2$ penalty. Lifted versions of $\ell_1/\ell_2$ constraints have also been proposed but are challenging to implement. Inspired by the lifting approach, we propose to split the sparse signals into positive and negative components and apply an $\ell_1/\ell_2$ constraint to the difference, thereby obtaining a constraint that is easy to implement without smoothing the $\ell_1$ or $\ell_2$ norms. 
We show that a method of multipliers implementation of the resulting model can recover source wavelets that are not necessarily minimum phase and approximately reconstruct the sparse reflectivities. Numerical experiments demonstrate robustness to the initialization as well as to noise in the data.}, keywords = {blind deconvolution, EAGE, EPSI}, doi = {10.3997/2214-4609.201413420}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/esser2015EAGElcs/esser2015EAGElcs.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/esser2015EAGElcs/esser2015EAGElcs_poster.pdf}, author = {Ernie Esser and Tim T.Y. Lin and Rongrong Wang and Felix J. Herrmann} } @conference {kumar2015EAGEmcu, title = {Matrix completion on unstructured grids : 2-D seismic data regularization and interpolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {Seismic data interpolation via rank-minimization techniques has been recently introduced in the seismic community. All the existing rank-minimization techniques assume the recording locations to be on a regular grid, e.g., sampled periodically, but seismic data are typically irregularly sampled along spatial axes. Other than the irregularity of the sampled grid, we often have missing data. In this paper, we study the effect of grid irregularity to conduct matrix completion on a regular grid for unstructured data. We propose an improvement of existing rank-minimization techniques to do regularization. We also demonstrate that we can perform seismic data regularization and interpolation simultaneously. 
We illustrate the advantages of the modification using a real seismic line from the Gulf of Suez to obtain high quality results for regularization and interpolation, a key application in exploration geophysics.}, keywords = {EAGE, interpolation, matrix completion, NFFT, regularization}, doi = {10.3997/2214-4609.201413448}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/kumar2015EAGEmcu/kumar2015EAGEmcu.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/kumar2015EAGEmcu/kumar2015EAGEmcu_poster.pdf}, author = {Rajiv Kumar and Oscar Lopez and Ernie Esser and Felix J. Herrmann} } @conference {dasilva2015EAGEogt, title = {Off the grid tensor completion for seismic data interpolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {The practical realities of acquiring seismic data in a realistic survey are often at odds with the stringent requirements of Shannon-Nyquist-based sampling theory. The unpredictable movement of the ocean{\textquoteleft}s currents can be detrimental in acquiring exactly equally-spaced samples while sampling at Nyquist-rates are expensive, given the huge dimensionality and size of the data volume. Recent work in matrix and tensor completion for seismic data interpolation aim to alleviate such stringent Nyquist-based sampling requirements but are fundamentally posed on a regularly-spaced grid. In this work, we extend our previous results in using the so-called Hierarchical Tucker (HT) tensor format for recovering seismic data to the irregularly sampled case. We introduce an interpolation operator that resamples our tensor from a regular grid (in which we impose our low-rank constraints) to our irregular sampling grid. Our framework is very flexible and efficient, depending primarily on the computational costs of this operator. 
We demonstrate the superiority of this approach on a realistic BG data set compared to using low-rank tensor methods that merely use binning.}, keywords = {EAGE, hierarchical tucker, irregular sampling, off the grid, structured tensor, tensor interpolation}, doi = {10.3997/2214-4609.201412978}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/dasilva2015EAGEogt/dasilva2015EAGEogt.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/dasilva2015EAGEogt/dasilva2015EAGEogt_pres.pdf}, author = {Curt Da Silva and Felix J. Herrmann} } @conference {lopez2015EAGErma, title = {Rank minimization via alternating optimization: seismic data interpolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {Low-rank matrix completion techniques have recently become an effective tool for seismic trace interpolation problems. In this talk, we consider an alternating optimization scheme for nuclear norm minimization and discuss the applications to large scale wave field reconstruction. By adopting a factorization approach to the rank minimization problem we write our low-rank matrix in bi-linear form, and modify this workflow by alternating our optimization to handle a single matrix factor at a time. This allows for a more tractable procedure that can robustly handle large scale, highly oscillatory and critically subsampled seismic data sets. 
We demonstrate the potential of this approach with several numerical experiments on a seismic line from the Nelson 2D data set and a frequency slice from the Gulf of Mexico data set.}, keywords = {EAGE, interpolation, low-rank, matrix completion}, doi = {10.3997/2214-4609.201413453}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/lopez2015EAGErma/lopez2015EAGErma.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/lopez2015EAGErma/lopez2015EAGErma_poster.pdf}, author = {Oscar Lopez and Rajiv Kumar and Felix J. Herrmann} } @conference {fang2015EAGEsew, title = {Source estimation for Wavefield Reconstruction Inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {Wavefield reconstruction inversion is a new approach to waveform based inversion that helps overcome the {\textquoteleft}cycle skipping{\textquoteright} problem. However, like most waveform based inversion methods, wavefield reconstruction inversion also requires good source wavelets. Without correct source wavelets, wavefields cannot be reconstructed correctly and the velocity model cannot be updated correctly neither. In this work, we propose a source estimation method for wavefield reconstruction inversion based on the variable projection method. In this method, we reconstruct wavefields and estimate source wavelets simultaneously by solving an extended least-squares problem, which contains source wavelets. This approach does not increase the computational cost compared to conventional wavefield reconstruction inversion. 
Numerical results illustrate that, with our source estimation method, we are able to recover source wavelets and obtain inversion results that are comparable to results obtained with true source wavelets.}, keywords = {EAGE, source estimation, WRI}, doi = {10.3997/2214-4609.201412588}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/fang2015EAGEsew/fang2015EAGEsew.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/fang2015EAGEsew/fang2015EAGEsew_pres.pdf}, author = {Zhilong Fang and Felix J. Herrmann} } @conference {fang2015EAGEuqw, title = {Uncertainty quantification for Wavefield Reconstruction Inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {In this work, we propose a method to quantify the uncertainty of wavefield reconstruction inversion under the framework of Bayesian inference. Unlike the conventional method using the wave equation as the forward mapping, we involve the wave equation misfit in the posterior distribution and propose a new posterior distribution. The negative log-likelihood of the new distribution is less oscillatory than that of the conventional posterior distribution, and its Gauss-Newton Hessian is a diagonal matrix that can be generated without any additional computational cost. We use the diagonal Gauss-Newton Hessian to derive an approximate Gaussian distribution at the maximum likelihood point to quantify the uncertainty. 
This method makes the uncertainty quantification for WRI computationally tractable and is able to provide reasonable uncertainty analysis based on our numerical results.}, keywords = {EAGE, FWI, UQ, WRI}, doi = {10.3997/2214-4609.201413198}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/fang2015EAGEuqw/fang2015EAGEuqw.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/fang2015EAGEuqw/fang2015EAGEuqw_pres.pdf}, author = {Zhilong Fang and Chia Ying Lee and Curt Da Silva and Felix J. Herrmann and Rachel Kuske} } @conference {oghenekohwo2015EAGEuci, title = {Using common information in compressive time-lapse full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2015}, note = {(EAGE, Madrid)}, month = {06}, abstract = {The use of time-lapse seismic data to monitor changes in the subsurface has become standard practice in industry. In addition, full-waveform inversion has also been extended to time-lapse seismic to obtain useful time-lapse information. The computational cost of this method are becoming more pronounced as the volume of data increases. Therefore, it is necessary to develop fast inversion algorithms that can also give improved time-lapse results. Rather than following existing joint inversion algorithms, we are motivated by a joint recovery model which exploits the common information among the baseline and monitor data. We propose a joint inversion framework, leveraging ideas from distributed compressive sensing and the modified Gauss-Newton method for full-waveform inversion, by using the shared information in the time-lapse data. Our results on a realistic synthetic example highlight the benefits of our joint inversion approach over a parallel inversion method that does not exploit the shared information. 
Preliminary results also indicate that our formulation can address time-lapse data with inconsistent acquisition geometries.}, keywords = {EAGE, FWI, time-lapse}, doi = {10.3997/2214-4609.201413086}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/oghenekohwo2015EAGEuci/oghenekohwo2015EAGEuci.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2015/oghenekohwo2015EAGEuci/oghenekohwo2015EAGEuci_poster.pdf}, author = {Felix Oghenekohwo and Rajiv Kumar and Ernie Esser and Felix J. Herrmann} } @conference {petrenko2014EAGEaih, title = {Accelerating an iterative Helmholtz solver with FPGAs}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {We implement the Kaczmarz row-projection algorithm (Kaczmarz (1937)) on a CPU host + FPGA accelerator platform using techniques of dataflow programming. This algorithm is then used as the preconditioning step in CGMN, a modified version of the conjugate gradients method (Bj{\"o}rck and Elfving (1979)) that we use to solve the time-harmonic acoustic isotropic constant density wave equation. Using one accelerator we achieve a speed-up of over 2{\texttimes} compared with one Intel core.}, keywords = {CGMN, EAGE, FPGA, Helmholtz equation, Kaczmarz, reconfigurable computing}, doi = {10.3997/2214-4609.20141141}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/petrenko2014EAGEaih/petrenko2014EAGEaih.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/petrenko2014EAGEaih/petrenko2014EAGEaih_pres.pdf}, author = {Art Petrenko and Tristan van Leeuwen and Diego Oriato and Simon Tilbury and Felix J. 
Herrmann} } @conference {esser2014EAGEacp, title = {Application of a convex phase retrieval method to blind seismic deconvolution}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {A classical strategy for blind seismic deconvolution is to first estimate the autocorrelation of the unknown source wavelet from the data and then recover the wavelet by assuming it has minimum phase. However, computing the minimum phase wavelet directly from the amplitude spectrum can be sensitive to even extremely small errors, especially in the coefficients close to zero. Since the minimum phase requirement follows from an assumption that the wavelet should be as impulsive as possible, we propose to directly estimate an impulsive wavelet by minimizing a weighted l2 penalty subject to a constraint on its amplitude spectrum. This nonconvex model has the form of a phase retrieval problem, in this case recovering a signal given only estimates of the magnitudes of its Fourier coefficients. Following recent work on convex relaxations of phase retrieval problems, we propose a convex semidefinite program for computing an impulsive minimum phase wavelet whose amplitude spectrum is close to a given estimate, and we show that this can be robustly solved by a Douglas Rachford splitting method for convex optimization.}, keywords = {blind deconvolution, convex phase retrieval, EAGE, source wavelet estimation}, doi = {10.3997/2214-4609.20141590}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/esser2014EAGEacp/esser2014EAGEacp.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/esser2014EAGEacp/esser2014EAGEacp_poster.pdf}, author = {Ernie Esser and Felix J. 
Herrmann} } @conference {zheglova2014EAGEams, title = {Application of matrix square root and its inverse to downward wavefield extrapolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {In this paper we propose a method for computation of the square root of the Helmholtz operator and its inverse that arise in downward extrapolation methods based on one-way wave equation. Our approach involves factorization of the discretized Helmholtz operator at each depth by extracting the matrix square root after performing the spectral projector in order to eliminate the evanescent modes. The computation of the square root of the discrete Helmholtz operator and its inverse is done using polynomial recursions and can be combined with low rank matrix approximations to reduce the computational cost for large problems. The resulting square root operator is able to model the propagating modes kinematically correctly at the angles of up to 90 degree. Preliminary results on convergence of iterations are presented in this abstract. Potential applications include seismic modeling, imaging and inversion.}, keywords = {EAGE, extrapolation, Modelling, one-way wave equation, square root}, doi = {10.3997/2214-4609.20141184}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/zheglova2014EAGEams/zheglova2014EAGEams.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/zheglova2014EAGEams/zheglova2014EAGEams_pres.pdf}, author = {Polina Zheglova and Felix J. Herrmann} } @conference {kumar2014EAGEeia, title = {Extended images in action: efficient WEMVA via randomized probing}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {Image gathers as a function of subsurface offset are an important tool for velocity analysis in areas of complex geology. 
In this paper, we offer a new perspective on image gathers by organizing the extended image as a function of all subsurface offsets and all subsurface points into a matrix whose (i,j)^th entry captures the interaction between gridpoints i and j. For even small problems, it is infeasible to form and store this matrix. Instead, we propose an efficient algorithm to glean information from the image volume via efficient matrix-vector products. We illustrate how this can be used to construct objective functions for automatic MVA.}, keywords = {EAGE, extended imaging, MVA, probing}, doi = {10.3997/2214-4609.20141492}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/kumar2014EAGEeia/kumar2014EAGEeia.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/kumar2014EAGEeia/kumar2014EAGEeia_pres.pdf}, author = {Rajiv Kumar and Tristan van Leeuwen and Felix J. Herrmann} } @conference {lago2014EAGEfst, title = {Fast solution of time-harmonic wave-equation for full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {For many full-waveform inversion techniques, the most computationally intensive step is the computation of a numerical solution for the wave equation on every iteration. In the frequency domain approach, this requires the solution of very large, complex, sparse, ill-conditioned linear systems. In this extended abstract we bring out attention specifically to CGMN method for solving PDEs, known for being flexible (i.e. it is able to treat equally acoustic data as well as visco-elastic or more complex scenarios) efficient with respect both to memory and computation time, and controllable accuracy of the final approximation. We propose an improvement for the known CGMN method by imposing a minimal residual condition, which incurs in one extra model vector storage. 
The resulting algorithm called CRMN enjoys several interesting properties such as monotonically nonincreasing behaviour of the norm of the residual and minimal residual, guaranteeing optimal convergence for the relative residual criterion. We discuss numerical experiments both in an isolated PDE solve and also within the inversion procedure, showing that in a realistic scenario we can expect a speedup around 25\% when using CRMN rather than CGMN.}, keywords = {CGMN, CRMN, EAGE, FWI, Time-Harmonic Wave Equation}, doi = {10.3997/2214-4609.20140812}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/lago2014EAGEfst/lago2014EAGEfst.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/lago2014EAGEfst/lago2014EAGEfst_pres.pdf}, author = {Rafael Lago and Art Petrenko and Zhilong Fang and Felix J. Herrmann} } @conference {fang2014EAGEfuq, title = {Fast uncertainty quantification for 2D full-waveform inversion with randomized source subsampling}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {Uncertainties arise in every area of seismic exploration, especially in full-waveform inversion, which is highly non-linear. In the framework of Bayesian inference, uncertainties can be analyzed by sampling the posterior probability density distribution with a Markov chain Monte-Carlo (McMC) method. We reduce the cost of computing the posterior distribution by working with randomized subsets of sources. These approximations, together with the Gaussian assumption and approximation of the Hessian, lead to a computationally tractable uncertainty quantification. 
Application of this approach to a synthetic leads to standard deviations and confidence intervals that are qualitatively consistent with our expectations.}, keywords = {EAGE, FWI, Markov chain Monte Carlo, randomized sources subsampling, Uncertainty quantification}, doi = {10.3997/2214-4609.20140715}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/fang2014EAGEfuq/fang2014EAGEfuq.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/fang2014EAGEfuq/fang2014EAGEfuq_pres.pdf}, author = {Zhilong Fang and Curt Da Silva and Felix J. Herrmann} } @conference {dasilva2014EAGEhtucknoisy, title = {Low-rank promoting transformations and tensor interpolation - applications to seismic data denoising}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {In this abstract, we extend our previous work in Hierarchical Tucker (HT) tensor completion, which uses an extremely efficient representation for representing high-dimensional tensors exhibit- ing low-rank structure, to handle subsampled tensors with noisy entries. We consider a "low-noise" case, so that the energies of the noise and the signal are nearly indistinguishable, and a {\textquoteright}high-noise{\textquoteright} case, in which the noise energy is now scaled to the amplitude of the entire data volume. We examine the effect of the noise in terms of the singular values along different matricizations of the data, i.e., reshaping of the tensor along different modes. By interpreting this effect in the context of tensor completion, we demonstrate the inefficacy of denoising by this method in the source-receiver do- main. In light of this observation, we transform the decimated, noisy data in to the midpoint-offset domain, which promotes low-rank behaviour in the signal and high-rank behaviour in the noise. 
This distinction between signal and noise allows low-rank interpolation to effectively denoise the signal with only a marginal increase in computational cost. We demonstrate the effectiveness of this approach on a 4D frequency slice.}, keywords = {EAGE, hierarchical tucker, low-rank transform, riemannian optimization, seismic denoising, structured tensor, tensor interpolation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/dasilva2014EAGEhtucknoisy/dasilva2014EAGEhtucknoisy.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/dasilva2014EAGEhtucknoisy/dasilva2014EAGEhtucknoisy_pres.pdf}, author = {Curt Da Silva and Felix J. Herrmann} } @conference {lin2014EAGEmas, title = {Multilevel acceleration strategy for the robust estimation of primaries by sparse inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {We propose a method to substantially reduce the computational costs of the Robust Estimation of Primaries by Sparse Inversion algorithm, based on a multilevel inversion strategy that shifts early iterations of the method to successively coarser spatial sampling grids. This method requires no change in the core implementation of the original algorithm, and additionally only relies on trace decimation, low-pass filtering, and rudimentary interpolation techniques. We furthermore demonstrate with a synthetic seismic line significant computational speedups using this approach.}, keywords = {EAGE, EPSI, multigrid, multilevel, multiples, multiscale, REPSI}, doi = {10.3997/2214-4609.20140672}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/lin2014EAGEmas/lin2014EAGEmas.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/lin2014EAGEmas/lin2014EAGEmas_pres.pdf}, author = {Tim T.Y. Lin and Felix J. 
Herrmann} } @conference {leeuwen2014EAGEntf, title = {A new take on FWI: wavefield reconstruction inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {We discuss a recently proposed novel method for waveform inversion: Wavefield Reconstruction Inversion (WRI). As opposed to conventional FWI {\textendash} which attempts to minimize the error between observed and predicted data obtained by solving a wave equation {\textendash} WRI reconstructs a wave-field from the data and extracts a model-update from this wavefield by minimizing the wave-equation residual. The method does not require explicit computation of an adjoint wavefield as all the necessary information is contained in the reconstructed wavefield. We show how the corresponding model updates can be interpreted physically analogously to the conventional imaging-condition-based approach.}, keywords = {EAGE, Full-waveform inversion, Optimization, penalty method, Wavefield Reconstruction Inversion}, doi = {10.3997/2214-4609.20140703}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/leeuwen2014EAGEntf/leeuwen2014EAGEntf.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/leeuwen2014EAGEntf/leeuwen2014EAGEntf_pres.pdf}, author = {Tristan van Leeuwen and Felix J. Herrmann and Bas Peters} } @conference {herrmann2014EAGEWSrrt, title = {Randomization and repeatability in time-lapse marine acquisition}, booktitle = {EAGE Workshop on Land and Ocean Bottom; Broadband Full Azimuth Seismic Surveys; Spain}, year = {2014}, note = {(EAGE Workshop, Spain)}, month = {04}, keywords = {4D seismic, EAGE, Marine acquisition, workshop}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/herrmann2014EAGEWSrrt.pdf}, author = {Haneet Wason and Felix Oghenekohwo and Felix J. 
Herrmann} } @conference {kumar2014EAGErank, title = {SVD-free low-rank matrix factorization : wavefield reconstruction via jittered subsampling and reciprocity}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {Recently computationally efficient rank optimization techniques have been studied extensively to develop a new mathematical tool for the seismic data interpolation. So far, matrix completion problems have been discussed where sources are subsampled according to a discrete uniform distribution. In this paper, we studied the effect of two different subsampling techniques on seismic data interpolation using rank-regularized formulations, namely jittered subsampling versus uniform random subsampling. The other objective of this paper is to combine the fact of source-receiver reciprocity with the rank-minimization techniques to enhance the accuracy of missing-trace interpolation. We illustrate the advantages of jittered subsampling and reciprocity using a seismic line from Gulf of Suez to obtain high quality results for interpolation, a key application in exploration geophysics.}, keywords = {EAGE, interpolation, low-rank, reciprocity}, doi = {10.3997/2214-4609.20141394}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/kumar2014EAGErank/kumar2014EAGErank.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/kumar2014EAGErank/kumar2014EAGErank_pres.pdf}, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Ernie Esser and Hassan Mansour and Felix J. 
Herrmann} } @conference {oghenekohwo2014EAGEtls, title = {Time-lapse seismic without repetition: reaping the benefits from randomized sampling and joint recovery}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {In the current paradigm of 4-D seismic, guaranteeing repeatability in acquisition and processing of the baseline and monitor surveys ranks highest amongst the technical challenges one faces in detecting time-lapse signals. By using recent insights from the field of compressive sensing, we show that the condition of survey repeatability can be relaxed as long as we carry out a sparsitypromoting program that exploits shared information between the baseline and monitor surveys. By inverting for the baseline and monitor survey as the common "background", we are able to compute high-fidelity 4-D differences from carefully selected synthetic surveys that have different sets of source/receivers missing. This synthetic example is proof of concept of an exciting new approach to randomized 4-D acquisition where time-lapse signal can be computed as long as the survey details, such as source/receiver locations are known afterwards.}, keywords = {4-D seismic, EAGE, joint recovery, time-lapse}, doi = {10.3997/2214-4609.20141478}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/oghenekohwo2014EAGEtls/oghenekohwo2014EAGEtls.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/oghenekohwo2014EAGEtls/oghenekohwo2014EAGEtls_pres.pdf}, author = {Felix Oghenekohwo and Ernie Esser and Felix J. Herrmann} } @conference {peters2014EAGEweb, title = {Wave-equation based inversion with the penalty method: adjoint-state versus wavefield-reconstruction inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2014}, month = {06}, abstract = {In this paper we make a comparison between wave-equation based inversions based on the adjoint-state and penalty methods. 
While the adjoint-state method involves the minimization of a data-misfit and exact solutions of the wave-equation for the current velocity model, the penalty-method aims to first find a wavefield that jointly fits the data and honours the physics, in a least-squares sense. Given this reconstructed wavefield, which is a proxy for the true wavefield in the true model, we calculate updates for the velocity model. Aside from being less nonlinear{\textendash}the acoustic wave equation is linear in the wavefield and model parameters but not in both{\textendash}the inversion is carried out over a solution space that includes both the model and the wavefield. This larger search space allows the algorithm to circumnavigate local minima, very much in the same way as recently proposed model extensions try to accomplish. We include examples for low frequencies, where we compare full-waveform inversion results for both methods, for good and bad starting models, and for high frequencies where we compare reverse-time migration with linearized imaging based on wavefield-reconstruction inversion. The examples confirm the expected benefits of the proposed method.}, keywords = {EAGE, Full-waveform inversion, Imaging, Optimization, penalty method}, doi = {10.3997/2214-4609.20140704}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/peters2014EAGEweb/peters2014EAGEweb.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2014/peters2014EAGEweb/peters2014EAGEweb_pres.pdf}, author = {Bas Peters and Felix J. Herrmann and Tristan van Leeuwen} } @conference {lin2013EAGEcsd, title = {Cosparse seismic data interpolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {Many modern seismic data interpolation and redatuming algorithms rely on the promotion of transform-domain sparsity for high-quality results. 
Amongst the large diversity of methods and different ways of realizing sparse reconstruction lies a central question that often goes unaddressed: is it better for the transform-domain sparsity to be achieved through explicit construction of sparse representations (e.g., by thresholding of small transform-domain coefficients), or by demanding that the algorithm return physical signals which produce sparse coefficients when hit with the forward transform? Recent results show that the two approaches give rise to different solutions when the transform is redundant, and that the latter approach imposes a whole new class of constraints related to where the forward transform produces zero coefficients. From this framework, a new reconstruction algorithm is proposed which may allow better reconstruction from subsampled signals than what the sparsity assumption alone would predict. In this work we apply the new framework and algorithm to the case of seismic data interpolation under the curvelet domain, and show that it admits better reconstruction than some existing L1 sparsity-based methods derived from compressive sensing for a range of subsampling factors.}, keywords = {algorithm, cosparsity, curvelet, EAGE, interpolation, Optimization}, doi = {10.3997/2214-4609.20130387}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/lin2013EAGEcsd/lin2013EAGEcsd.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/lin2013EAGEcsd/lin2013EAGEcsd_pres.pdf}, author = {Tim T.Y. Lin and Felix J. Herrmann} } @conference {tu2013EAGElsm, title = {Fast least-squares migration with multiples and source estimation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {The advent of modern computing has made it possible to do seismic imaging using least-squares reverse-time migration. We obtain superior images by solving an optimization problem that recovers the true-amplitude images. 
However, its success hinges on overcoming several issues, including overwhelming problem size, unknown source wavelet, and interfering coherent events like multiples. In this abstract, we reduce the problem size by using ideas from compressive sensing, and estimate source wavelet by generalized variable projection. We also demonstrate how to invert for subsurface information encoded in surface-related multiples by incorporating the free-surface operator as an areal source in reverse-time migration. Our synthetic examples show that multiples help to improve the resolution of the image, as well as remove the amplitude ambiguity in wavelet estimation.}, keywords = {EAGE, Imaging, multiples, source estimation, sparse}, doi = {10.3997/2214-4609.20130727}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/tu2013EAGElsm/tu2013EAGElsm.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/tu2013EAGElsm/tu2013EAGElsm_pres.pdf}, author = {Ning Tu and Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann} } @conference {herrmann2013EAGEfrtm, title = {Fast RTM with multiples and source estimation}, booktitle = {EAGE/SEG Forum - Turning noise into geological information: The next big step?}, year = {2013}, month = {11}, abstract = {During this talk, we present a computationally efficient (cost of 1-2 RTM{\textquoteright}s with all data) iterative sparsity-promoting inversion framework where surface-related multiples are jointly imaged with primaries and where the source signature is estimated on the fly. Our imaging algorithm is computationally efficient because it works during each iteration with small independent randomized subsets of data. The multiples are handled by introducing an areal source term that includes the upgoing wavefield. We update the source signature for each iteration using a variable projection method. 
The resulting algorithm removes imaging artifacts from surface-related multiples, estimates and removes the imprint of the source, recovers true amplitudes, is fast, and robust to linearization errors by virtue of the statistical independence of the subsets of data we are working with at each iteration.}, keywords = {EAGE, multiples, RTM, SEG, source estimation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/herrmann2013EAGEfrtm/herrmann2013EAGEfrtm_pres.pdf}, author = {Felix J. Herrmann and Ning Tu} } @conference {dasilva2013EAGEhtucktensor, title = {Hierarchical Tucker tensor optimization - applications to 4D seismic data interpolation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {In this work, we develop optimization algorithms on the manifold of Hierarchical Tucker (HT) tensors, an extremely efficient format for representing high-dimensional tensors exhibiting particular low-rank structure. With some minor alterations to existing theoretical developments, we develop an optimization framework based on the geometric understanding of HT tensors as a smooth manifold, a generalization of smooth curves/surfaces. Building on the existing research of solving optimization problems on smooth manifolds, we develop Steepest Descent and Conjugate Gradient methods for HT tensors. The resulting algorithms converge quickly, are immediately parallelizable, and do not require the computation of SVDs. We also extend ideas about favourable sampling conditions for missing-data recovery from the field of Matrix Completion to Tensor Completion and demonstrate how the organization of data can affect the success of recovery. As a result, if one has data with randomly missing source pairs, using these ideas, coupled with an efficient solver, one can interpolate large-scale seismic data volumes with missing sources and/or receivers by exploiting the multidimensional dependencies in the data. 
We are able to recover data volumes amidst extremely high subsampling ratios (in some cases, > 75\%) using this approach.}, keywords = {3D data interpolation, EAGE, riemannian optimization, structured tensor}, doi = {10.3997/2214-4609.20130390}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/dasilva2013EAGEhtucktensor/dasilva2013EAGEhtucktensor.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/dasilva2013EAGEhtucktensor/dasilva2013EAGEhtucktensor_pres.pdf}, author = {Curt Da Silva and Felix J. Herrmann} } @conference {vanleeuwen2013EAGErobustFWI, title = {In which domain should we measure the misfit for robust full waveform inversion?}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {Full-waveform inversion relies on minimizing the difference between observed and modeled data, as measured by some penalty function. A popular choice, of course, is the least-squares penalty. However, when outliers are present in the data, the use of robust penalties such as the Huber or Student{\textquoteright}s t may significantly improve the results since they put relatively less weight on large residuals. In order for robust penalties to be effective, the outliers must be somehow localized and distinguishable from the good data. We propose to first transform the residual into a domain where the outliers are localized before measuring the misfit with a robust penalty. This is exactly how one would normally devise filters to remove the noise before applying conventional FWI. We propose to merge the two steps and let the inversion process implicitly filter out the noise. 
Results on a synthetic dataset show the effectiveness of the approach.}, keywords = {EAGE, full waveform inversion}, doi = {10.3997/2214-4609.20130839}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/vanleeuwen2013EAGErobustFWI/vanleeuwen2013EAGErobustFWI.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/vanleeuwen2013EAGErobustFWI/vanleeuwen2013EAGErobustFWI_pres.pdf}, author = {Tristan van Leeuwen and Aleksandr Y. Aravkin and Henri Calandra and Felix J. Herrmann} } @conference {wason2013EAGEobs, title = {Ocean bottom seismic acquisition via jittered sampling}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {We present a pragmatic marine acquisition scheme where multiple source vessels sail across an ocean-bottom array firing airguns at jittered source locations and instances in time. Following the principles of compressive sensing, we can significantly impact the reconstruction quality of conventional seismic data (from jittered data) and demonstrate successful recovery by sparsity promotion. In contrast to random (under)sampling, acquisition via jittered (under)sampling helps in controlling the maximum gap size, which is a practical requirement of wavefield reconstruction with localized sparsifying transforms. Results are illustrated with simulations of time-jittered marine acquisition, which translates to jittered source locations for a given speed of the source vessel, for two source vessels.}, keywords = {Acquisition, blended, deblending, EAGE, interpolation, marine}, doi = {10.3997/2214-4609.20130379}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/wason2013EAGEobs/wason2013EAGEobs.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/wason2013EAGEobs/wason2013EAGEobs_pres.pdf}, author = {Haneet Wason and Felix J. 
Herrmann} } @conference {kumar2013EAGEsind, title = {Seismic data interpolation and denoising using SVD-free low-rank matrix factorization}, booktitle = {EAGE Annual Conference Proceedings}, year = {2013}, month = {06}, abstract = {Recent developments in rank optimization have allowed new approaches for seismic data interpolation and denoising. In this paper, we propose an approach for simultaneous seismic data interpolation and denoising using robust rank-regularized formulations. The proposed approach is suitable for large scale problems, since it avoids SVD computations by using factorized formulations. We illustrate the advantages of the new approach using a seismic line from Gulf of Suez and 5D synthetic seismic data to obtain high quality results for interpolation and denoising, a key application in exploration geophysics.}, keywords = {denoising, EAGE, interpolation}, doi = {10.3997/2214-4609.20130388}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/kumar2013EAGEsind/kumar2013EAGEsind.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2013/kumar2013EAGEsind/kumar2013EAGEsind_pres.pdf}, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Hassan Mansour and Ben Recht and Felix J. Herrmann} } @conference {herrmann2012EAGEcsm, title = {Compressive sensing in marine acquisition and beyond}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Simultaneous-source marine acquisition is an example of compressive sensing where acquisition with a single vessel is replaced by simultaneous acquisition by multiple vessels with sources that fire at randomly dithered times. By identifying simultaneous acquisition as compressive sensing, we are able to design acquisitions that favour recovery by sparsity promotion. 
Compared to conventional processing that yields estimates for sequential data, sparse recovery leads to significantly improved results for simultaneous data volumes that are collected in shorter times. These improvements are the result of proper design of the acquisition, selection of the appropriate transform domain, and solution of the recovery problem by sparsity promotion. During this talk, we will show how these design principles can be applied to marine acquisition and to other problems in exploration seismology that can benefit from compressive sensing.}, keywords = {Acquisition, EAGE, marine, workshop}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEcsm/herrmann2012EAGEcsm.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEcsm/herrmann2012EAGEcsm_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59854}, author = {Felix J. Herrmann and Haneet Wason} } @conference {min2012EAGEefwi, title = {Frequency-domain elastic waveform inversion using weighting factors related to source-deconvolved residuals}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {One of the limitations in seismic waveform inversion is that inversion results are very sensitive to initial guesses, which may be because the gradients computed at each frequency are not properly weighted depending on given models. Analyzing the conventional waveform inversion algorithms using the pseudo-Hessian matrix as a pre-conditioner shows that the gradients do not properly describe the feature of given models or high- and low-end frequencies do not contribute to the model parameter updates due to banded spectra of source wavelet. For a better waveform inversion algorithm, we propose applying weighting factors to gradients computed at each frequency. The weighting factors are designed using the source-deconvolved back-propagated wavefields. 
Numerical results for the SEG/EAGE salt model show that the weighting method improves gradient images and its inversion results are compatible with true velocities even with poorly estimated initial guesses.}, keywords = {EAGE, elastic, frequency-domain, waveform inversion, weighting factors}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/min2012EAGEefwi/min2012EAGEefwi.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59623}, author = {Ju-Won Oh and Dong-Joo Min and Felix J. Herrmann} } @conference {tu2012EAGElsm, title = {Least-squares migration of full wavefield with source encoding}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Multiples can provide valuable information that is missing in primaries, and there is a growing interest in using them for seismic imaging. In our earlier work, we proposed to combine primary estimation and migration to image from the total up-going wavefield. The method proves to be effective but computationally expensive. In this abstract, we propose to reduce the computational cost by removing the multi-dimensional convolution required by primary estimation, and reducing the number of PDE solves in migration by introducing simultaneous sources with source renewal. We gain great performance boost without compromising the quality of the image.}, keywords = {depth migration, EAGE, surface-related multiples}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/tu2012EAGElsm/tu2012EAGElsm.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/tu2012EAGElsm/tu2012EAGElsm_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59688}, author = {Ning Tu and Felix J. 
Herrmann} } @conference {dasilva2012EAGEprobingprecond, title = {Matrix probing and simultaneous sources: a new approach for preconditioning the Hessian}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Recent advances based on the mathematical understanding of the Hessian as, under certain conditions, a pseudo-differential operator have resulted in a new preconditioner by L. Demanet et al. Basing their approach on a suitable basis expansion for the Hessian, by suitably {\textquoteright}probing{\textquoteright} the Hessian, i.e. applying the Hessian to a small number of randomized model perturbations, one can obtain an approximation to the inverse Hessian in an efficient manner. Building upon this approach, we consider this preconditioner in the context of least-squares migration and Full Waveform Inversion and specifically dimensionality reduction techniques in these domains. By utilizing previous work in simultaneous sources, we are able to develop an efficient least-squares migration scheme which recovers higher quality images and hence higher quality search directions in the context of a Gauss-Newton method for Full Waveform Inversion while simultaneously avoiding inordinate amounts of additional work.}, keywords = {EAGE, matrix probing, pseudo-differential operator}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/dasilva2012EAGEprobingprecond/dasilva2012EAGEprobingprecond.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/dasilva2012EAGEprobingprecond/dasilva2012EAGEprobingprecond_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59193}, author = {Curt Da Silva and Felix J. 
Herrmann} } @conference {wason2012EAGEode, title = {Only dither: efficient simultaneous marine acquisition}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Simultaneous-source acquisition is an emerging technology that is stimulating both geophysical research and commercial efforts. The focus here is on simultaneous-source marine acquisition design and sparsity-promoting sequential-source data recovery. We propose a pragmatic simultaneous-source, randomized marine acquisition scheme where multiple vessels sail across an ocean-bottom array firing airguns at {\textendash}- sequential locations and randomly time-dithered instances. Within the context of compressive sensing, where the choice of the sparsifying transform needs to be incoherent with the compressive sampling matrix, we can significantly impact the reconstruction quality, and demonstrate that the compressive sampling matrix resulting from the proposed sampling scheme is sufficiently incoherent with the curvelet transform to yield successful recovery by sparsity promotion. Results are illustrated with simulations of {\textquoteleft}{\textquoteleft}purely" random marine acquisition, which requires an airgun to be located at each source location, and random time-dithering marine acquisition with one and two source vessels. Size of the collected data volumes in all cases is the same. 
Compared to the recovery from the former acquisition scheme (SNR = 10.5dB), we get good results by dithering with only one source vessel (SNR = 8.06dB) in the latter scheme, which improve at the cost of having an additional source vessel (SNR = 9.44dB).}, keywords = {Acquisition, EAGE, marine, simultaneous}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/wason2012EAGEode/wason2012EAGEode.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/wason2012EAGEode/wason2012EAGEode_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58915}, author = {Haneet Wason and Felix J. Herrmann} } @conference {herrmann2012EAGEpmr, title = {Pass on the message: recent insights in large-scale sparse recovery}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Data collection, data processing, and imaging in exploration seismology increasingly hinge on large-scale sparsity promoting solvers to remove artifacts caused by efforts to reduce costs. We show how the inclusion of a "message term" in the calculation of the residuals improves the convergence of these iterative solvers by breaking correlations that develop between the model iterate and the linear system that needs to be inverted. We compare this message-passing scheme to state-of-the-art solvers for problems in missing-trace interpolation and in dimensionality-reduced imaging with phase encoding.}, keywords = {EAGE, message passing, sparse inversion}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEpmr/herrmann2012EAGEpmr.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/herrmann2012EAGEpmr/herrmann2012EAGEpmr_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58935}, author = {Felix J. 
Herrmann} } @conference {vanleeuwen2012EAGEcarpcg, title = {Preconditioning the Helmholtz equation via row-projections}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {3D frequency-domain full waveform inversion relies on being able to efficiently solve the 3D Helmholtz equation. Iterative methods require sophisticated preconditioners because the Helmholtz matrix is typically indefinite. We review a preconditioning technique that is based on row-projections. Notable advantages of this preconditioner over existing ones are that it has low algorithmic complexity, is easily parallelizable and extendable to time-harmonic vector equations.}, keywords = {EAGE, Helmholtz equation, precondition}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEcarpcg/vanleeuwen2012EAGEcarpcg.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEcarpcg/vanleeuwen2012EAGEcarpcg_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58891}, author = {Tristan van Leeuwen and Dan Gordon and Rachel Gordon and Felix J. Herrmann} } @conference {aravkin2012EAGErobust, title = {Source estimation for frequency-domain FWI with robust penalties}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Source estimation is an essential component of full waveform inversion. In the standard frequency domain formulation, there is a closed form solution for the optimal source weights, which can thus be cheaply estimated on the fly. A growing body of work underscores the importance of robust modeling for data with large outliers or artifacts that are not captured by the forward model. Effectively, the least-squares penalty on the residual is replaced by a robust penalty, such as Huber, Hybrid {$\ell_1$}-{$\ell_2$} or Student{\textquoteright}s t. 
As we will demonstrate, it is essential to use the same robust penalty for source estimation. In this abstract, we present a general approach to robust waveform inversion with robust source estimation. In this general formulation, there is no closed form solution for the optimal source weights so we need to solve a scalar optimization problem to obtain these weights. We can efficiently solve this optimization problem with a Newton-like method in a few iterations. The computational cost involved is of the same order as the usual least-squares source estimation procedure. We show numerical examples illustrating robust source estimation and robust waveform inversion on synthetic data with outliers.}, keywords = {EAGE}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/aravkin2012EAGErobust/aravkin2012EAGErobust.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/aravkin2012EAGErobust/aravkin2012EAGErobust_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59196}, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and Henri Calandra and Felix J. Herrmann} } @conference {vanderneut2012EAGEdecomp, title = {Up / down wavefield decomposition by sparse inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {Expressions have been derived for the decomposition of multi-component seismic recordings into up- and down-going constituents. However, these expressions contain singularities at critical angles and can be sensitive for noise. By interpreting wavefield decomposition as an inverse problem and imposing constraints on the sparseness of the solution, we arrive at a robust formalism that can be applied to noisy data. 
The method is demonstrated on synthetic data with multi-component receivers in a horizontal borehole, but can also be applied for different configurations, including OBC and dual-sensor streamers.}, keywords = {EAGE, sparse inversion, wavefield decomposition}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanderneut2012EAGEdecomp/vanderneut2012EAGEdecomp.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanderneut2012EAGEdecomp/vanderneut2012EAGEdecomp_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=58907}, author = {Joost van der Neut and Felix J. Herrmann} } @conference {vanleeuwen2012EAGEext, title = {Wave-equation extended images: computation and velocity continuation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2012}, month = {06}, abstract = {An extended image is a multi-dimensional correlation of source and receiver wavefields. For a kinematically correct velocity, most of the energy will be concentrated at zero offset. Because of the computational cost involved in correlating the wavefields for all offsets, such extended images are computed for a subsurface offset that is aligned with the local dip. In this paper, we present an efficient way to compute extended images for all subsurface offsets without explicitly calculating the receiver wavefields, thus making it computationally feasible to compute such extended images. We show how more conventional image gathers, where the offset is aligned with the dip, can be extracted from this extended image. 
We also present a velocity continuation procedure that allows us to compute the extended image for a given velocity without recomputing all the source wavefields.}, keywords = {EAGE, extended image, velocity continuation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEext/vanleeuwen2012EAGEext.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2012/vanleeuwen2012EAGEext/vanleeuwen2012EAGEext_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=59616}, author = {Tristan van Leeuwen and Felix J. Herrmann} } @conference {herrmann2011EAGEefmsp, title = {Efficient least-squares migration with sparsity promotion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {Seismic imaging relies on the collection of multi-experimental data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. While significant improvements have been made in linearized inversion, the current trend of incessantly pushing for higher quality models in increasingly complicated regions reveals fundamental shortcomings in handling increasing problem sizes numerically. The so-called {\textquoteleft}{\textquoteleft}curse of dimensionality" is the main culprit because it leads to an exponential growth in the number of sources and the corresponding number of wavefield simulations required by {\textquoteleft}{\textquoteleft}wave-equation" migration. We address this issue by reducing the number of sources by a randomized dimensionality reduction technique that combines recent developments in stochastic optimization and compressive sensing. As a result, we replace the current formulations of imaging that rely on all data by a sequence of smaller imaging problems that use the output of the previous inversion as input for the next. 
Empirically, we find speedups of at least one order-of-magnitude when each reduced experiment is considered theoretically as a separate compressive-sensing experiment.}, keywords = {EAGE, Imaging, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/herrmann11EAGEefmsp/herrmann11EAGEefmsp.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/herrmann11EAGEefmsp/herrmann11EAGEefmsp_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50333}, author = {Felix J. Herrmann and Xiang Li} } @conference {lin2011EAGEepsic, title = {Estimating primaries by sparse inversion in a curvelet-like representation domain}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {We present an uplift in the fidelity and wavefront continuity of results obtained from the Estimation of Primaries by Sparse Inversion (EPSI) program by reconstructing the primary events in a hybrid wavelet-curvelet representation domain. EPSI is a multiple removal technique that belongs to the class of wavefield inversion methods, as an alternative to the traditional adaptive-subtraction process. The main assumption is that the correct primary events should be as sparsely-populated in time as possible. A convex reformulation of the original EPSI algorithm allows its convergence property to be preserved even when the solution wavefield is not formed in the physical domain. Since wavefronts and edge-type singularities are sparsely represented in the curvelet domain, sparse solutions formed in this domain will exhibit vastly improved continuity when compared to those formed in the physical domain, especially for the low-energy events at later arrival times. Furthermore, a wavelet-type representation domain will preserve sparsity in the reflected events even if they originate from non-zero-order discontinuities in the subsurface, providing an additional level of robustness. 
This method does not require any changes in the underlying computational algorithm and does not explicitly impose continuity constraints on each update.}, keywords = {EAGE, Presentation, Processing}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/lin11EAGEepsic/lin11EAGEepsic.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/lin11EAGEepsic/lin11EAGEepsic_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50427}, author = {Tim T.Y. Lin and Felix J. Herrmann} } @conference {li2011EAGEfwirr, title = {Full-waveform inversion with randomized L1 recovery for the model updates}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {Full-waveform inversion (FWI) is a data fitting procedure that relies on the collection of seismic data volumes and sophisticated computing to create high-resolution results. With the advent of FWI, the improvements in acquisition and inversion have been substantial, but these improvements come at a high cost because FWI involves extremely large multi-experiment data volumes. The main obstacle is the "curse of dimensionality" exemplified by Nyquist{\textquoteright}s sampling criterion, which puts a disproportionate strain on current acquisition and processing systems as the size and desired resolution increases. In this paper, we address the "curse of dimensionality" by randomized dimensionality reduction of the FWI problem adapted from the field of CS. We invert for model updates by replacing the Gauss-Newton linearized subproblem for subsampled FWI with a sparsity promoting formulation, and solve this formulation using the SPGl1 algorithm. We speed up the algorithm and avoid overfitting the data by solving for the linearized updates only approximately. Our approach is successful because it reduces the size of seismic data volumes without loss of information. 
With this reduction, we can compute a Newton-like update with the reduced data volume at the cost of roughly one gradient update for the fully sampled wavefield.}, keywords = {EAGE, Full-waveform inversion, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/li11EAGEfwirr/li11EAGEfwirr.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/li11EAGEfwirr/li11EAGEfwirr_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50345}, author = {Xiang Li and Aleksandr Y. Aravkin and Tristan van Leeuwen and Felix J. Herrmann} } @conference {vanleeuwen2011EAGEhsdomwi, title = {A hybrid stochastic-deterministic optimization method for waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {Present-day high quality 3D acquisition can give us lower frequencies and longer offsets with which to invert. However, the computational costs involved in handling this data explosion are tremendous. Therefore, recent developments in full-waveform inversion have been geared towards reducing the computational costs involved. A key aspect of several approaches that have been proposed is a dramatic reduction in the number of sources used in each iteration. A reduction in the number of sources directly translates to less PDE-solves and hence a lower computational cost. Recent attention has been drawn towards reducing the sources by randomly combining the sources in to a few supershots, but other strategies are also possible. In all cases, the full data misfit, which involves all the sequential sources, is replaced by a reduced misfit that is much cheaper to evaluate because it involves only a small number of sources (batchsize). The batchsize controls the accuracy with which the reduced misfit approximates the full misfit. The optimization of such an inaccurate, or noisy, misfit is the topic of stochastic optimization. 
In this paper, we propose an optimization strategy that borrows ideas from the field of stochastic optimization. The main idea is that in the early stage of the optimization, far from the true model, we do not need a very accurate misfit. The strategy consists of gradually increasing the batchsize as the iterations proceed. We test the proposed strategy on a synthetic dataset. We achieve a very reasonable inversion result at the cost of roughly 13 evaluations of the full misfit. We observe a speed-up of roughly a factor 20.}, keywords = {EAGE, Full-waveform inversion, Optimization, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/vanleeuwen11EAGEhsdomwi/vanleeuwen11EAGEhsdomwi.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/vanleeuwen11EAGEhsdomwi/vanleeuwen11EAGEhsdomwi_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50341}, author = {Tristan van Leeuwen and Felix J. Herrmann and Mark Schmidt and Michael P. Friedlander} } @conference {aravkin2011EAGEnspf, title = {A nonlinear sparsity promoting formulation and algorithm for full waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {Full Waveform Inversion (FWI) is a computational procedure to extract medium parameters from seismic data. FWI is typically formulated as a nonlinear least squares optimization problem, and various regularization techniques are used to guide the optimization because the problem is ill-posed. In this paper, we propose a novel sparse regularization which exploits the ability of curvelets to efficiently represent geophysical images. We then formulate a corresponding sparsity promoting constrained optimization problem, which we call Nonlinear Basis Pursuit Denoise (NBPDN) and present an algorithm to solve this problem to recover medium parameters. 
The utility of the NBPDN formulation and efficacy of the algorithm are demonstrated on a stylized cross-well experiment, where a sparse velocity perturbation is recovered with higher quality than the standard FWI formulation (solved with LBFGS). The NBPDN formulation and algorithm can recover the sparse perturbation even when the data volume is compressed to 5 \% of the original size using random superposition.}, keywords = {EAGE, Full-waveform inversion, Optimization}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/aravkin11EAGEnspf/aravkin11EAGEnspf.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/aravkin11EAGEnspf/aravkin11EAGEnspf_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50199}, author = {Aleksandr Y. Aravkin and James V. Burke and Felix J. Herrmann and Tristan van Leeuwen} } @conference {aravkin2011EAGEspfwi, title = {Sparsity promoting formulations and algorithms for FWI}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, keywords = {EAGE, Full-waveform inversion}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/Aravkin2011EAGEspfwi/Aravkin2011EAGEspfwi.pdf}, author = {Aleksandr Y. Aravkin and Tristan van Leeuwen and James V. Burke and Felix J. Herrmann} } @conference {tu2011EAGEspmsrm, title = {Sparsity-promoting migration with surface-related multiples}, booktitle = {EAGE Annual Conference Proceedings}, year = {2011}, month = {05}, abstract = {Multiples, especially the surface-related multiples, form a significant part of the total up-going wave- field. If not properly dealt with, they can lead to false reflectors in the final image. So conventionally practitioners remove them prior to migration. 
Recently, research has revealed that multiples can actually provide extra illumination so different methods are proposed to address the issue of how to use multiples in seismic imaging, but with various kinds of limitations. In this abstract, we combine primary estimation and sparsity-promoting migration into one convex-optimization process to include information from multiples. Synthetic examples show that multiples do make active contributions to seismic migration. Also by this combination, we can benefit from better recoveries of the Green{\textquoteright}s function by using sparsity-promoting algorithms since reflectivity is sparser than the Green{\textquoteright}s function.}, keywords = {EAGE, Imaging, Presentation, Processing}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/tu11EAGEspmsrm/tu11EAGEspmsrm.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2011/tu11EAGEspmsrm/tu11EAGEspmsrm_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=50369}, author = {Ning Tu and Tim T.Y. Lin and Felix J. Herrmann} } @conference {frijlink2010EAGEcos, title = {Comparison of standard adaptive subtraction and primary-multiple separation in the curvelet domain}, booktitle = {EAGE Annual Conference Proceedings}, year = {2010}, month = {06}, abstract = {In recent years, data-driven multiple prediction methods and wavefield extrapolation methods have proven to be powerful methods to attenuate multiples from data acquired in complex 3-D geologic environments. These methods make use of a two-stage approach, where first the (surface-related and / or internal) multiples are predicted before they are subtracted from the original input data adaptively. The quality of these predicted multiples often raises high expectations for the adaptive subtraction techniques, but for various reasons these expectations are not always met in practice. 
Standard adaptive subtraction methods use the well-known minimum energy criterion, stating that the total energy after optimal multiple attenuation should be minimal. When primaries and multiples interfere, the minimum energy criterion is no longer appropriate. Also, when multiples of different orders interfere, adaptive energy minimization will lead to a compromise between different amplitudes corrections for the different orders of multiples. This paper investigates the performance of two multiple subtraction schemes for a real data set that exhibits both interference problems. Results from an adaptive subtraction in the real curvelet domain, separating primaries and multiples, are compared to those obtained using a more conventional adaptive subtraction method in the spatial domain.}, keywords = {EAGE}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/frijlink10EAGEcos/frijlink10EAGEcos.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39875}, author = {M. O. Frijlink and Reza Shahidi and Felix J. Herrmann and R. G. van Borselen} } @conference {johnson2010EAGEeop, title = {Estimation of primaries via sparse inversion with reciprocity}, booktitle = {EAGE Annual Conference Proceedings}, year = {2010}, abstract = {Accurate removal of surface related multiples is a key step in seismic data processing. The industry standard for removing multiples is SRME, which involves convolving the data with itself to predict the multiples, followed by an adaptive subtraction procedure to recover the primaries (Verschuur and Berkhout, 1997). Other methods involve multidimensional division of the up-going and down-going wavefields (Amundsen, 2001). However, this approach may suffer from stability problems. 
With the introduction of the {\textquoteleft}{\textquoteleft}estimation of primaries by sparse inversion{\textquoteright}{\textquoteright} (EPSI), van Groenestijn and Verschuur (2009) recently reformulated SRME to jointly estimate the surface-free impulse response and the source signature directly from the data. The advantage of EPSI is that it recovers the primary response directly, and does not require a second processing step for the subtraction of estimated multiples from the original data. However, because it estimates both the primary impulse response and source signature from the data, EPSI must be regularized. Motivated by recent successful application of the curvelet transform in seismic data processing (Herrmann et al., 2007), we formulate EPSI as a bi-convex optimization problem that seeks sparsity on the surface-free Green{\textquoteright}s function and Fourier-domain smoothness on the source wavelet. Our main contribution compared to previous work (Lin and Herrmann, 2009), and the contribution of that author to the proceedings of this meeting (Lin and Herrmann, 2010), is that we employ the physical principle of source-receiver reciprocity to improve the inversion.}, keywords = {EAGE}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/johnson10EAGEeop/johnson10EAGEeop.pdf}, author = {James Johnson and Tim T.Y. Lin and Felix J. Herrmann} } @conference {herrmann2010EAGErds, title = {Randomized dimensionality reduction for full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2010}, month = {06}, abstract = {Full-waveform inversion relies on the collection of large multi-experiment data volumes in combination with a sophisticated back-end to create high-fidelity inversion results. 
While improvements in acquisition and inversion have been extremely successful, the current trend of incessantly pushing for higher quality models in increasingly complicated regions of the Earth continues to reveal fundamental shortcomings in our ability to handle the ever increasing problem size numerically. Two causes can be identified as the main culprits responsible for this barrier. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this. In this paper, we address this situation by randomized dimensionality reduction, which we adapt from the field of compressive sensing. In this approach, we combine deliberate randomized subsampling with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information. With this reduction, we compute Newton-like updates at the cost of roughly one gradient update for the fully-sampled wavefield.}, keywords = {EAGE, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErds/herrmann10EAGErds.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErds/herrmann10EAGErds_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39352}, author = {Felix J. 
Herrmann and Xiang Li} } @conference {herrmann2010EAGErss, title = {Randomized sampling strategies}, booktitle = {EAGE Annual Conference Proceedings}, year = {2010}, month = {06}, abstract = {Seismic exploration relies on the collection of massive data volumes that are subsequently mined for information during seismic processing. While this approach has been extremely successful in the past, the current trend towards higher quality images in increasingly complicated regions continues to reveal fundamental shortcomings in our workflows for high-dimensional data volumes. Two causes can be identified. First, there is the so-called {\textquoteleft}{\textquoteleft}curse of dimensionality{\textquoteright}{\textquoteright} exemplified by Nyquist{\textquoteright}s sampling criterion, which puts disproportionate strain on current acquisition and processing systems as the size and desired resolution of our survey areas continues to increase. Secondly, there is the recent {\textquoteleft}{\textquoteleft}departure from Moore{\textquoteright}s law{\textquoteright}{\textquoteright} that forces us to lower our expectations to compute ourselves out of this curse of dimensionality. In this paper, we offer a way out of this situation by a deliberate randomized subsampling combined with structure-exploiting transform-domain sparsity promotion. Our approach is successful because it reduces the size of seismic data volumes without loss of information. 
As such we end up with a new technology where the costs of acquisition and processing are no longer dictated by the size of the acquisition but by the transform-domain sparsity of the end-product.}, keywords = {EAGE, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErss/herrmann10EAGErss.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/herrmann10EAGErss/herrmann10EAGErss_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39131}, author = {Felix J. Herrmann} } @conference {lin2010EAGEseo, title = {Stabilized estimation of primaries via sparse inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2010}, month = {06}, abstract = {Estimation of Primaries by Sparse Inversion (EPSI) is a recent method for surface-related multiple removal using a direct estimation method closely related to Amundsen inversion, where under a sparsity assumption the primary impulse response is determined directly from a data-driven wavefield inversion process. One of the major difficulties in its practical adoption is that one must have precise knowledge of a time-window that contains multiple-free primaries during each update. Moreover, due to the nuances involved in regularizing the model impulse response in the inverse problem, the EPSI approach has an additional number of inversion parameters where it may be difficult to choose a reasonable value. We show that the specific sparsity constraint on the EPSI updates lead to an inherently intractable problem, and that the time-window and other inversion variables arise in the context of additional regularizations that attempts to drive towards a meaningful solution. 
We furthermore suggest a way to remove almost all of these parameters via convexification, which stabilizes the inversion while preserving the crucial sparsity assumption in the primary impulse response model.}, keywords = {EAGE, Presentation, Processing}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/lin10EAGEseo/lin10EAGEseo.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2010/lin10EAGEseo/lin10EAGEseo_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=39122}, author = {Tim T.Y. Lin and Felix J. Herrmann} } @conference {herrmann2009EAGEcsa, title = {Compressive sensing applied to full-waveform inversion}, booktitle = {EAGE Annual Conference Proceedings}, year = {2009}, month = {06}, abstract = {With the recent resurgence of full-waveform inversion, the computational cost of solving forward modeling problems has become{\textendash}-aside from issues with non-uniqueness{\textendash}-one of the major impediments withstanding successful application of this technology to industry-size data volumes. To overcome this impediment, we argue that further improvements in this area will depend on a problem formulation with a computational complexity that is no longer strictly determined by the size of the discretization but by transform-domain sparsity of its solution. In this new paradigm, we bring computational costs in par with our ability to compress seismic data and images. This premise is related to two recent developments. First, there is the new field of compressive sensing (CS in short throughout the paper, Cand{\textquoteleft}es et al., 2006; Donoho, 2006){\textendash}-where the argument is made, and rigorously proven, that compressible signals can be recovered from severely sub-Nyquist sampling by solving a sparsity promoting program. 
Second, there is in the seismic community the recent resurgence of simultaneous-source acquisition (Beasley, 2008; Krohn and Neelamani, 2008; Herrmann et al., 2009; Berkhout, 2008; Neelamani et al., 2008), and continuing efforts to reduce the cost of seismic modeling, imaging, and inversion through phase encoding of simultaneous sources (Morton and Ober, 1998; Romero et al., 2000; Krohn and Neelamani, 2008; Herrmann et al., 2009), removal of subsets of angular frequencies (Sirgue and Pratt, 2004; Mulder and Plessix, 2004; Lin et al., 2008) or plane waves (Vigh and Starr, 2008). By using CS principles, we remove sub-sampling interferences associated with these approaches through a combination of exploiting transform-domain sparsity, properties of certain sub-sampling schemes, and the existence of sparsity promoting solvers.}, keywords = {EAGE}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/Herrmann09EAGEcsa/Herrmann09EAGEcsa.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/Herrmann09EAGEcsa/Herrmann09EAGEcsa_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=23961}, author = {Felix J. Herrmann and Yogi A. Erlangga and Tim T.Y. Lin} } @conference {lin2009EAGEdsa, title = {Designing simultaneous acquisitions with compressive sensing}, booktitle = {EAGE Annual Conference Proceedings}, year = {2009}, month = {06}, abstract = {The goal of this paper is to design a functional simultaneous acquisition scheme by applying the principles of compressive sensing. By framing the acquisition in a compressive sensing setting we immediately gain insight into not only how to choose the source signature and shot patterns, but also in how well we can hope to demultiplex the data when given a set amount of reduction in the number of sweeps. 
The principles of compressive sensing dictate that the quality of the demultiplexed data is closely related to the transform-domain sparsity of the solution. This means that, given an estimate in the complexity of the expectant data wavefield, it is possible to controllably reduce the number of shots that need to be recorded in the field. We show a proof of concept by introducing an acquisition compatible with compressive sensing based on randomly phase-encoded vibroseis sweeps.}, keywords = {EAGE, Presentation}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/lin09EAGEdsa/lin09EAGEdsa.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/lin09EAGEdsa/lin2009EAGEdsa_pres.pdf}, author = {Tim T.Y. Lin and Felix J. Herrmann} } @conference {erlangga2009EAGEmwi, title = {Migration with implicit solvers for the time-harmonic Helmholtz equation}, booktitle = {EAGE Annual Conference Proceedings}, year = {2009}, month = {06}, abstract = {From the measured seismic data, the location and the amplitude of reflectors can be determined via a migration algorithm. Classically, following Claerbout{\textquoteright}s imaging principle [2], a reflector is located at the position where the source{\textquoteright}s forward-propagated wavefield correlates with the backward-propagated wavefield of the receiver data. Lailly and Tarantola later showed that this imaging principle is an instance of inverse problems, with the associated migration operator formulated via a least-squares functional; see [6, 12, 13]. Furthermore, they showed that the migrated image is associated with the gradient of this functional with respect to the image. If the solution of the least-squares functional is done iteratively, the correlation-based image coincides up to a constant with the first iteration of a gradient method. In practice, this migration is done either in the time domain or in the frequency domain. 
In the frequency-domain migration, the main bottleneck thus far, which renders its full implementation to large scale problems, is the lack of efficient solvers for computing wavefields. Robust direct methods easily run into excessive memory requirements as the size of the problem increases. On the other hand, iterative methods, which are less demanding in terms of memory, suffered from lack of convergence. During the past years, however, progress has been made in the development of an efficient iterative method [4, 3] for the frequency-domain wavefield computations. In this paper, we will show the significance of this method (called MKMG) in the context of the frequency-domain migration, where multi-shot-frequency wavefields (of order of 10,000 related wavefields) need to be computed.}, keywords = {EAGE}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/Erlangga09EAGEmwi/Erlangga09EAGEmwi.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2009/Erlangga09EAGEmwi/Erlangga09EAGEmwi_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=23955}, author = {Yogi A. Erlangga and Felix J. Herrmann} } @conference {yarham2007EAGEcai, title = {Curvelet applications in surface wave removal}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, {\textellipsis} in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {Ground roll removal of seismic signals can be a challenging prospect. Dealing with undersampling causing aliased waves amplitudes orders of magnitude higher than reflector signals and low frequency loss of information due to band ...}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/yarham07EAGEcai/yarham07EAGEcai.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7590}, author = {Carson Yarham and Gilles Hennenfent and Felix J. 
Herrmann} } @conference {hennenfent2007EAGEcrw, title = {Curvelet reconstruction with sparsity-promoting inversion: successes and challenges}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, {\textellipsis} in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {In this overview of the recent Curvelet Reconstruction with Sparsity-promoting Inversion (CRSI) method, we present our latest 2-D and 3-D interpolation results on both synthetic and real datasets. We compare these results to interpolated data using other existing methods. Finally, we discuss the challenges related to sparsity-promoting solvers for the large-scale problems the industry faces.}, keywords = {EAGE, Presentation, SLIM, workshop}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEcrw/hennenfent07EAGEcrw.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEcrw/hennenfent07EAGEcrw_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7553}, author = {Gilles Hennenfent and Felix J. Herrmann} } @conference {hennenfent2007EAGEisf, title = {Irregular sampling: from aliasing to noise}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {Seismic data is often irregularly and/or sparsely sampled along spatial coordinates. We show that these acquisition geometries are not necessarily a source of adversity in order to accurately reconstruct adequately-sampled data. We use two examples to illustrate that it may actually be better than equivalent regularly subsampled data. This comment was already made in earlier works by other authors. We explain this behavior by two key observations. Firstly, a noise-free underdetermined problem can be seen as a noisy well-determined problem. 
Secondly, regularly subsampling creates strong coherent acquisition noise (aliasing) difficult to remove unlike the noise created by irregularly subsampling that is typically weaker and Gaussian-like.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf_pres.pdf}, url2 = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/hennenfent07EAGEisf/hennenfent07EAGEisf_WS.pdf}, author = {Gilles Hennenfent and Felix J. Herrmann} } @conference {herrmann2007EAGEjda, title = {Just diagonalize: a curvelet-based approach to seismic amplitude recovery}, booktitle = {EAGE Workshop on Curvelets, contourlets, seislets, {\textellipsis} in seismic data processing - where are we and where are we going?}, year = {2007}, month = {06}, abstract = {In his presentation we present a nonlinear curvelet-based sparsity-promoting formulation for the recovery of seismic amplitudes. We show that the curvelet{\textquoteright}s wavefront detection capability and invariance under wave propagation lead to a formulation of this recovery problem that is stable under noise and missing data. \copyright2007 Society of Exploration Geophysicists}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/SINBAD/2007/herrmann2007EAGEjda/herrmann2007EAGEjda_paper.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/SINBAD/2007/herrmann2007EAGEjda/herrmann2007EAGEjda_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7555}, author = {Felix J. Herrmann and Peyman P. 
Moghaddam} } @conference {yarham2007EAGEnsw, title = {Nonlinear surface wave prediction and separation}, booktitle = {EAGE 2007}, year = {2007}, abstract = {Removal of surface waves is an integral step in seismic processing. There are many standard techniques for removal of this type of coherent noise, such as f-k filtering, but these methods are not always effective. One of the common problems with removal of surface waves is that they tend to be aliased in the frequency domain. This can make removal difficult and affect the frequency content of the reflector signals, as these signals will not be completely separated. As seen in (Hennenfent, G. and F. Herrmann, 2006, Application of stable signal recovery to seismic interpolation) interpolation can be used effectively to resample the seismic record thus dealiasing the surface waves. This separates the signals in the frequency domain allowing for a more precise and complete removal. The use of this technique with curvelet based surface wave predictions and an iterative L1 separation scheme can be used to remove surface waves from shot records more completely than with standard techniques.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/yarham2007EAGEnsw/yarham2007EAGEnsw.pdf}, author = {Carson Yarham} } @conference {herrmann2007EAGErdi, title = {Recent developments in curvelet-based seismic processing}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {Combinations of parsimonious signal representations with nonlinear sparsity promoting programs hold the key to the next-generation of seismic data processing algorithms, since they allow for a formulation that is stable w.r.t. 
noise \& incomplete data and does not require prior information on the velocity or locations and dips of the events}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGErdi/herrmann07EAGErdi.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGErdi/herrmann07EAGErdi_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7548}, author = {Felix J. Herrmann} } @conference {moghaddam2007EAGEsar, title = {Seismic amplitude recovery with curvelets}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {A non-linear singularity-preserving solution to the least-squares seismic imaging problem with sparseness and continuity constraints is proposed. The applied formalism explores curvelets as a directional frame that, by their sparsity on the image, and their invariance under the imaging operators, allows for a stable recovery of the amplitudes. Our method is based on the estimation of the normal operator in the form of an {\textquoteright}eigenvalue{\textquoteright} decomposition with curvelets as the {\textquoteright}eigenvectors{\textquoteright}. Subsequently, we propose an inversion method that derives from estimation of the normal operator and is formulated as a convex optimization problem. Sparsity in the curvelet domain as well as continuity along the reflectors in the image domain are promoted as part of this optimization. Our method is tested with a reverse-time {\textquoteright}wave-equation{\textquoteright} migration code simulating the acoustic wave equation.}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/moghaddam07EAGEsar/moghaddam07EAGEsar.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6935}, author = {Peyman P. Moghaddam and Felix J. Herrmann and Christiaan C. 
Stolk} } @conference {herrmann2007EAGEsia, title = {Seismic imaging and processing with curvelets}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {In this paper, we present a nonlinear curvelet-based sparsity-promoting formulation for three problems in seismic processing and imaging namely, seismic data regularization from data with large percentages of traces missing; seismic amplitude recovery for sub-salt images obtained by reverse-time migration and primary-multiple separation, given an inaccurate multiple prediction. We argue why these nonlinear formulations are beneficial.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsia/herrmann07EAGEsia.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsia/herrmann07EAGEsia_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7075}, author = {Felix J. Herrmann and Gilles Hennenfent and Peyman P. Moghaddam} } @conference {maysami2007EAGEsrc, title = {Seismic reflector characterization by a multiscale detection-estimation method}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {Seismic transitions of the subsurface are typically considered as zero-order singularities (step functions). According to this model, the conventional deconvolution problem aims at recovering the seismic reflectivity as a sparse spike train. However, recent multiscale analysis on sedimentary records revealed the existence of accumulations of varying order singularities in the subsurface, which give rise to fractional-order discontinuities. This observation not only calls for a richer class of seismic reflection waveforms, but it also requires a different methodology to detect and characterize these reflection events. For instance, the assumptions underlying conventional deconvolution no longer hold. 
Because of the bandwidth limitation of seismic data, multiscale analysis methods based on the decay rate of wavelet coefficients may yield ambiguous results. We avoid this problem by formulating the estimation of the singularity orders by a parametric nonlinear inversion method.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/maysami07EAGEsrc/maysami07EAGEsrc.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/maysami07EAGEsrc/maysami07EAGEsrc_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=7081}, author = {Mohammad Maysami and Felix J. Herrmann} } @conference {challa2007EAGEsrf, title = {Signal reconstruction from incomplete and misplaced measurements}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {Constrained by practical and economical considerations, one often uses seismic data with missing traces. The use of such data results in image artifacts and poor spatial resolution. Sometimes due to practical limitations, measurements may be available on a perturbed grid, instead of on the designated grid. Due to algorithmic requirements, when such measurements are viewed as those on the designated grid, the recovery procedures may result in additional artifacts. This paper interpolates incomplete data onto regular grid via the Fourier domain, using a recently developed greedy algorithm. The basic objective is to study experimentally as to what could be the size of the perturbation in measurement coordinates that allows for the measurements on the perturbed grid to be considered as on the designated grid for faithful recovery. 
Our experimental work shows that for compressible signals, a uniformly distributed perturbation can be offset with slightly more number of measurements.}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/challa07EAGEsrf/challa07EAGEsrf.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6917}, author = {Challa S. Sastry and Gilles Hennenfent and Felix J. Herrmann} } @conference {herrmann2007EAGEsrm, title = {Surface related multiple prediction from incomplete data}, booktitle = {EAGE Annual Conference Proceedings}, year = {2007}, month = {06}, abstract = {Incomplete data, unknown source-receiver signatures and free-surface reflectivity represent challenges for a successful prediction and subsequent removal of multiples. In this paper, a new method will be represented that tackles these challenges by combining what we know about wavefield (de-)focussing, by weighted convolutions/correlations, and recently developed curvelet-based recovery by sparsity-promoting inversion (CRSI). With this combination, we are able to leverage recent insights from wave physics towards a nonlinear formulation for the multiple-prediction problem that works for incomplete data and without detailed knowledge on the surface effects.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsrm/herrmann07EAGEsrm.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2007/herrmann07EAGEsrm/herrmann07EAGEsrm_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=6496}, author = {Felix J. 
Herrmann} } @conference {herrmann2005EAGEosf, title = {Optimization strategies for sparseness- and continuity-enhanced imaging: theory}, booktitle = {EAGE Annual Conference Proceedings}, year = {2005}, month = {06}, abstract = {Two complementary solution strategies to the least-squares migration problem with sparseness- and continuity constraints are proposed. The applied formalism explores the sparseness of curvelets on the reflectivity and their invariance under the demigration-migration operator. Sparseness is enhanced by (approximately) minimizing a (weighted) l1-norm on the curvelet coefficients. Continuity along imaged reflectors is brought out by minimizing the anisotropic diffusion or total variation norm which penalizes variations along and in between reflectors. A brief sketch of the theory is provided as well as a number of synthetic examples. Technical details on the implementation of the optimization strategies are deferred to an accompanying paper: implementation.}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGEosf/Herrmann05EAGEosf.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1343}, author = {Felix J. Herrmann and Peyman P. Moghaddam and R. Kirlin} } @conference {herrmann2005EAGErcd, title = {Robust curvelet-domain data continuation with sparseness constraints}, booktitle = {EAGE Annual Conference Proceedings}, year = {2005}, month = {06}, abstract = {A robust data interpolation method using curvelets frames is presented. The advantage of this method is that curvelets arguably provide an optimal sparse representation for solutions of wave equations with smooth coefficients. As such curvelets frames circum- vent {\textendash} besides the assumption of caustic-free data {\textendash} the necessity to make parametric assumptions (e.g. through linear/parabolic Radon or demigration) regarding the shape of events in seismic data. 
A brief sketch of the theory is provided as well as a number of examples on synthetic and real data.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd/Herrmann05EAGErcd.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd/Herrmann05EAGErcd.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1112}, author = {Felix J. Herrmann and Gilles Hennenfent} } @conference {herrmann2005EAGErcd1, title = {Robust curvelet-domain primary-multiple separation with sparseness constraints}, booktitle = {EAGE Annual Conference Proceedings}, year = {2005}, month = {06}, abstract = {A non-linear primary-multiple separation method using curvelets frames is presented. The advantage of this method is that curvelets arguably provide an optimal sparse representation for both primaries and multiples. As such curvelets frames are ideal candidates to separate primaries from multiples given inaccurate predictions for these two data components. The method derives its robustness regarding the presence of noise; errors in the prediction and missing data from the curvelet frame{\textquoteright}s ability (i) to represent both signal components with a limited number of multi-scale and directional basis functions; (ii) to separate the components on the basis of differences in location, orientation and scales and (iii) to minimize correlations between the coefficients of the two components. A brief sketch of the theory is provided as well as a number of examples on synthetic and real data.}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Herrmann05EAGErcd1/Herrmann05EAGErcd1.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1384}, author = {Felix J. Herrmann and D. J. 
Verschuur} } @conference {hennenfent2005EAGEsdr, title = {Seismic deconvolution revisited with curvelet frames}, booktitle = {EAGE Annual Conference Proceedings}, year = {2005}, month = {06}, abstract = {We propose an efficient iterative curvelet-regularized deconvolution algorithm that exploits continuity along reflectors in seismic images. Curvelets are a new multiscale transform that provides sparse representations for images (such as seismic images) that comprise smooth objects separated by piece-wise smooth discontinuities. Our technique combines conjugate gradient-based convolution operator inversion with noise regularization that is performed using non-linear curvelet coefficient shrinkage (thresholding). The shrinkage operation leverages the sparsity of curvelets representations. Simulations demonstrate that our algorithm provides improved resolution compared to the traditional Wiener-based deconvolution approach.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Hennenfent05EAGEsdr/Hennenfent05EAGEsdr.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2005/Hennenfent05EAGEsdr/Hennenfent05EAGEsdr_poster.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=1383}, author = {Gilles Hennenfent and R. Neelamani and Felix J. Herrmann} } @conference {beyreuther2004EAGEcdo, title = {Curvelet denoising of 4-D seismic}, booktitle = {EAGE Annual Conference Proceedings}, year = {2004}, month = {06}, abstract = {With burgeoning world demand and a limited rate of discovery of new reserves, there is increasing impetus upon the industry to optimize recovery from already existing fields. 4D, or time-lapse, seismic imaging is an emerging technology that holds great promise to better monitor and optimise reservoir production. 
The basic idea behind 4D seismic is that when multiple 3D surveys are acquired at separate calendar times over a producing field, the reservoir geology will not change from survey to survey but the state of the reservoir fluids will change. Thus, taking the difference between two 3D surveys should remove the static geologic contribution to the data and isolate the time- varying fluid flow component. However, a major challenge in 4D seismic is that acquisition and processing differences between 3D surveys often overshadow the changes caused by fluid flow. This problem is compounded when 4D effects are sought to be derived from vintage 3D data sets that were not originally acquired with 4D in mind. The goal of this study is to remove the acquisition and imaging artefacts from a 4D seismic difference cube using Curveket processing techniques.}, keywords = {EAGE, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2004/Beyreuther04EAGEcdo/beyreuther2004EAGEcdo_paper.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2004/Beyreuther04EAGEcdo/Beyreuther04EAGEcdo_pres.pdf}, url2 = {https://circle.ubc.ca/bitstream/handle/2429/453/EAGE4D2004.pdf?sequence=1}, author = {Moritz Beyreuther and Felix J. Herrmann and Jamin Cristall} } @conference {herrmann2004EAGEcdl, title = {Curvelet-domain least-squares migration with sparseness constraints}, booktitle = {EAGE Annual Conference Proceedings}, year = {2004}, month = {06}, abstract = {A non-linear edge-preserving solution to the least-squares migration problem with sparseness constraints is introduced. The applied formalism explores Curvelets as basis functions that, by virtue of their sparseness and locality, not only allow for a reduction of the dimensionality of the imaging problem but which also naturally lead to a non-linear solution with significantly improved signal-to-noise ratio. 
Additional conditions on the image are imposed by solving a constrained optimization problem on the estimated Curvelet coefficients initialized by thresholding. This optimization is designed to also restore the amplitudes by (approximately) inverting the normal operator, which is like-wise the (de)-migration operators, almost diagonalized by the Curvelet transform.}, keywords = {EAGE, Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2004/Herrmann04EAGEcdl/Herrmann04EAGEcdl.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2004/Herrmann04EAGEcdl/Herrmann04EAGEcdl_pres.pdf}, url2 = {http://earthdoc.eage.org/publication/publicationdetails/?publication=2073}, author = {Felix J. Herrmann and Peyman P. Moghaddam} }