@techreport {orozco2022MIDLmei, title = {Memory Efficient Invertible Neural Networks for 3D Photoacoustic Imaging}, number = {TR-CSE-2022-2}, year = {2022}, month = {04}, abstract = {Photoacoustic imaging (PAI) can image high-resolution structures of clinical interest such as vascularity in cancerous tumor monitoring. When imaging human subjects, geometric restrictions force limited-view data retrieval, causing imaging artifacts. Iterative physical-model-based approaches reduce artifacts but require prohibitively time-consuming PDE solves. Machine learning (ML) has accelerated PAI by combining physical models and learned networks. However, the depth and overall power of ML methods are limited by memory-intensive training. We propose using invertible neural networks (INNs) to alleviate memory pressure. We demonstrate that INNs can image 3D photoacoustic volumes in the setting of limited-view, noisy, and subsampled data. The frugal constant memory usage of INNs enables us to train an arbitrary depth of learned layers on a consumer GPU with 16GB RAM.}, keywords = {invertible networks, medical imaging, photoacoustic imaging, Physics and Machine Learning Hybrid}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2022/orozco2022MIDLmei/midl_2022.html}, author = {Rafael Orozco and Mathias Louboutin and Felix J. Herrmann} } @techreport {louboutin2021NIPSmcte, title = {Low-memory stochastic backpropagation with multi-channel randomized trace estimation}, number = {TR-CSE-2021-1}, year = {2021}, month = {06}, abstract = {Thanks to the combination of state-of-the-art accelerators and highly optimized open software frameworks, there has been tremendous progress in the performance of deep neural networks. While these developments have been responsible for many breakthroughs, progress towards solving large-scale problems, such as video encoding and semantic segmentation in 3D, is hampered because access to on-premise memory is often limited. Instead of relying on (optimal) checkpointing or invertibility of the network layers{\textendash}to recover the activations during backpropagation{\textendash}we propose to approximate the gradient of convolutional layers in neural networks with a multi-channel randomized trace estimation technique. Compared to other methods, this approach is simple, amenable to analyses, and leads to a greatly reduced memory footprint. Even though the randomized trace estimation introduces stochasticity during training, we argue that this is of little consequence as long as the induced errors are of the same order as errors in the gradient due to the use of stochastic gradient descent. We discuss the performance of networks trained with stochastic backpropagation and how the error can be controlled while maximizing memory usage and minimizing computational overhead.}, keywords = {Convolutions, HPC, Low Memory, machine learning, randomized linear algebra}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2021/louboutin2021NIPSmcte/louboutin2021NIPSmcte.pdf}, software = {https://github.com/slimgroup/XConv}, author = {Mathias Louboutin and Ali Siahkoohi and Rongrong Wang and Felix J.
Herrmann} } @techreport {siahkoohi2020TRfuqf, title = {Faster Uncertainty Quantification for Inverse Problems with Conditional Normalizing Flows}, number = {TR-CSE-2020-2}, year = {2020}, month = {07}, institution = {Georgia Institute of Technology}, abstract = {In inverse problems, we often have access to data consisting of paired samples $(x,y)\sim p_{X,Y}(x,y)$ where $y$ are partial observations of a physical system, and $x$ represents the unknowns of the problem. Under these circumstances, we can employ supervised training to learn a solution $x$ and its uncertainty from the observations $y$. We refer to this problem as the "supervised" case. However, the data $y\sim p_Y(y)$ collected at one point could be distributed differently than observations $y'\sim p_{Y'}(y')$, relevant for a current set of problems. In the context of Bayesian inference, we propose a two-step scheme, which makes use of normalizing flows and joint data to train a conditional generator $q_\theta(x|y)$ to approximate the target posterior density $p_{X|Y}(x|y)$. Additionally, this preliminary phase provides a density function $q_\theta(x|y)$, which can be recast as a prior for the "unsupervised" problem, e.g. when only the observations $y'\sim p_{Y'}(y')$, a likelihood model $y'|x$, and a prior on $x'$ are known. We then train another invertible generator with output density $q'_\phi(x|y')$ specifically for $y'$, allowing us to sample from the posterior $p_{X|Y'}(x|y')$. We present some synthetic results that demonstrate considerable training speedup when reusing the pretrained network $q_\theta(x|y')$ as a warm start or preconditioner for approximating $p_{X|Y'}(x|y')$, instead of learning from scratch. This training modality can be interpreted as an instance of transfer learning. This result is particularly relevant for large-scale inverse problems that employ expensive numerical simulations.}, keywords = {deep learning, invertible networks, Uncertainty quantification}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/siahkoohi2020TRfuqf/siahkoohi2020TRfuqf.html}, author = {Ali Siahkoohi and Gabrio Rizzuti and Philipp A. Witte and Felix J. Herrmann} } @techreport {sharan2020lsh, title = {Large scale high-frequency wavefield reconstruction with recursively weighted matrix factorizations}, number = {TR-CSE-2020-4}, year = {2020}, month = {10}, abstract = {Acquiring seismic data on a regular periodic fine grid is challenging. By exploiting the low-rank approximation property of fully sampled seismic data in some transform domain, low-rank matrix completion offers a scalable way to reconstruct seismic data on a regular periodic fine grid from coarsely randomly sampled data acquired in the field. While wavefield reconstruction has been applied successfully at the lower end of the spectrum, its performance deteriorates at the higher frequencies where the low-rank assumption no longer holds, rendering this type of wavefield reconstruction ineffective in situations where high-resolution images are desired. We overcome this shortcoming by exploiting similarities between adjacent frequency slices explicitly.
During low-rank matrix factorization, these similarities translate to alignment of subspaces of the factors, a notion we propose to employ as we reconstruct monochromatic frequency slices recursively starting at the low frequencies. While this idea is relatively simple at its core, turning this recent insight into a successful scalable wavefield reconstruction scheme for 3D seismic data requires a number of important steps. First, we need to move the weighting matrices, which encapsulate the prior information from adjacent frequency slices, from the objective to the data-misfit constraint. This move considerably improves the performance of the weighted low-rank matrix factorization on which our wavefield reconstruction is based. Secondly, we introduce approximations that allow us to decouple computations on a row-by-row and column-by-column basis, which in turn allows us to parallelize the alternating optimization on which our low-rank factorization relies. The combination of weighting and decoupling leads to a computationally feasible full-azimuth wavefield reconstruction scheme that scales to industry-scale problem sizes. We demonstrate the performance of the proposed parallel algorithm on a 2D field dataset and on a 3D synthetic dataset. In both cases our approach produces high-fidelity broadband wavefield reconstructions from severely subsampled data.}, keywords = {5D reconstruction, compressed sensing, frequency-domain, parallel, signal processing}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/sharan2020lsh/sharan2020lsh.html}, author = {Shashin Sharan and Yijun Zhang and Oscar Lopez and Felix J. Herrmann} } @techreport {louboutin2020SCsta, title = {Scaling through abstractions {\textendash} high-performance vectorial wave simulations for seismic inversion with Devito}, number = {TR-CSE-2020-3}, year = {2020}, month = {4}, institution = {Georgia Institute of Technology}, abstract = {Devito is an open-source Python project based on domain-specific language and compiler technology. Driven by the requirements of rapid HPC applications development in exploration seismology, the language and compiler have evolved significantly since inception. Sophisticated boundary conditions, tensor contractions, sparse operations and features such as staggered grids and sub-domains are all supported; operators of essentially arbitrary complexity can be generated. To accommodate this flexibility whilst ensuring performance, data dependency analysis is utilized to schedule loops and detect computational properties such as parallelism. In this article, the generation and simulation of MPI-parallel propagators (along with their adjoints) for the pseudo-acoustic wave-equation in tilted transverse isotropic media and the elastic wave-equation are presented. Simulations are carried out on industry-scale synthetic models in an HPC Cloud system and reach a performance of 28TFLOP/s, hence demonstrating Devito{\textquoteright}s suitability for production-grade seismic inversion problems.}, keywords = {devito, elastic, finite-difference, HPC, large-scale, RTM, TTI}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/louboutin2020SCsta/louboutin2020SCsta.html}, author = {Mathias Louboutin and Fabio Luporini and Philipp A. Witte and Rhodri Nelson and George Bisbas and Jan Thorbecke and Felix J.
Herrmann and Gerard Gorman} } @techreport {louboutin2020SEGtwri, title = {Time-domain Wavefield Reconstruction Inversion in a TTI medium}, number = {TR-CSE-2020-1}, year = {2020}, month = {4}, institution = {Georgia Institute of Technology}, abstract = {We introduce a generalization of time-domain wavefield reconstruction inversion to anisotropic acoustic modeling. Wavefield reconstruction inversion has been extensively researched in recent years for its ability to mitigate cycle skipping. The original method was formulated in the frequency domain with acoustic isotropic physics. However, frequency-domain modeling requires sophisticated iterative solvers that are difficult to scale to industrial-size problems and more realistic physical assumptions, such as tilted transverse isotropy, the subject of this study. The work presented here is based on a recently proposed dual formulation of wavefield reconstruction inversion, which allows for time-domain propagators that are suitable for both large scales and more accurate physics.}, keywords = {anisotropy, FWI, TTI, WRI}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/louboutin2020SEGtwri/louboutin2020SEGtwri.html}, url2 = {https://arxiv.org/pdf/2004.07355.pdf}, author = {Mathias Louboutin and Gabrio Rizzuti and Felix J. Herrmann} } @techreport {siahkoohi2019TRnna, title = {Neural network augmented wave-equation simulation}, number = {TR-CSE-2019-1}, year = {2019}, month = {09}, institution = {Georgia Institute of Technology}, abstract = {Accurate forward modeling is important for solving inverse problems. An inaccurate wave-equation simulation, as a forward operator, will offset the results obtained via inversion. In this work, we consider the case where we deal with incomplete physics. One proxy of incomplete physics is an inaccurate discretization of the Laplacian in the simulation of the wave equation via the finite-difference method. We exploit the intrinsic one-to-one similarities between the timestepping algorithm and Convolutional Neural Networks (CNNs), and propose to intersperse CNNs between low-fidelity timesteps. Augmenting neural networks with low-fidelity timestepping algorithms may allow us to take large timesteps while limiting the numerical dispersion artifacts. While simulating the wave equation with a low-fidelity timestepping algorithm, by correcting the wavefield several times during propagation, we hope to limit the numerical dispersion artifacts introduced by a poor discretization of the Laplacian. As a proof of concept, we demonstrate this principle by correcting for numerical dispersion while keeping the velocity model fixed, and varying the source locations to generate training and testing pairs for our supervised learning algorithm.}, keywords = {deep learning, finite difference, wave equation}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2019/siahkoohi2019TRnna/siahkoohi2019TRnna.html}, url2 = {https://arxiv.org/pdf/1910.00925.pdf}, author = {Ali Siahkoohi and Mathias Louboutin and Felix J. Herrmann} } @techreport {witte2017TRcls, title = {Compressive least-squares migration with source estimation}, number = {TR-EOAS-2017-3}, year = {2017}, institution = {UBC}, abstract = {Least-squares reverse-time migration is a powerful approach for true-amplitude seismic imaging of complex geological structures. The successful application of this method is hindered by its exceedingly large computational cost and the required prior knowledge of the generally unknown source wavelet.
We address these problems by introducing an algorithm for low-cost sparsity-promoting least-squares migration with source estimation. We adapt a recent algorithm from sparse optimization, which allows us to work with randomized subsets of shots during each iteration of least-squares migration, while still converging to an artifact-free solution. We modify the algorithm to incorporate on-the-fly source estimation through variable projection, which lets us estimate the wavelet without additional PDE solves. The algorithm is easy to implement and allows imaging at a fraction of the cost of conventional least-squares reverse-time migration, requiring only around two passes through the data, making the method feasible for large-scale imaging problems with unknown source wavelets.}, keywords = {LSRTM, migration, source estimation, sparsity, time domain}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/witte2017TRcls/witte2017TRcls.html}, author = {Philipp A. Witte and Mengmeng Yang and Felix J. Herrmann} } @techreport {witte2016OGHPClst, title = {A large-scale time-domain seismic modeling and inversion workflow in Julia}, number = {TR-EOAS-2017-1}, year = {2017}, institution = {UBC}, abstract = {We present our initial steps towards the development of a large-scale seismic modeling workflow in Julia that provides a framework for wave-equation based inversion methods like full waveform inversion or least-squares migration. Our framework is based on Devito, a finite-difference domain-specific language compiler that generates highly optimized and parallel code. We develop a flexible workflow that is based on abstract matrix-free linear operators and enables developers to write code that closely resembles the underlying math, while at the same time leveraging highly optimized wave-equation solvers, allowing us to solve large-scale three-dimensional inverse problems.}, keywords = {HPC, inversion, Modelling}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/witte2016OGHPClst/witte2016OGHPClst.pdf}, author = {Philipp A. Witte and Mathias Louboutin and Gerard Gorman and Felix J. Herrmann} } @techreport {louboutin2016OGHPCocp, title = {Optimizing the computational performance of time-domain modelling{\textendash}leveraging multiple right-hand-sides}, number = {TR-EOAS-2017-2}, year = {2017}, institution = {UBC}, abstract = {Exploration geophysics heavily relies upon fast solvers for the wave-equation and its adjoint. The main computational cost of a wave-equation solver is to compute the Laplacian, or more complex finite-difference operators, at every time step. The performance of many discretizations is limited by the relatively low operational intensity (number of floating point operations divided by memory traffic) of the finite-difference stencil. Solving the wave-equation for multiple sources/right-hand-sides (RHSs) at once mitigates this problem by increasing the operational intensity. This is implemented by rewriting the classical matrix-vector product into a matrix-matrix product where each column of the second matrix represents the solution wavefield for a given source. This minor modification to the solver is shown to achieve a 2-4 times speedup compared to a single-source solver.
We concentrate in this paper on acoustic modelling, but our approach can easily be extended to anisotropic or elastic cases for both forward and adjoint modelling.}, keywords = {finite differences, HPC, Modelling, time domain}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/louboutin2016OGHPCocp/louboutin2016OGHPCocp.pdf}, author = {Mathias Louboutin and Gerard Gorman and Felix J. Herrmann} } @techreport {louboutin2016SEGocp, title = {Optimizing the computational performance and maintainability of time-domain modelling{\textendash}leveraging multiple right-hand-sides}, number = {TR-EOAS-2016-2}, year = {2016}, month = {06}, institution = {UBC}, abstract = {Numerical solvers for the wave equation are a key component of Full-Waveform Inversion (FWI) and Reverse-Time Migration (RTM). The main computational cost of a wave-equation solver stems from the computation of the Laplacian at each time step. When using a finite-difference discretization, this can be characterized as a structured grid computation within Colella{\textquoteright}s Seven Dwarfs. Independent of the degree of parallelization, the performance will be limited by the relatively low operational intensity (number of operations divided by memory traffic) of finite-difference stencils; that is to say, the method is memory-bandwidth bound. For this reason many developers have focused on porting their code to platforms that have higher memory bandwidth, such as GPUs, or have put significant effort into highly intrusive optimisations. However, these optimisations rarely strike the right performance vs productivity balance as the software becomes less maintainable and extensible. By solving the wave equation for multiple sources/right-hand-sides (RHSs) at once, we overcome this problem, arriving at a time-stepping solver with higher operational intensity. In essence, we arrive at this result by turning the usual matrix-vector products into matrix-matrix products, where the first matrix implements the discretized wave equation and each column of the second matrix contains a separate wavefield for a given source. By making this relatively minor change to the solver, we readily achieved a $2\times$ speedup. While we limit ourselves to acoustic modeling, our approach can easily be extended to the anisotropic or elastic cases.}, keywords = {3D, Modelling, time domain}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2016/louboutin2016SEGocp/louboutin2016SEGocp.html}, author = {Mathias Louboutin and Gerard Gorman and Felix J. Herrmann} } @techreport {herrmann2016SLBors, title = {Overview research at the SINBAD Consortium}, number = {TR-EOAS-2016-1}, year = {2016}, note = {Presented at a seminar at Schlumberger Gould, Cambridge on March 17, 2016.}, month = {03}, institution = {UBC}, keywords = {Presentation, SLIM}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2016/SLB/herrmann2016SLBors/herrmann2016SLBors.pdf}, author = {Felix J. Herrmann} } @techreport {kumar2015EAGElse, title = {Least-squares extended imaging with surface-related multiples}, number = {TR-EOAS-2015-1}, year = {2015}, month = {01}, institution = {UBC}, abstract = {Common image gathers are used in building velocity models, inverting for anisotropy parameters, and analyzing reservoir attributes. Typically, only primary reflections are used to form image gathers, as multiples can cause artifacts that interfere with the events of interest.
However, it has been shown that multiples can actually provide extra illumination of the subsurface, especially for delineating near-surface features. In this paper, we aim to form common image gathers directly from the data with surface-related multiples by applying concepts that have been used to successfully deal with surface-related multiples in imaging. We achieve this by effectively inverting an extended migration operator. This results in extended images with better near-surface illumination that are free of artifacts that can hamper velocity analysis. In addition, being able to generate extended images directly from the total data avoids the need for (time-consuming) pre-processing. Synthetic examples on a layered model show that the proposed formulation is promising.}, keywords = {image gathers, surface-related multiples}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/kumar2015EAGElse/kumar2015EAGElse.html}, author = {Rajiv Kumar and Ning Tu and Tristan van Leeuwen and Felix J. Herrmann} } @techreport {witte2015TRoam, title = {Overview on anisotropic modeling and inversion}, number = {TR-EOAS-2015-6}, year = {2015}, month = {08}, institution = {UBC}, abstract = {This note provides an overview of strategies for modeling and inversion with the anisotropic wave equation. Since linear and non-linear inversion methods like least-squares RTM and Full Waveform Inversion depend on matching observed field data with synthetically modelled data, accounting for anisotropy effects is necessary in order to accurately match waveforms at long offsets and propagation times. In this note, the two main strategies for anisotropic modelling, solving either a pseudo-acoustic wave equation or a pure quasi-P-wave equation, are discussed and an inversion workflow using the pure quasi-P-wave equation is provided. In particular, we derive the exact adjoints of the anisotropic forward modelling and Jacobian operators and give a detailed description of their implementation. The anisotropic FWI workflow is tested on a synthetic data example.}, keywords = {anisotropy, full waveform inversion, Modeling}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/witte2015TRoam/witte2015TRoam.html}, author = {Philipp A. Witte and Mathias Louboutin and Felix J. Herrmann} } @techreport {peters2015EAGErwi, title = {Regularizing waveform inversion by projection onto intersections of convex sets}, number = {TR-EOAS-2015-4}, year = {2015}, month = {01}, institution = {UBC}, abstract = {A framework is proposed for regularizing the waveform inversion problem by projections onto intersections of convex sets. Multiple pieces of prior information about the geology are represented by multiple convex sets, for example limits on the velocity or minimum smoothness conditions on the model. The data-misfit is then minimized, such that the estimated model is always in the intersection of the convex sets. Therefore, it is clear what properties the estimated model will have at each iteration. This approach does not require any quadratic penalties to be used and thus avoids the known problems and limitations of those types of penalties. It is shown that by formulating waveform inversion as a constrained problem, regularization ideas such as Tikhonov regularization and gradient filtering can be incorporated into one framework.
The algorithm is generally applicable, in the sense that it works with any (differentiable) objective function and several gradient- and quasi-Newton-based solvers, and does not require significant additional computation. The method is demonstrated on the inversion of very noisy synthetic data and vertical seismic profiling field data.}, keywords = {convex sets, regularization, waveform inversion}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/peters2015EAGErwi/peters2015EAGErwi.html}, author = {Bas Peters and Brendan R. Smithyman and Felix J. Herrmann} } @techreport {peters2015SEGrwi, title = {Regularizing waveform inversion by projections onto convex sets {\textendash} application to the 2D Chevron 2014 synthetic blind-test dataset}, number = {TR-EOAS-2015-7}, year = {2015}, month = {06}, institution = {UBC}, abstract = {A framework is proposed for regularizing the waveform inversion problem by projections onto intersections of convex sets. Multiple pieces of prior information about the geology are represented by multiple convex sets, for example limits on the velocity or minimum smoothness conditions on the model. The data-misfit is then minimized, such that the estimated model is always in the intersection of the convex sets. Therefore, it is clear what properties the estimated model will have at each iteration. This approach does not require any quadratic penalties to be used and thus avoids the known problems and limitations of those types of penalties. It is shown that by formulating waveform inversion as a constrained problem, regularization ideas such as Tikhonov regularization and gradient filtering can be incorporated into one framework. The algorithm is generally applicable, in the sense that it works with any (differentiable) objective function and does not require significant additional computation. The method is demonstrated on the inversion of the 2D marine isotropic elastic synthetic seismic benchmark by Chevron using an acoustic modeling code. To highlight the effect of the projections, we apply no data pre-processing.}, keywords = {blind-test, projection, regularization, SEG, Wavefield Reconstruction Inversion, waveform inversion}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/peters2015SEGrwi/peters2015SEGrwi.html}, author = {Bas Peters and Zhilong Fang and Brendan R. Smithyman and Felix J. Herrmann} } @techreport {kumar2015EAGEtjm, title = {Time-jittered marine acquisition: low-rank v/s sparsity}, number = {TR-EOAS-2015-2}, year = {2015}, month = {01}, institution = {UBC}, abstract = {Time-jittered simultaneous marine acquisition has been recognized as an economic way of improving the spatial sampling and speeding up acquisition, where a single (or multiple) source vessel fires at jittered source locations and instances in time. It has been shown in the past that this problem can be set up as a compressed sensing problem, where conventional seismic data is reconstructed from blended data via a sparsity-promoting optimization formulation. While the recovery quality of deblended data is very good, the recovery process is computationally very expensive. In this paper, we present a computationally efficient rank-minimization algorithm to deblend the seismic data. The proposed algorithm is suitable for large-scale seismic data, since it avoids SVD computations and uses a low-rank factorized formulation instead.
Results are illustrated with simulations of time-jittered marine acquisition, which translates to jittered source locations for a given speed of the source vessel, for a single source vessel with two airgun arrays.}, keywords = {deblending, low-rank}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/kumar2015EAGEtjm/kumar2015EAGEtjm.html}, author = {Rajiv Kumar and Haneet Wason and Felix J. Herrmann} } @techreport {esser2015tvwri, title = {Total variation regularization strategies in full waveform inversion for improving robustness to noise, limited data and poor initializations}, number = {TR-EOAS-2015-5}, year = {2015}, month = {06}, institution = {UBC}, abstract = {We propose an extended full waveform inversion formulation that includes convex constraints on the model. In particular, we show how to simultaneously constrain the total variation of the slowness squared while enforcing bound constraints to keep it within a physically realistic range. Synthetic experiments show that including total variation regularization can improve the recovery of a high velocity perturbation to a smooth background model, removing artifacts caused by noise and limited data. Total variation-like constraints can make the inversion results significantly more robust to a poor initial model, leading to reasonable results in some cases where unconstrained variants of the method completely fail. Numerical results are presented for portions of the SEG/EAGE salt model and the 2004 BP velocity benchmark. ***Disclaimer.*** *This technical report is ongoing work (and posted as is except for the addition of another author) of the late John "Ernie" Esser (May 19, 1980 - March 8, 2015), who passed away under tragic circumstances. We will work hard to finalize and submit this work to the peer-review literature.* Felix J. Herrmann}, keywords = {cones constraints, hinge loss, total-variation, Wavefield Reconstruction Inversion}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/esser2015tvwri/esser2015tvwri.html}, author = {Ernie Esser and Llu{\'\i}s Guasch and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann} } @techreport {lago2015EAGEtrg, title = {Towards a robust geometric multigrid scheme for Helmholtz equation}, number = {TR-EOAS-2015-3}, year = {2015}, month = {01}, institution = {UBC}, abstract = {We discuss an improvement of existing multigrid techniques for the solution of the time-harmonic wave equation, targeting application to seismic inversion and imaging, using non-traditional smoothing and coarse correction techniques, namely the CGMN and CRMN methods. We aim at developing a multigrid scheme to be used as a preconditioner for FGMRES that shows less sensitivity to changes in the discretization of the operator. We compare this multigrid scheme with recent developments in the multigrid field, obtaining very satisfactory results. Our numerical experiments using the SEG/EAGE Overthrust velocity model show not only more robustness when switching from a basic 7-point stencil to a more compact 27-point stencil, but also a considerable reduction in the number of preconditioning steps required to attain convergence, a result encouraging further investigation.}, keywords = {CGMN, CRMN, Helmholtz, multigrid}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/lago2015EAGEtrg/lago2015EAGEtrg.pdf}, author = {Rafael Lago and Felix J.
Herrmann} } @techreport {wang2014SEGfwi, title = {Full waveform inversion with interferometric measurements}, number = {TR-EOAS-2014-5}, year = {2014}, month = {04}, institution = {UBC}, abstract = {In this note, we design new misfit functions for full-waveform inversion by using interferometric measurements to reduce sensitivity to phase errors. Though established within a completely different setting from the linear case, we obtain a similar observation: interferometry can improve robustness under certain modeling errors. Moreover, in order to deal with errors on both source and receiver sides, we propose a higher-order interferometry, which, as a generalization of the usual definition, involves the cross-correlation of four traces. Proof-of-principle simulations are included on a stylized example.}, keywords = {FWI}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/wang2014SEGfwi/wang2014SEGfwi.html}, author = {Rongrong Wang and Ozgur Yilmaz and Felix J. Herrmann} } @techreport {smithyman2014SEGjfw, title = {Joint full-waveform inversion of on-land surface and VSP data from the Permian Basin}, number = {TR-EOAS-2014-4}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Full-waveform inversion is applied to generate a high-resolution model of P-wave velocity for a site in the Permian Basin, Texas, USA. This investigation jointly inverts seismic waveforms from a 3-D vibroseis surface seismic survey and a co-located 3-D Vertical Seismic Profiling (VSP) survey, which shared common source Vibration Points (VPs). The resulting velocity model captures features that were not resolvable by conventional migration velocity analysis.}, keywords = {downhole receivers, Full-waveform inversion, land, seismic, vibroseis}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/smithyman2014SEGjfw/smithyman2014SEGjfw.html}, author = {Brendan R. Smithyman and Bas Peters and Bryan DeVault and Felix J. Herrmann} } @techreport {kumar2014SEGmcu, title = {Matrix completion on unstructured grids: 2-D seismic data regularization and interpolation}, number = {TR-EOAS-2014-3}, year = {2014}, month = {04}, institution = {UBC}, abstract = {Seismic data interpolation via rank-minimization techniques has been recently introduced in the seismic community. All the existing rank-minimization techniques assume the recording locations to be on a regular grid, e.g. sampled periodically, but seismic data are typically irregularly sampled along spatial axes. In addition to the irregularity of the sampled grid, we often have missing data. In this paper, we study the effect of grid irregularity when conducting matrix completion on a regular grid for unstructured data. We propose an improvement of existing rank-minimization techniques to perform regularization. We also demonstrate that we can perform seismic data regularization and interpolation simultaneously. We illustrate the advantages of the modification using a real seismic line from the Gulf of Suez to obtain high-quality results for regularization and interpolation, a key application in exploration geophysics.}, keywords = {interpolation, matrix completion, NFFT, regularization}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/kumar2014SEGmcu/kumar2014SEGmcu.html}, author = {Rajiv Kumar and Oscar Lopez and Ernie Esser and Felix J.
Herrmann} } @techreport {slim2014NSERCapp, title = {NSERC 2014 DNOISE application}, number = {TR-EOAS-2014-7}, year = {2014}, institution = {UBC}, abstract = {This proposal describes a comprehensive five-year continuation of our research project in dynamic nonlinear optimization for imaging in seismic exploration (DNOISE). DNOISE III{\textemdash}Exploration Seismology in the Petascale Age builds on the proven track record of our multidisciplinary research team that conducts transformative research in the fields of seismic-data acquisition, processing, and wave-equation based inversion. The overarching goals of the DNOISE series of projects can be simply summarized as: {\textquoteleft}{\textquoteleft}How to image more deeply and with more detail?" and {\textquoteleft}{\textquoteleft}How to do more with less data?" Also, to help overcome the current substantial challenges in the oil and gas industry, we maintain this focus with more specific follow-up questions such as: {\textquoteleft}{\textquoteleft}How can we control costs and remove acquisition-related artifacts in 3D (time-lapse) seismic data sets?" and {\textquoteleft}{\textquoteleft}How can we replace conventional seismic data processing with wave-equation based inversion, control computational costs, assess uncertainties, extract reservoir information and remove sensitivity to starting models?" To answer these questions, we have assembled an expanded cross-disciplinary research team with backgrounds in scientific computing (SC), machine learning (ML), compressive sensing (CS), hardware design, and computational and observational exploration seismology (ES). With this team, we will continue to drive innovations in ES by utilizing our unparalleled access to high-performance computing (HPC), our expertise and experience in CS and wave-equation based inversion (WEI), and our proven abilities in incorporating our research findings into practical scalable software of our inversion solutions.}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2014/DNOISEIII/crd.html}, author = {Felix J. Herrmann} } @techreport {slim2014NSERCpr, title = {NSERC 2014 DNOISE progress report}, number = {TR-EOAS-2014-8}, year = {2014}, institution = {UBC}, abstract = {As we entered the second half of the DNOISE II project, we are happy to report that we have made significant progress on several fronts. Firstly, our work on seismic data acquisition with compressive sensing is becoming widely recognized. For instance, ConocoPhillips ran a highly successful field trial on marine acquisition with compressive sensing and obtained significant improvements compared to standard production (see figure below). Moreover, one of the main outcomes of this year{\textquoteright}s EAGE workshop was that industry is ready to adopt randomized sampling as a new acquisition paradigm. Needless to say, this is a big success for what we have been trying to accomplish with DNOISE II. Finally, we have made a breakthrough in the application of randomized sampling in 4-D seismic, which is receiving a lot of interest from industry. Secondly, our work on large-scale optimization in the context of wave-equation based inversion is also increasingly widely adopted. For instance, our batching techniques are making the difference between making a loss or a profit for a large contractor company active in the area of full-waveform inversion.
We also continued to make progress in exciting new directions that go beyond sparsity promotion and which allow us to exploit other types of structure within the data, such as low-rank for matrices or hierarchical Tucker formats for tensors. Application of these techniques shows excellent results and, in certain cases, such as source separation problems with small dithering, significant improvements over transform-domain methods. Thirdly, we continued to make significant progress in wave-equation based inversion. We extended our new penalty-based formulation, now called Wavefield Reconstruction Inversion/Imaging, to include total-variation regularization and density variations. We also continued to make progress on multiples, imaging with multiples and 3-D full-waveform inversion. Statoil is the latest company to join, and we have several other companies that have shown a keen interest. We also received substantial in-kind contributions including a license to WesternGeco{\textquoteright}s iOmega and HPC equipment discounts. After many years of support, BP unfortunately decided to no longer support SINBAD, citing financial headwinds related to the Deepwater Horizon disaster. On a more positive note, we are extremely happy to report major progress on our efforts to secure access to high-performance compute, including renewed funding from NSERC and our involvement in the International Inversion Initiative in Brazil. Nine peer-reviewed journal publications have resulted from our work within the reporting period, with a further six submitted, and DNOISE members disseminated the results of our research at 49 major national and international conference presentations. On the HQP training side, 4 MSc students have recently graduated, with one obtaining a position with CGG Calgary, and we added 4 postdocs and 3 PhD students to our team in September 2014, greatly increasing our research capacity. As can be seen from the report below, we are well on schedule and on certain topics well beyond the milestones included in the original proposal. With the purchase of the new cluster, we expect to see a surge of activity in extending our algorithms to 3D. With this increased capacity, we continue to be in an excellent position to make fundamental contributions to the fields of seismic data acquisition, processing, and wave-equation based inversion. In the sections below, we give a detailed overview of the research and publication activities of the different members of the group and how these relate to the objectives of the grant, to industrial uptake, and to outreach. Unless stated otherwise, the students and PDFs are (co-)supervised by the PI. We refer to the publications section 4.0 for a complete list of our presentations, conference proceedings, and journal publications. We also refer to our mindmap, which clearly establishes connections between the different research topics we have embarked upon as part of the DNOISE II project.}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2014/Progress_Report_2014.html}, author = {Felix J. Herrmann} } @techreport {herrmann2014pmpde, title = {A penalty method for PDE-constrained optimization}, number = {WO 2014/172787}, year = {2014}, note = {(International publication date 30 October 2014.
International publication number WO 2014/172787.)}, month = {10}, institution = {UBC}, type = {Patent}, abstract = {The invention is directed to a computer-implemented method for obtaining a physical model having physical model parameters wherein solutions to one or more partial differential equations (PDEs) are calculated and wherein (i) an appropriate initial model is selected, (ii) a system of equations referred to as the data-augmented PDE for the field is set up, comprising the discretized PDE, the sampling operator, the source function and the observed data, and the data-augmented PDE is solved in a suitable manner to obtain a field that both satisfies the PDE and fits the data to some degree, (iii) a system of equations is set up by using the PDE, the source function and the field obtained in step (ii) and this system of equations is solved in a suitable manner to obtain an update of the physical model parameters, and steps (ii)-(iii) are repeated until a predetermined stopping criterion is met.}, keywords = {Optimization, patent, penalty method}, url = {https://slim.gatech.edu/Publications/Public/Patents/2014/herrmann2014pmpde/herrmann2014pmpde_WO2014172787.pdf}, url2 = {http://patentscope.wipo.int/search/en/WO2014172787}, author = {Felix J. Herrmann and Tristan van Leeuwen} } @techreport {esser2014SEGsgp, title = {A scaled gradient projection method for total variation regularized full waveform inversion}, number = {TR-EOAS-2014-2}, year = {2014}, month = {04}, institution = {UBC}, abstract = {We propose an extended full waveform inversion formulation that includes convex constraints on the model. In particular, we show how to simultaneously constrain the total variation of the slowness squared while enforcing bound constraints to keep it within a physically realistic range. Synthetic experiments show that including total variation regularization can improve the recovery of a high velocity perturbation to a smooth background model.}, keywords = {convex constraints, full waveform inversion, total variation regularization}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/esser2014SEGsgp/esser2014SEGsgp.html}, author = {Ernie Esser and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann} } @techreport {esser2014sln, title = {Some lifting notes}, number = {TR-EOAS-2014-1}, year = {2014}, note = {written on February 15, 2014}, month = {02}, institution = {UBC}, keywords = {convex semidefinite programming, lifting, low rank, nonconvex quadratic problems}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/esser2014sln/esser2014sln.pdf}, author = {Ernie Esser} } @techreport {zfang2014SEGsqn, title = {A stochastic quasi-Newton McMC method for uncertainty quantification of full-waveform inversion}, number = {TR-EOAS-2014-6}, year = {2014}, month = {04}, institution = {UBC}, abstract = {In this work we propose a stochastic quasi-Newton Markov chain Monte Carlo (McMC) method to quantify the uncertainty of full-waveform inversion (FWI). We formulate the uncertainty quantification problem in the framework of Bayesian inference, which formulates the posterior probability as the conditional probability of the model given the observed data. The Metropolis-Hastings algorithm is used to generate samples satisfying the posterior probability density function (pdf) to quantify the uncertainty.
However, it suffers from the challenge of constructing a proposal distribution that simultaneously provides a good representation of the true posterior pdf and is easy to manipulate. To address this challenge, we propose a stochastic quasi-Newton McMC method, which relies on the fact that the Hessian of the deterministic problem is equivalent to the inverse of the covariance matrix of the posterior pdf. The l-BFGS (limited-memory Broyden{\textendash}Fletcher{\textendash}Goldfarb{\textendash}Shanno) Hessian is used to approximate the inverse of the covariance matrix efficiently, and a randomized source sub-sampling strategy is used to reduce the computational cost of evaluating the posterior pdf and constructing the l-BFGS Hessian. Numerical experiments show the capability of this stochastic quasi-Newton McMC method to quantify the uncertainty of FWI at a considerably low cost.}, keywords = {FWI, McMC, quasi-Newton, Uncertainty quantification}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/zfang2014SEGsqn/zfang2014SEGsqn.html}, author = {Zhilong Fang and Felix J. Herrmann} } @techreport {tschannen2014tdl, title = {Time domain least squares migration and dimensionality reduction}, number = {TR-EOAS-2014-9}, year = {2014}, month = {06}, institution = {UBC}, abstract = {Least-squares seismic migration (LSM) is a wave-equation based linearized inversion problem relying on the minimization of a least-squares misfit function, with respect to the medium perturbation, between recorded and modeled wavefields. Today{\textquoteright}s challenges in hydrocarbon exploration are to build high-resolution images of more and more complicated geological reservoirs, which requires handling very large systems of equations. The extreme size of the problem, combined with the fact that it is ill-conditioned, makes LSM not yet feasible for industrial purposes. To overcome this "curse of dimensionality", dimension reduction and divide-and-conquer techniques that aim to decrease the computation time and the required memory, while conserving the image quality, have recently been developed. By borrowing ideas from stochastic optimization and compressive sensing, the imaging problem is reformulated as an L1-regularized, sparsity-promoting LSM. The idea is to take advantage of the compressibility of the model perturbation in the curvelet domain and to work on a series of smaller subproblems, each involving a small randomized subset of data. We try two different subset sampling strategies: artificial randomized simultaneous-source experiments ("supershots") and drawing sequential shots firing at random source locations. These subsets are changed after each subproblem is solved. In both cases we obtain good migration results at significantly reduced computational cost. Application of these methods to a complicated synthetic model yields encouraging results, underlining the usefulness of sparsity promotion and randomization in the time-stepping formulation.}, keywords = {Compressive Sensing, sparsity promotion, Stochastic optimization, Wave equation migration}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/tschannen2014tdl/tschannen2014tdl.pdf}, author = {Valentin Tschannen and Zhilong Fang and Felix J.
Herrmann} } @techreport {slim2013NSERCpr, title = {NSERC 2013 DNOISE progress report}, number = {TR-EOAS-2013-5}, year = {2013}, institution = {UBC}, abstract = {As we enter the second half of the DNOISE II project, we are happy to report that we have made significant progress on several fronts. Firstly, our work on seismic data acquisition with compressive sensing is becoming widely recognized, reflected in adaptations of this technology by industry and in this year{\textquoteright}s SEG Karcher award, which went to Gilles Hennenfent, who was one of the researchers who started working in this area in our group. As this report shows, we continued to make progress on this topic with numerous presentations, publications, and software releases. Secondly, our work on large-scale optimization is also widely adopted and instrumental to the different research areas on the grant. In particular, we are excited about new directions that go beyond sparsity promotion and which allow us to exploit other types of structure within the data, such as low rank. Over the near future, we expect to see a body of new research based on these findings touching acquisition as well as the wave-equation based inversion aspects of our research program. Thirdly, we are also very happy to report that we continued to make substantial progress in wave-equation based inversion. In particular, we would like to mention successes in the areas of acceleration of sparsity-promoting imaging with source estimation and multiples and in theoretical as well as practical aspects of full-waveform inversion. We derived a highly practical and economic formulation of 3-D FWI and we also came up with a completely new formulation of FWI, which mitigates issues related to cycle skipping. Finally, we made a lot of progress applying our algorithm to industrial datasets, which has been well received by industry. Our findings show that FWI is still an immature technology calling for more theoretical input and for the development of practical workflows. Over the last year, our work culminated in 14 peer-reviewed journal publications, 5 submitted journal publications, 13 (+ 9) extended abstracts, 32 talks at international conferences, and 6 software packages. Finally, we are happy to report that we have been joined by several new companies, namely ION Geophysical, CGG, and Woodside. At this midpoint of the Grant, we are also happy to report that we are well on schedule to meet the milestones included in the original proposal. Given our wide range of expertise and our plans to replace our compute cluster, we continue to be in an excellent position to make fundamental contributions to the fields of seismic data acquisition, processing, and wave-equation based inversion.}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2013/Progress_Report_2013.pdf}, author = {Felix J. Herrmann} } @techreport {vanLeeuwen2013Penalty2, title = {A penalty method for PDE-constrained optimization}, number = {TR-EOAS-2013-6}, year = {2013}, month = {04}, institution = {UBC}, abstract = {We present a method for solving PDE-constrained optimization problems based on a penalty formulation. This method aims to combine the advantages of both full-space and reduced methods by exploiting a large search space (consisting of both control and state variables) while allowing for an efficient implementation that avoids storing and updating the state variables.
This leads to a method that has roughly the same per-iteration complexity as conventional reduced approaches while defining an objective that is less non-linear in the control variable by implicitly relaxing the constraint. We apply the method to a seismic inverse problem, where it leads to a particularly efficient implementation when compared to a conventional reduced approach as it avoids the use of adjoint state variables. Numerical examples illustrate the approach and suggest that the proposed formulation can indeed mitigate some of the well-known problems with local minima in the seismic inverse problem.}, keywords = {Optimization, waveform inversion}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/vanLeeuwen2013Penalty2/vanLeeuwen2013Penalty2.pdf}, author = {Tristan van Leeuwen and Felix J. Herrmann} } @techreport {petrenko2013SEGsaoc, title = {Software acceleration of CARP, an iterative linear solver and preconditioner}, number = {TR-EOAS-2013-4}, year = {2013}, institution = {UBC}, abstract = {We present the results of software optimization of a row-wise preconditioner (Component Averaged Row Projections) for the method of conjugate gradients, which is used to solve the diagonally banded Helmholtz system representing frequency-domain, isotropic acoustic seismic wave simulation. We demonstrate that in our application, a preconditioner bound to one processor core and accessing memory contiguously reduces execution time by 7\% for matrices having on the order of $10^8$ non-zeros. For reference, we note that our C implementation is over 80 times faster than the corresponding code written for a high-level numerical analysis language.}, keywords = {frequency-domain, Helmholtz equation, Kaczmarz, software, wave propagation}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/petrenko2013SEGsaoc/petrenko2013SEGsaoc.pdf}, author = {Art Petrenko and Tristan van Leeuwen and Felix J. Herrmann} } @techreport {kumar2013ICMLlr, title = {An SVD-free Pareto curve approach to rank minimization}, number = {TR-EOAS-2013-2}, year = {2013}, month = {02}, institution = {UBC}, abstract = {Recent SVD-free matrix factorization formulations have enabled rank optimization for extremely large-scale systems (millions of rows and columns). In this paper, we consider rank-regularized formulations that only require a target data-fitting error level, and propose an algorithm for the corresponding problem. We illustrate the advantages of the new approach using the Netflix problem, and use it to obtain high-quality results for seismic trace interpolation, a key application in exploration geophysics. We show that factor rank can be easily adjusted as the inversion proceeds, and propose a weighted extension that allows known subspace information to improve the results of matrix completion formulations. Using these methods, we obtain high-quality reconstructions for large-scale seismic interpolation problems with real data.}, keywords = {interpolation, low rank}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/kumar2013ICMLlr/kumar2013ICMLlr.pdf}, author = {Aleksandr Y. Aravkin and Rajiv Kumar and Hassan Mansour and Ben Recht and Felix J. Herrmann} } @techreport {oghenekohwo2013SEGtlswrs, title = {Time-lapse seismics with randomized sampling}, number = {TR-EOAS-2013-3}, year = {2013}, institution = {UBC}, abstract = {In time-lapse or 4D seismics, repeatability of the acquisition is a very crucial step, as we do not want to introduce spurious events that are not actually there.
In this paper, we propose an approach that avoids any requirement to repeat the surveys by using a randomized sampling technique that allows us to be more efficient in the acquisition. Our method applies to sampling data using ocean bottom nodes (OBN) as receivers. We test the efficacy of our proposed randomized acquisition geometry for time-lapse surveys on two different models. In the first example, the model properties do not change with time, while in the second example, the model exhibits a time-lapse effect which may be caused by the migration of fluid within the reservoir. We perform two types of randomized sampling - uniform randomized sampling and jittered sampling - to visualize the effects of non-repeatability in time-lapse surveys. We observe that jittered randomized sampling is a more efficient method compared to uniform randomized sampling, due to its requirement to control the maximum spacing between the receivers. The results are presented, in the image space, as a least-squares migration of the model perturbation and they are shown for a subset of a synthetic model - the Marmousi model.}, keywords = {Acquisition, migration, time-lapse}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/oghenekohwo2013SEGtlswrs/oghenekohwo2013SEGtlswrs.pdf}, author = {Felix Oghenekohwo and Felix J. Herrmann} } @techreport {li2013EAGEwebmplijsp, title = {Wave-equation based multi-parameter linearized inversion with joint-sparsity promotion}, number = {TR-EOAS-2013-1}, year = {2013}, month = {01}, institution = {UBC}, abstract = {The successful application of linearized inversion is affected by the prohibitive size of the data, the computational resources required, and how accurately the model parameters reflect the real Earth properties. The issue of data size and computational resources can be addressed by combining ideas from sparsity promotion and stochastic optimization, which allow us to invert the model perturbation with a small subset of the data, yielding only a few PDE solves for the inversion. In this abstract, we aim to address the issue of the accuracy of model parameters by inverting density and velocity simultaneously rather than only using velocity. As a matter of fact, the effects of density and velocity variations on the wavefield are very similar, which will cause energy leakage between density and velocity images. To overcome this issue, we propose an incoherence-enhanced method that can reduce the similarity between the effects of density and velocity. Moreover, the locations of structural variations in velocity and density often overlap in geological settings; thus, in this abstract, we also exploit this property with joint-sparsity promotion to further improve the imaging result.}, keywords = {incoherence enhancement, joint-sparsity, Linearized inversion}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/li2013EAGEwebmplijsp/li2013EAGEwebmplijsp.pdf}, author = {Xiang Li and Felix J. Herrmann} } @techreport {rajiv2012SEGFRM, title = {Fast methods for rank minimization with applications to seismic-data interpolation}, number = {TR-EOAS-2012-3}, year = {2012}, month = {04}, institution = {Department of Earth and Ocean Sciences}, address = {University of British Columbia, Vancouver}, abstract = {Rank-penalizing techniques are an important direction in seismic inverse problems, since they allow improved recovery by exploiting low-rank structure.
A major downside of current state-of-the-art techniques is their reliance on the SVD of seismic data structures, which can be prohibitively expensive. Fortunately, recent work allows us to circumvent this problem by working with matrix factorizations. We review a novel approach to rank penalization, and successfully apply it to the seismic interpolation problem by exploiting the low-rank structure of seismic data in the midpoint-offset domain. Experiments for the recovery of 2D monochromatic data matrices and seismic lines represented as 3D volumes support the feasibility and potential of the new approach.}, keywords = {Optimization, Rank, Seismic data Interpolation}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2012/rajiv2012SEGFRM/rajiv2012SEGFRM.pdf}, author = {Rajiv Kumar and Aleksandr Y. Aravkin and Felix J. Herrmann} } @techreport {vanleeuwen2012CGMN, title = {Fourier analysis of the CGMN method for solving the Helmholtz equation}, number = {TR-EOAS-2012-1}, year = {2012}, institution = {Department of Earth, Ocean and Atmospheric Sciences}, address = {The University of British Columbia, Vancouver}, abstract = {The Helmholtz equation arises in many applications, such as seismic and medical imaging. These applications are characterized by the need to propagate many wavelengths through an inhomogeneous medium. The typical size of the problems in 3D applications precludes the use of direct factorization to solve the equation and hence iterative methods are used in practice. For higher wavenumbers, the system becomes increasingly indefinite and thus good preconditioners need to be constructed. In this note we consider an accelerated Kaczmarz method (CGMN) and present an expression for the resulting iteration matrix. This iteration matrix can be used to analyze the convergence of the CGMN method. In particular, we present a Fourier analysis for the method applied to the 1D Helmholtz equation. This analysis suggests an optimal choice of the relaxation parameter. Finally, we present some numerical experiments.}, keywords = {Helmholtz equation, Modelling}, url = {http://arxiv.org/abs/1210.2644}, author = {Tristan van Leeuwen} } @techreport {slim2012NSERCpr, title = {NSERC 2012 DNOISE progress report}, number = {TR-EOAS-2012-4}, year = {2012}, institution = {UBC}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2012/Progress_Report_2012.pdf}, author = {Felix J. Herrmann} } @techreport {vanleeuwen2012smii, title = {A parallel matrix-free framework for frequency-domain seismic modelling, imaging and inversion in Matlab}, number = {TR-EOAS-2012-5}, year = {2012}, month = {07}, abstract = {I present a parallel matrix-free framework for frequency-domain seismic modeling, imaging and inversion. The framework provides basic building blocks for designing and testing optimization-based formulations of both linear and non-linear seismic inverse problems. By overloading standard linear-algebra operations, such as matrix-vector multiplications, standard optimization packages can be used to work with the code without any modification. This leads to a scalable testbed on which new methods can be rapidly prototyped and tested on medium-sized 2D problems.
I present some numerical examples on both linear and non-linear seismic inverse problems.}, keywords = {Matlab, object-oriented programming, Optimization, seismic imaging}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2012/vanleeuwen2012smii/vanleeuwen2012smii.pdf}, author = {Tristan van Leeuwen} } @techreport {vanleeuwen2012SEGparallel, title = {A parallel, object-oriented framework for frequency-domain wavefield imaging and inversion.}, number = {TR-EOAS-2012-2}, year = {2012}, month = {04}, institution = {Department of Earth and Ocean Sciences}, address = {University of British Columbia, Vancouver}, abstract = {We present a parallel object-oriented matrix-free framework for frequency-domain seismic modeling, imaging and inversion. The key aspects of the framework are its modularity and level of abstraction, which allow us to write code that reflects the underlying mathematical structure and to develop unit tests that guarantee the fidelity of the code. By overloading standard linear-algebra operations, such as matrix-vector multiplications, we can use standard optimization packages to work with our code without any modification. This leads to a scalable testbed on which new methods can be rapidly prototyped and tested on medium-sized 2D problems. Although our current implementation uses (parallel) Matlab, all of these design principles can also be met by using lower-level languages, which is important when we want to scale to realistic 3D problems. We present some numerical examples on synthetic data.}, keywords = {Imaging, inversion, Modeling, SEG}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2012/vanleeuwen2012SEGparallel/vanleeuwen2012SEGparallel.pdf}, author = {Tristan van Leeuwen and Felix J. Herrmann} } @techreport {slim2011NSERCpr, title = {NSERC 2011 DNOISE progress report}, number = {TR-EOAS-2011-1}, year = {2011}, institution = {UBC}, abstract = {The main thrust of the DNOISE project is focused on the following research themes: [1] seismic acquisition design and recovery from incomplete data, with the goal of reducing acquisition costs while increasing the spatial bandwidth and aperture of seismic data; [2] Removal of the {\textquoteright}surface nonlinearity{\textquoteright} by simultaneous estimation of the source signature and the surface-free Green{\textquoteright}s function by inverting the surface-related multiple prediction operator; [3] Reduction of the computational complexity of full-waveform inversion (FWI) by randomized dimensionality reduction; [4] "Convexification" of FWI to remove or at least diminish the adverse effects of the non-uniqueness that has plagued FWI since its inception. The first three themes are directed towards removing major impediments faced by FWI related to the costs of acquiring data, the computational costs of processing and inverting data, and to issues with source calibration and surface-related multiples. The final theme is more {\textquoteright}blue sky{\textquoteright} and tries to incorporate ideas from migration-velocity analysis into the formulation of full-waveform inversion. Aside from these themes, we will continue to work on seismic data acquisition schemes that favor sparsity-promoting recovery and on the development of large-scale solvers using recent developments in convex and stochastic optimization.}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2011/nserc-2011-dnoise-progress-report.pdf}, author = {Felix J.
Herrmann} } @techreport {herrmann2010SEGerc, title = {Empirical recovery conditions for seismic sampling}, number = {TR-EOAS-2010-2}, year = {2010}, institution = {Department of Earth and Ocean Sciences, UBC}, abstract = {In this paper, we offer an alternative sampling method leveraging recent insights from compressive sensing towards seismic acquisition and processing for data that are traditionally considered to be undersampled. The main outcome of this approach is a new technology where acquisition- and processing-related costs are no longer determined by overly stringent sampling criteria, such as Nyquist. At the heart of our approach lies randomized incoherent sampling that breaks subsampling-related interferences by turning them into harmless noise, which we subsequently remove by promoting transform-domain sparsity. Now, costs no longer grow with the resolution and dimensionality of the survey area, but instead depend on transform-domain sparsity only. Our contribution is twofold. First, we demonstrate by means of carefully designed numerical experiments that compressive sensing can successfully be adapted to seismic acquisition. Second, we show that accurate recovery can be accomplished for compressively sampled data volumes with sizes that exceed the size of conventional transform-domain data volumes by only a small factor. Because compressive sensing combines transformation and encoding in a single linear encoding step, this technology is directly applicable to acquisition and to dimensionality reduction during processing. In either case, sampling, storage, and processing costs scale with transform-domain sparsity.}, url = {https://slim.gatech.edu/Publications/Public/Conferences/SEG/2010/herrmann10SEGerc/herrmann10SEGerc.pdf}, author = {Felix J. Herrmann} } @techreport {almatar10SEGesfd, title = {Estimation of surface-free data by curvelet-domain matched filtering and sparse inversion}, number = {TR-EOAS-2010-1}, year = {2010}, institution = {Department of Earth and Ocean Sciences}, address = {University of British Columbia, Vancouver}, abstract = {Matching seismic wavefields and images lies at the heart of many pre- and post-processing steps that are part of seismic imaging: whether one is matching predicted wavefield components, such as multiples, to the actual to-be-separated wavefield components present in the data, or whether one is aiming to restore migration amplitudes by scaling, using an image-to-remigrated-image matching procedure to calculate the scaling coefficients. The success of these wavefield matching procedures depends on our ability to (i) control possible overfitting, which may lead to accidental removal of energy or to inaccurate image-amplitude corrections, (ii) handle data or images with nonunique dips, and (iii) apply subsequent wavefield separations or migration amplitude corrections stably. In this paper, we show that the curvelet transform allows us to address all these issues by imposing smoothness in phase space, by using its capability to handle conflicting dips, and by leveraging its ability to represent seismic data and images sparsely. This latter property renders curvelet-domain sparsity promotion an effective prior.}, keywords = {SEG}, url = {https://slim.gatech.edu/Publications/Public/Conferences/SEG/2010/almatar10SEGesfd/almatar10SEGesfd.pdf}, author = {Mufeed H. AlMatar and Tim T.Y. Lin and Felix J.
Herrmann} } @techreport {slim2010NSERCapp, title = {NSERC 2010 DNOISE application}, number = {TR-EOAS-2010-3}, year = {2010}, institution = {UBC}, abstract = {DNOISE II: Dynamic Nonlinear Optimization for Imaging in Seismic Exploration is a multidisciplinary research project that involves faculty from the Mathematics, Computer Science, and Earth and Ocean Sciences Departments at the University of British Columbia. DNOISE II constitutes a transformative research program towards a new paradigm in seismic exploration where the acquisition- and processing-related costs are no longer determined by the survey area and discretization but by the transform-domain sparsity of the final result. In this approach, we rid ourselves of the confinements of conventional, overly stringent sampling criteria that call for regular sampling with sequential sources at Nyquist rates. By adapting the principles of compressive sensing, DNOISE II promotes a ground-up formulation for seismic imaging where adverse subsampling-related artifacts are removed by intelligent simultaneous-acquisition design and recovery by transform-domain sparsity promotion. This development{\textendash}-in conjunction with our track records in sparse recovery and time-harmonic Helmholtz solvers{\textendash}-puts us in a unique position to deliver on fundamental breakthroughs in the development and implementation of the next generation of processing, imaging, and full-waveform inversion solutions.}, keywords = {DNOISE, NSERC}, url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2010/nserc-2010-dnoise-application.pdf}, author = {Felix J. Herrmann} } @techreport {tang09TRdtr, title = {Design of two-dimensional randomized sampling schemes for curvelet-based sparsity-promoting seismic data recovery}, number = {TR-EOAS-2009-3}, year = {2009}, institution = {UBC Earth and Ocean Sciences Department}, abstract = {The tasks of sampling, compression and reconstruction are very common and often necessary in seismic data processing due to the large size of seismic data. Curvelet-based Recovery by Sparsity-promoting Inversion (CRSI), motivated by the newly developed theory of compressive sensing, is among the best recovery strategies for seismic data. The incomplete data input to this curvelet-based recovery is determined by randomized sampling of the original complete data. Unlike usual regular undersampling, randomized sampling can convert aliases to easy-to-eliminate noise, thus facilitating the process of reconstructing the complete data from the incomplete data. Randomized sampling methods such as jittered sampling that are suitable for curvelet-based recovery have been developed in the past; however, most have only been applied to sampling in one dimension. Considering that seismic datasets are usually higher dimensional and extremely large, in the present paper we extend the 1D version of jittered sampling to two dimensions, both with underlying Cartesian and hexagonal grids. We also study separable and non-separable two-dimensional jittered sampling, the former referring to the Kronecker product of two one-dimensional jittered samplings. These different categories of jittered sampling are compared against one another in terms of signal-to-noise ratio and visual quality, from which we find that jittered hexagonal sampling is better than jittered Cartesian sampling, while fully non-separable jittered sampling is better than separable sampling.
Because sampling patterns with blue-noise spectra are found to be ideal for avoiding aliasing in the image-processing and computer-graphics literature, we also introduce two other randomized sampling methods possessing sampling spectra with beneficial blue-noise characteristics: Poisson Disk sampling and Farthest Point sampling. We compare these methods, and apply the introduced sampling methodologies to higher-dimensional curvelet-based reconstruction. These sampling schemes are shown to lead to better results from CRSI compared to other, more traditional sampling protocols, e.g. regular subsampling.}, url = {https://slim.gatech.edu/Publications/Public/Journals/2009/tang09TRdtr/tang09TRdtr.pdf}, author = {Gang Tang and Reza Shahidi and Jianwei Ma} } @techreport {vandenberg08gsv, title = {Group sparsity via linear-time projection}, number = {TR-2008-09}, year = {2008}, month = {06}, institution = {UBC - Department of Computer Science}, abstract = {We present an efficient spectral projected-gradient algorithm for optimization subject to a group one-norm constraint. Our approach is based on a novel linear-time algorithm for Euclidean projection onto the one- and group one-norm constraints. Numerical experiments on large data sets suggest that the proposed method is substantially more efficient and scalable than existing methods.}, keywords = {Optimization, SLIM}, url = {http://www.optimization-online.org/DB_FILE/2008/07/2056.pdf}, author = {Ewout van den Berg and Mark Schmidt and Michael P. Friedlander and K. Murphy} } @techreport {lebed08TRhgg, title = {A hitchhiker{\textquoteright}s guide to the galaxy of transform-domain sparsification}, number = {TR-EOAS-2008-4}, year = {2008}, institution = {UBC Earth and Ocean Sciences Department}, abstract = {The ability to efficiently and sparsely represent seismic data is becoming an increasingly important problem in geophysics. Over the last decade, many transforms such as wavelets, curvelets, contourlets, surfacelets, shearlets, and many other types of {\textquoteright}x-lets{\textquoteright} have been developed to try to resolve this issue. In this abstract we compare the properties of four of these commonly used transforms, namely the shift-invariant wavelets, complex wavelets, curvelets and surfacelets. We also briefly explore the performance of these transforms for the problem of recovering seismic wavefields from incomplete measurements.}, keywords = {SLIM}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2008/lebed08TRhgg/lebed08TRhgg.pdf}, author = {Evgeniy Lebed and Felix J. Herrmann} } @techreport {hennenfent08TRori, title = {One-norm regularized inversion: learning from the Pareto curve}, number = {TR-EOAS-2008-5}, year = {2008}, institution = {UBC Earth and Ocean Sciences Department}, abstract = {Geophysical inverse problems typically involve a trade-off between data misfit and some prior. Pareto curves trace the optimal trade-off between these two competing aims. These curves are commonly used in problems with two-norm priors, where they are plotted on a log-log scale and are known as L-curves. For other priors, such as the sparsity-promoting one norm, Pareto curves remain relatively unexplored. First, we show how these curves provide an objective criterion to gauge how robust one-norm solvers are when they are limited by a maximum number of matrix-vector products that they can perform. Second, we use Pareto curves and their properties to define and compute one-norm compressibilities.
We argue that this notion is key to understanding one-norm regularized inversion. Third, we illustrate the correlation between the one-norm compressibility and the performance of Fourier and curvelet reconstructions with sparsity-promoting inversion.}, keywords = {SLIM}, url = {https://slim.gatech.edu/Publications/Public/TechReport/2008/hennenfent08TRori/hennenfent08TRori.pdf}, author = {Gilles Hennenfent and Felix J. Herrmann} } @techreport {vandenberg07TRipr, title = {In pursuit of a root}, number = {TR-EOAS-2007-19}, year = {2007}, month = {06}, institution = {Department of Computer Science}, address = {University of British Columbia, Vancouver}, abstract = {The basis pursuit technique is used to find a minimum one-norm solution of an underdetermined least-squares problem. Basis pursuit denoise fits the least-squares problem only approximately, and a single parameter determines a curve that traces the trade-off between the least-squares fit and the one-norm of the solution. We show that the function that describes this curve is convex and continuously differentiable over all points of interest. The dual solution of a least-squares problem with an explicit one-norm constraint gives the function and derivative information needed for a root-finding method. As a result, we can compute arbitrary points on this curve. Numerical experiments demonstrate that our method, which relies only on matrix-vector operations, scales well to large problems.}, url = {http://www.optimization-online.org/DB_HTML/2007/06/1708.html}, author = {Ewout van den Berg and Michael P. Friedlander} }
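Note on the last entry (vandenberg07TRipr): its abstract describes solving basis pursuit denoise by root finding on the Pareto curve. The following is a minimal worked sketch of that idea, in notation assumed here for illustration (measurement matrix $A$, data $b$, solution $x_\tau$, residual $r_\tau$, target misfit $\sigma$); these are the commonly quoted formulas for this approach, not expressions copied from the report itself.

\[
\phi(\tau) = \min_{\|x\|_1 \le \tau} \|Ax - b\|_2 , \qquad
\phi'(\tau) = -\frac{\|A^{\top} r_\tau\|_\infty}{\|r_\tau\|_2} , \qquad
r_\tau = b - A x_\tau ,
\]
\[
\tau_{k+1} = \tau_k + \frac{\sigma - \phi(\tau_k)}{\phi'(\tau_k)} .
\]

Here $x_\tau$ solves the one-norm-constrained least-squares subproblem, the derivative comes from the dual solution mentioned in the abstract, and the Newton update is iterated until $\phi(\tau_k) \approx \sigma$. Since each subproblem can be solved with a first-order method, the overall scheme needs only matrix-vector products with $A$ and $A^{\top}$, consistent with the scaling claim in the abstract.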