@article{clement2013, author = {Clement, Greg}, year = {2014}, month = {12}, pages = {125010}, title = {A projection-based approach to diffraction tomography on curved boundaries}, volume = {30}, journal = {Inverse Problems}, doi = {10.1088/0266-5611/30/12/125010} } @misc{hauptmann2018approximate, title={Approximate k-space models and Deep Learning for fast photoacoustic reconstruction}, author={Andreas Hauptmann and Ben Cox and Felix Lucka and Nam Huynh and Marta Betcke and Paul Beard and Simon Arridge}, year={2018}, eprint={1807.03191}, archivePrefix={arXiv}, primaryClass={cs.CV} } @conference {herrmann2019NIPSliwcuc, title = {Learned imaging with constraints and uncertainty quantification}, booktitle = {Neural Information Processing Systems (NeurIPS)}, year = {2019}, month = {12}, abstract = {We outline new approaches to incorporate ideas from convolutional networks into wave-based least-squares imaging. The aim is to combine hand-crafted constraints with deep convolutional networks allowing us to directly train a network capable of generating samples from the posterior. The main contributions include combination of weak deep priors with hard handcrafted constraints and a possible new way to sample the posterior.}, keywords = {constraint, deep learning, Imaging, Uncertainty quantification}, url = {https://slim.gatech.edu/Publications/Public/Conferences/NIPS/2019/herrmann2019NIPSliwcuc/herrmann2019NIPSliwcuc.html}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/NIPS/2019/herrmann2019NIPSliwcuc/herrmann2019NIPSliwcuc_pres.pdf}, url2 = {https://openreview.net/pdf?id=Hyet2Q29IS}, author = {Felix J. Herrmann and Ali Siahkoohi and Gabrio Rizzuti} } @conference {siahkoohi2020EAGEdlb, title = {A deep-learning based Bayesian approach to seismic imaging and uncertainty quantification}, booktitle = {EAGE Annual Conference Proceedings}, year = {2020}, month = {1}, abstract = {Uncertainty quantification is essential when dealing with ill-conditioned inverse problems due to the inherent nonuniqueness of the solution. Bayesian approaches allow us to determine how likely an estimation of the unknown parameters is via formulating the posterior distribution. Unfortunately, it is often not possible to formulate a prior distribution that precisely encodes our prior knowledge about the unknown. Furthermore, adherence to handcrafted priors may greatly bias the outcome of the Bayesian analysis. To address this issue, we propose to use the functional form of a randomly initialized convolutional neural network as an implicit structured prior, which is shown to promote natural images and excludes images with unnatural noise. In order to incorporate the model uncertainty into the final estimate, we sample the posterior distribution using stochastic gradient Langevin dynamics and perform Bayesian model averaging on the obtained samples. Our synthetic numerical experiment verifies that deep priors combined with Bayesian model averaging are able to partially circumvent imaging artifacts and reduce the risk of overfitting in the presence of extreme noise. Finally, we present pointwise variance of the estimates as a measure of uncertainty, which coincides with regions that are difficult to image.}, keywords = {deep learning, EAGE, seismic imaging, stochastic gradient Langevin dynamics, Uncertainty quantification}, url = {https://slim.gatech.edu/Publications/Public/Conferences/EAGE/2020/siahkoohi2020EAGEdlb/siahkoohi2020EAGEdlb.html}, author = {Ali Siahkoohi and Gabrio Rizzuti and Felix J.
Herrmann} } @article{arridge2019solving, title={Solving inverse problems using data-driven models}, author={Arridge, Simon and Maass, Peter and {\"O}ktem, Ozan and Sch{\"o}nlieb, Carola-Bibiane}, journal={Acta Numerica}, volume={28}, pages={1--174}, year={2019}, publisher={Cambridge University Press} } @article {Guasch809707, author = {Guasch, Llu{\'\i}s and Calder{\'o}n Agudo, Oscar and Tang, Meng-Xing and Nachev, Parashkev and Warner, Michael}, title = {Full-waveform inversion imaging of the human brain}, elocation-id = {809707}, year = {2019}, doi = {10.1101/809707}, publisher = {Cold Spring Harbor Laboratory}, abstract = {Magnetic resonance imaging and X-ray computed tomography provide the two principal methods available for imaging the brain at high spatial resolution, but these methods are not easily portable and cannot be applied safely to all patients. Ultrasound imaging is portable and universally safe, but existing modalities cannot image usefully inside the adult human skull. We use in-silico simulations to demonstrate that full-waveform inversion, a computational technique originally developed in geophysics, is able to generate accurate three-dimensional images of the brain with sub-millimetre resolution. This approach overcomes the familiar problems of conventional ultrasound neuroimaging by using: transcranial ultrasound that is not obscured by strong reflections from the skull, low frequencies that are readily transmitted with good signal-to-noise ratio, an accurate wave equation that properly accounts for the physics of wave propagation, and an accurate model of the skull that compensates properly for wavefront distortion. Laboratory ultrasound data, using ex-vivo human skulls, demonstrate that our computational experiments mimic the penetration and signal-to-noise ratios expected in clinical applications. This form of non-invasive neuroimaging has the potential for the rapid diagnosis of stroke and head trauma, and for the provision of routine monitoring of a wide range of neurological conditions.}, URL = {https://www.biorxiv.org/content/early/2019/10/18/809707}, eprint = {https://www.biorxiv.org/content/early/2019/10/18/809707.full.pdf}, journal = {bioRxiv} } @article{Warner2016, author = {Michael Warner and Llu{\'\i}s Guasch}, title = {Adaptive waveform inversion: Theory}, journal = {GEOPHYSICS}, volume = {81}, number = {6}, pages = {R429-R445}, year = {2016}, doi = {10.1190/geo2015-0387.1}, URL = {https://doi.org/10.1190/geo2015-0387.1}, eprint = {https://doi.org/10.1190/geo2015-0387.1}, abstract = {Conventional full-waveform seismic inversion attempts to find a model of the subsurface that is able to predict observed seismic waveforms exactly; it proceeds by minimizing the difference between the observed and predicted data directly, iterating in a series of linearized steps from an assumed starting model. If this starting model is too far removed from the true model, then this approach leads to a spurious model in which the predicted data are cycle skipped with respect to the observed data. Adaptive waveform inversion (AWI) provides a new form of full-waveform inversion (FWI) that appears to be immune to the problems otherwise generated by cycle skipping. In this method, least-squares convolutional filters are designed that transform the predicted data into the observed data. The inversion problem is formulated such that the subsurface model is iteratively updated to force these Wiener filters toward zero-lag delta functions.
As that is achieved, the predicted data evolve toward the observed data and the assumed model evolves toward the true model. This new method is able to invert synthetic data successfully, beginning from starting models and under conditions for which conventional FWI fails entirely. AWI has a similar computational cost to conventional FWI per iteration, and it appears to converge at a similar rate. The principal advantages of this new method are that it allows waveform inversion to begin from less-accurate starting models, does not require the presence of low frequencies in the field data, and appears to provide a better balance between the influence of refracted and reflected arrivals upon the final-velocity model. The AWI is also able to invert successfully when the assumed source wavelet is severely in error. } } @article{Huang2018, author = {Guanghui Huang and Rami Nammour and William W. Symes}, title = {Volume source-based extended waveform inversion}, journal = {GEOPHYSICS}, volume = {83}, number = {5}, pages = {R369-R387}, year = {2018}, doi = {10.1190/geo2017-0330.1}, URL = {https://doi.org/10.1190/geo2017-0330.1}, eprint = {https://doi.org/10.1190/geo2017-0330.1}, abstract = {Full-waveform inversion (FWI) faces the persistent challenge of cycle skipping, which can result in stagnation of the iterative methods at uninformative models with poor data fit. Extended reformulations of FWI avoid cycle skipping through adding auxiliary parameters to the model so that a good data fit can be maintained throughout the inversion process. The volume-based matched source waveform inversion algorithm introduces source parameters by relaxing the location constraint of source energy: It is permitted to spread in space, while being strictly localized at time t=0. The extent of source energy spread is penalized by weighting the source energy with distance from the survey source location. For transmission data geometry (crosswell, diving wave, etc.) and transparent (nonreflecting) acoustic models, this penalty function is stable with respect to the data-frequency content, unlike the standard FWI objective. We conjecture that the penalty function is actually convex over much larger region in model space than is the FWI objective. Several synthetic examples support this conjecture and suggest that the theoretical limitation to pure transmission is not necessary: The inversion method can converge to a solution of the inverse problem in the absence of low-frequency data from an inaccurate initial velocity model even when reflections and refractions are present in the data along with transmitted energy. } } @article {vanleeuwen2015IPpmp, title = {A penalty method for PDE-constrained optimization in inverse problems}, journal = {Inverse Problems}, volume = {32}, number = {1}, year = {2015}, month = {12}, pages = {015007}, abstract = {Many inverse and parameter estimation problems can be written as PDE-constrained optimization problems. The goal is to infer the parameters, typically coefficients of the PDE, from partial measurements of the solutions of the PDE for several right-hand sides. Such PDE-constrained problems can be solved by finding a stationary point of the Lagrangian, which entails simultaneously updating the parameters and the (adjoint) state variables. For large-scale problems, such an all-at-once approach is not feasible as it requires storing all the state variables. 
In this case one usually resorts to a reduced approach where the constraints are explicitly eliminated (at each iteration) by solving the PDEs. These two approaches, and variations thereof, are the main workhorses for solving PDE-constrained optimization problems arising from inverse problems. In this paper, we present an alternative method that aims to combine the advantages of both approaches. Our method is based on a quadratic penalty formulation of the constrained optimization problem. By eliminating the state variable, we develop an efficient algorithm that has roughly the same computational complexity as the conventional reduced approach while exploiting a larger search space. Numerical results show that this method indeed reduces some of the nonlinearity of the problem and is less sensitive to the initial iterate.}, keywords = {Inverse problems, Optimization, PDE, penalty method}, url = {https://slim.gatech.edu/Publications/Public/Journals/InverseProblems/2015/vanleeuwen2015IPpmp/vanleeuwen2015IPpmp.pdf}, url2 = {http://stacks.iop.org/0266-5611/32/i=1/a=015007}, author = {Tristan {van} Leeuwen and Felix J. Herrmann} } @article {vanLeeuwen2013GJImlm, title = {Mitigating local minima in full-waveform inversion by expanding the search space}, journal = {Geophysical Journal International}, volume = {195}, year = {2013}, month = {10}, pages = {661-667}, abstract = {Wave equation based inversions, such as full-waveform inversion and reverse-time migration, are challenging because of their computational costs, memory requirements and reliance on accurate initial models. To confront these issues, we propose a novel formulation of wave equation based inversion based on a penalty method. In this formulation, the objective function consists of a data-misfit term and a penalty term, which measures how accurately the wavefields satisfy the wave equation. This new approach is a major departure from current formulations where forward and adjoint wavefields, which both satisfy the wave equation, are correlated to compute updates for the unknown model parameters. Instead, we carry out the inversions over two alternating steps during which we first estimate the wavefield everywhere, given the current model parameters, source and observed data, followed by a second step during which we update the model parameters, given the estimate for the wavefield everywhere and the source. Because the inversion involves both the synthetic wavefields and the medium parameters, its search space is enlarged so that it suffers less from local minima. Compared to other formulations that extend the search space of wave equation based inversion, our method differs in several aspects, namely (i) it avoids storage and updates of the synthetic wavefields because we calculate these explicitly by finding solutions that obey the wave equation and fit the observed data and (ii) no adjoint wavefields are required to update the model, instead our updates are calculated from these solutions directly, which leads to significant computational savings. We demonstrate the validity of our approach by carefully selected examples and discuss possible extensions and future research.}, doi = {10.1093/gji/ggt258}, url = {https://slim.gatech.edu/Publications/Public/Journals/GeophysicalJournalInternational/2013/vanLeeuwen2013GJImlm/vanLeeuwen2013mlm.pdf}, author = {Tristan {van} Leeuwen and Felix J.
Herrmann} } @article {peters2018pmf, title = {Projection methods and applications for seismic nonlinear inverse problems with multiple constraints}, journal = {Geophysics}, volume = {84}, number = {2}, year = {2019}, month = {02}, pages = {R251-R269}, abstract = {Nonlinear inverse problems are often hampered by non-uniqueness and local minima because of missing low frequencies and far offsets in the data, lack of access to good starting models, noise, and modeling errors. A well-known approach to counter these deficiencies is to include prior information on the unknown model, which regularizes the inverse problem. While conventional regularization methods have resulted in enormous progress in ill-posed (geophysical) inverse problems, challenges remain when the prior information consists of multiple pieces. To handle this situation, we propose an optimization framework that allows us to add multiple pieces of prior information in the form of constraints. Compared to additive regularization penalties, constraints have a number of advantages making them more suitable for inverse problems such as full-waveform inversion. The proposed framework is rigorous because it offers assurances that multiple constraints are imposed uniquely at each iteration, irrespective of the order in which they are invoked. To project onto the intersection of multiple sets uniquely, we employ Dykstra{\textquoteright}s algorithm that scales to large problems and does not rely on trade-off parameters. In that sense, our approach differs substantially from approaches such as Tikhonov regularization, penalty methods, and gradient filtering. None of these offer assurances, which makes them less suitable to full-waveform inversion where unrealistic intermediate results effectively derail the iterative inversion process. By working with intersections of sets, we keep expensive objective and gradient calculations unaltered, separate from projections, and we also avoid trade-off parameters. These features allow for easy integration into existing code bases. In addition to more predictable behavior, working with constraints also allows for heuristics where we built up the complexity of the model gradually by relaxing the constraints. This strategy helps to avoid convergence to local minima that represent unrealistic models. We illustrate this unique feature with examples of varying complexity.}, keywords = {constraints, Full-waveform inversion, intersection, Optimization, projection, regularization}, doi = {10.1190/geo2018-0192.1}, url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2018/peters2018pmf/peters2018pmf.html}, author = {Bas Peters and Brendan R. Smithyman and Felix J. Herrmann} } @article{aravkin2012IPNuisance, title={Estimating nuisance parameters in inverse problems}, author={Aravkin, Aleksandr Y and {van} Leeuwen, Tristan}, journal={Inverse Problems}, volume={28}, number={11}, pages={115016}, year={2012}, publisher={IOP Publishing} } @article{devito-api, author = {Louboutin, M. and Lange, M. and Luporini, F. and Kukreja, N. and Witte, P. A. and Herrmann, F. J. and Velesko, P. and Gorman, G. J.}, title = {Devito (v3.1.0): an embedded domain-specific language for finite differences and geophysical exploration}, journal = {Geoscientific Model Development}, volume = {12}, year = {2019}, number = {3}, pages = {1165--1187}, url = {https://www.geosci-model-dev.net/12/1165/2019/}, doi = {10.5194/gmd-12-1165-2019} } @article{devito-compiler, author = {{Luporini}, F. and {Lange}, M. and {Louboutin}, M.
and {Kukreja}, N. and {H{\"u}ckelheim}, J. and {Yount}, C. and {Witte}, P. and {Kelly}, P.~H.~J. and {Herrmann}, F.~J. and {Gorman}, G.~J.}, title = {Architecture and performance of Devito, a system for automated stencil computation}, journal = {CoRR}, volume = {abs/1807.03032}, month = {jul}, year = {2018}, url = {http://arxiv.org/abs/1807.03032}, archivePrefix = {arXiv}, eprint = {1807.03032} } @article{zhang2014photoacoustic, title={A photoacoustic image reconstruction method using total variation and nonconvex optimization}, author={Zhang, Chen and Zhang, Yan and Wang, Yuanyuan}, journal={BioMedical Engineering OnLine}, volume={13}, number={1}, pages={117}, year={2014}, publisher={Springer} } @article{liu2012compressed, title={Compressed sensing photoacoustic imaging based on fast alternating direction algorithm}, author={Liu, Xueyan and Peng, Dong and Guo, Wei and Ma, Xibo and Yang, Xin and Tian, Jie}, journal={International Journal of Biomedical Imaging}, volume={2012}, year={2012}, publisher={Hindawi} } @article{Xu2006, author = {Xu, Minghua and Wang, Lihong V.}, title = {Photoacoustic imaging in biomedicine}, journal = {Review of Scientific Instruments}, volume = {77}, number = {4}, pages = {041101}, year = {2006}, doi = {10.1063/1.2195024}, URL = {https://doi.org/10.1063/1.2195024}, eprint = {https://doi.org/10.1063/1.2195024} } @article{Ku2005, author = {Geng Ku and Bruno D. Fornage and Xing Jin and Minghua Xu and Kelly K. Hunt and Lihong V. Wang}, title = {Thermoacoustic and Photoacoustic Tomography of Thick Biological Tissues toward Breast Imaging}, journal = {Technology in Cancer Research \& Treatment}, volume = {4}, number = {5}, pages = {559-565}, year = {2005}, doi = {10.1177/153303460500400509}, URL = {https://doi.org/10.1177/153303460500400509}, eprint = {https://doi.org/10.1177/153303460500400509}, abstract = {Microwave-based thermoacoustic tomography (TAT) and laser-based photoacoustic tomography (PAT) in a circular scanning configuration were both developed to image deeply seated lesions and objects in biological tissues. Because malignant breast tissue absorbs microwaves more strongly than benign breast tissue, cancers were imaged with good spatial resolution and contrast by TAT in human breast mastectomy specimens. Based on the intrinsic optical contrast between blood and chicken breast muscle, an embedded blood object that was 5 cm deep in the tissue was also detected using PAT at a wavelength of 1064 nm. } } @article {esser2016tvr, title = {Total-variation regularization strategies in full-waveform inversion}, journal = {SIAM Journal on Imaging Sciences}, volume = {11}, number = {1}, year = {2018}, pages = {376-406}, abstract = {We propose an extended full-waveform inversion formulation that includes general convex constraints on the model. Though the full problem is highly nonconvex, the overarching optimization scheme arrives at geologically plausible results by solving a sequence of relaxed and warm-started constrained convex subproblems. The combination of box, total-variation, and successively relaxed asymmetric total-variation constraints allows us to steer free from parasitic local minima while keeping the estimated physical parameters laterally continuous and in a physically realistic range.
For accurate starting models, numerical experiments carried out on the challenging 2004 BP velocity benchmark demonstrate that bound and total-variation constraints improve the inversion result significantly by removing inversion artifacts, related to source encoding, and by clearly improved delineation of top, bottom, and flanks of a high-velocity high-contrast salt inclusion. The experiments also show that for poor starting models these two constraints by themselves are insufficient to detect the bottom of high-velocity inclusions such as salt. Inclusion of the one-sided asymmetric total-variation constraint overcomes this issue by discouraging velocity lows to buildup during the early stages of the inversion. To the author{\textquoteright}s knowledge the presented algorithm is the first to successfully remove the imprint of local minima caused by poor starting models and band-width limited finite aperture data.}, keywords = {constrained optimization, Full-waveform inversion, hinge loss, salt, total variation}, doi = {10.1137/17M111328X}, url = {https://slim.gatech.edu/Publications/Public/Journals/CoRR/2016/esser2016tvr/esser2016tvr.pdf}, url2 = {https://doi.org/10.1137/17M111328X}, author = {Ernie Esser and Llu{\'\i}s Guasch and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann} } @book{tarantola2005inverse, title={Inverse problem theory and methods for model parameter estimation}, author={Tarantola, Albert}, volume={89}, year={2005}, publisher={SIAM} } @misc{yang2020timedomain, title={Time-domain sparsity promoting least-squares reverse time migration with source estimation}, author={Mengmeng Yang and Zhilong Fang and Philipp Witte and Felix J. Herrmann}, year={2020}, eprint={2003.01159}, archivePrefix={arXiv}, primaryClass={physics.geo-ph} } @article{witteJUDI2019, author = {Philipp A. Witte and Mathias Louboutin and Navjot Kukreja and Fabio Luporini and Michael Lange and Gerard J. Gorman and Felix J. Herrmann}, title = {A large-scale framework for symbolic implementations of seismic inversion algorithms in Julia}, journal = {GEOPHYSICS}, volume = {84}, number = {3}, pages = {F57-F71}, year = {2019}, doi = {10.1190/geo2018-0174.1}, URL = {https://doi.org/10.1190/geo2018-0174.1}, eprint = {https://doi.org/10.1190/geo2018-0174.1} } @conference {sharan2018IEEEIUSspoa, title = {Sparsity-promoting photoacoustic imaging with source estimation}, booktitle = {2018 IEEE International Ultrasonics Symposium (IUS)}, year = {2018}, month = {10}, pages = {206-212}, abstract = {Photoacoustics has emerged as a high-contrast imaging modality that provides optical absorption maps inside of tissues, therefore complementing morphological information of conventional ultrasound. The laser-generated photoacoustic waves are usually envelope-detected, thus disregarding the specific waveforms generated by each photoabsorber. Here we propose a sparsity-promoting image reconstruction method that allows the estimation of each photoabsorber{\textquoteright}s source-time function. Preliminary studies showed the ability to reconstruct the optical absorption map of an in silico vessel phantom. By using a sparsity-promoting imaging method, absorption maps and source-time functions can still be recovered even in situations where the number of transducers is decreased by a factor of six. Moreover, the recovery is able to attain higher resolution than conventional beamforming methods.
Because our method recovers the source-time function of the absorbers, it could potentially also be used to distinguish different types of photoabsorbers, or the degree of aggregation of exogenous agents, under the assumption that these would generate different source-time functions at the moment of laser irradiation.}, keywords = {Acceleration, estimation, IEEE, Image reconstruction, Phantoms, photoacoustic imaging, source-time function, sparsity promoting inversion, Transducers, wave equation}, doi = {10.1109/ULTSYM.2018.8580037}, url = {https://slim.gatech.edu/Publications/Public/Conferences/IEEEIUS/2018/sharan2018IEEEIUSspoa/sharan2018IEEEIUSspoa.pdf}, presentation = {https://slim.gatech.edu/Publications/Public/Conferences/IEEEIUS/2018/sharan2018IEEEIUSspoa/sharan2018IEEEIUSspoa_pres.pdf}, author = {Shashin Sharan and Rajiv Kumar and Diego S. Dumani and Mathias Louboutin and Rongrong Wang and Stanislav Emelianov and Felix J. Herrmann} } @article {herrmann2010GEOPrsg, title = {Randomized sampling and sparsity: getting more information from fewer samples}, journal = {Geophysics}, volume = {75}, number = {6}, year = {2010}, month = {12}, pages = {WB173-WB187}, publisher = {SEG}, abstract = {Many seismic exploration techniques rely on the collection of massive data volumes that are subsequently mined for information during processing. Although this approach has been extremely successful in the past, current efforts toward higher-resolution images in increasingly complicated regions of the earth continue to reveal fundamental shortcomings in our workflows. Chiefly among these is the so-called {\textquotedblleft}curse of dimensionality{\textquotedblright} exemplified by Nyquist{\textquoteright}s sampling criterion, which disproportionately strains current acquisition and processing systems as the size and desired resolution of our survey areas continue to increase. We offer an alternative sampling method leveraging recent insights from compressive sensing toward seismic acquisition and processing for data that are traditionally considered to be undersampled. The main outcome of this approach is a new technology where acquisition and processing related costs are no longer determined by overly stringent sampling criteria, such as Nyquist. At the heart of our approach lies randomized incoherent sampling that breaks subsampling related interferences by turning them into harmless noise, which we subsequently remove by promoting transform-domain sparsity. Now, costs no longer grow significantly with resolution and dimensionality of the survey area, but instead depend only on transform-domain sparsity. Our contribution is twofold. First, we demonstrate by means of carefully designed numerical experiments that compressive sensing can successfully be adapted to seismic exploration. Second, we show that accurate recovery can be accomplished for compressively sampled data volumes sizes that exceed the size of conventional transform-domain data volumes by only a small factor. Because compressive sensing combines transformation and encoding by a single linear encoding step, this technology is directly applicable to acquisition and to dimensionality reduction during processing. In either case, sampling, storage, and processing costs scale with transform-domain sparsity. 
We illustrate this principle by means of number of case studies.}, keywords = {Acquisition, Compressive Sensing, data acquisition, geophysical techniques, Nyquist criterion, Optimization, sampling methods, seismology, SLIM}, doi = {10.1190/1.3506147}, url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2010/herrmann2010GEOPrsg/herrmann2010GEOPrsg.pdf}, author = {Felix J. Herrmann} } @article {witte2018cls, title = {Compressive least-squares migration with on-the-fly Fourier transforms}, journal = {Geophysics}, volume = {84}, number = {5}, year = {2019}, month = {08}, pages = {R655-R672}, abstract = {Least-squares reverse-time migration is a powerful approach for true amplitude seismic imaging of complex geological structures, but the successful application of this method is currently hindered by its enormous computational cost, as well as high memory requirements for computing the gradient of the objective function. We tackle these problems by introducing an algorithm for low-cost sparsity-promoting least-squares migration using on-the-fly Fourier transforms. We formulate the least-squares migration objective function in the frequency domain and compute gradients for randomized subsets of shot records and frequencies, thus significantly reducing data movement and the number of overall wave equations solves. By using on-the-fly Fourier transforms, we can compute an arbitrary number of monochromatic frequency-domain wavefields with a time-domain modeling code, instead of having to solve individual Helmholtz equations for each frequency, which quickly becomes computationally infeasible when moving to high frequencies. Our numerical examples demonstrate that compressive imaging with on-the-fly Fourier transforms provides a fast and memory-efficient alternative to time-domain imaging with optimal checkpointing, whose memory requirements for a fixed background model and source wavelet is independent of the number of time steps. Instead, memory and additional computational cost grow with the number of frequencies and determine the amount of subsampling artifacts and crosstalk. In contrast to optimal checkpointing, this offers the possibility to trade both memory and computational cost for image quality or a larger number of iterations and is advantageous in new computing environments such as the cloud, where compute is often cheaper than memory and data movement.}, keywords = {Fourier, least squares migration, sparsity-promotion}, doi = {10.1190/geo2018-0490.1}, url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2019/witte2018cls/witte2018cls.pdf}, author = {Philipp A. Witte and Mathias Louboutin and Fabio Luporini and Gerard J. Gorman and Felix J. 
Herrmann} } @article{mosher2014increasing, title={Increasing the efficiency of seismic data acquisition via compressive sensing}, author={Mosher, Charles and Li, Chengbo and Morley, Larry and Ji, Yongchang and Janiszewski, Frank and Olson, Robert and Brewer, Joel}, journal={The Leading Edge}, volume={33}, number={4}, pages={386--391}, year={2014}, publisher={Society of Exploration Geophysicists} } @article {kumar2017hrc, title = {Highly repeatable 3D compressive full-azimuth towed-streamer time-lapse acquisition {\textendash} a numerical feasibility study at scale}, journal = {The Leading Edge}, volume = {36}, number = {8}, year = {2017}, month = {08}, pages = {677-687}, abstract = {Most conventional 3D time-lapse (or 4D) acquisitions are ocean-bottom cable (OBC) or ocean-bottom node (OBN) surveys since these surveys are relatively easy to replicate compared to towed-streamer surveys. To attain high degrees of repeatability, survey replicability and dense periodic sampling has become the norm for 4D surveys that renders this technology expensive. Conventional towed-streamer acquisitions suffer from limited illumination of subsurface due to narrow azimuth. Although, acquisition techniques such as multi-azimuth, wide-azimuth, rich-azimuth acquisition, etc., have been developed to illuminate the subsurface from all possible angles, these techniques can be prohibitively expensive for densely sampled surveys. This leads to uneven sampling, i.e., dense receiver and coarse source sampling or vice-versa, in order to make these acquisitions more affordable. Motivated by the design principles of Compressive Sensing (CS), we acquire economic, randomly subsampled (or compressive) and simultaneous towed-streamer time-lapse data without the need of replicating the surveys. We recover densely sampled time-lapse data on one and the same periodic grid by using a joint-recovery model (JRM) that exploits shared information among different time-lapse recordings, coupled with a computationally cheap and scalable rank-minimization technique. The acquisition is low cost since we have subsampled measurements (about 70\% subsampled), simulated with a simultaneous long-offset acquisition configuration of two source vessels travelling across a survey area at random azimuths. We analyze the performance of our proposed compressive acquisition and subsequent recovery strategy by conducting a synthetic, at scale, seismic experiment on a 3D time-lapse model containing geological features such as channel systems, dipping and faulted beds, unconformities and a gas cloud. Our findings indicate that the insistence on replicability between surveys and the need for OBC/OBN 4D surveys can, perhaps, be relaxed. Moreover, this is a natural next step beyond the successful CS acquisition examples discussed in this special issue.}, keywords = {3D, CS, marine, rank minimization, simultaneous long offset, time-lapse seismic}, doi = {10.1190/tle36080677.1}, url2 = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2017/kumar2017hrc/kumar2017hrc.html}, author = {Rajiv Kumar and Haneet Wason and Shashin Sharan and Felix J.
Herrmann} } @book{peshkovsky2010acoustic, title={Acoustic cavitation theory and equipment design principles for industrial applications of high-intensity ultrasound}, author={Peshkovsky, Alexey S and Peshkovsky, Sergei L}, year={2010}, publisher={Nova Science Publishers} } @article{cox2007k, title={k-space propagation models for acoustically heterogeneous media: Application to biomedical photoacoustics}, author={Cox, Benjamin T and Kara, S and Arridge, Simon R and Beard, Paul C}, journal={The Journal of the Acoustical Society of America}, volume={121}, number={6}, pages={3453--3464}, year={2007}, publisher={Acoustical Society of America} }