% Bibliography database. Conventions used throughout:
%   - lowercase entry types, one field per line, `=` aligned;
%   - author names in unambiguous "Last, First" form, separated by " and ";
%   - month macros (jan..dec) unquoted; page ranges with "--";
%   - bare DOIs (no resolver prefix); titles brace only proper nouns/acronyms;
%   - recurring venues/publishers factored into @string macros below.

@string{geophysics = {GEOPHYSICS}}
@string{seg        = {Society of Exploration Geophysicists}}

@article{Witte2019,
  author   = {Witte, Philipp A. and Louboutin, Mathias and Luporini, Fabio and Gorman, Gerard J. and Herrmann, Felix J.},
  title    = {Compressive least-squares migration with on-the-fly {Fourier} transforms},
  journal  = geophysics,
  volume   = {84},
  number   = {5},
  pages    = {R655--R672},
  year     = {2019},
  doi      = {10.1190/geo2018-0490.1},
  abstract = {Least-squares reverse time migration is a powerful approach for true-amplitude seismic imaging of complex geologic structures, but the successful application of this method is currently hindered by its enormous computational cost, as well as its high memory requirements for computing the gradient of the objective function. We have tackled these problems by introducing an algorithm for low-cost sparsity-promoting least-squares migration using on-the-fly Fourier transforms. We formulate the least-squares migration objective function in the frequency domain (FD) and compute gradients for randomized subsets of shot records and frequencies, thus significantly reducing data movement and the number of overall wave equations solves. By using on-the-fly Fourier transforms, we can compute an arbitrary number of monochromatic FD wavefields with a time-domain (TD) modeling code, instead of having to solve individual Helmholtz equations for each frequency, which becomes computationally infeasible when moving to high frequencies. Our numerical examples demonstrate that compressive imaging with on-the-fly Fourier transforms provides a fast and memory-efficient alternative to TD imaging with optimal checkpointing, whose memory requirements for a fixed background model and source wavelet are independent of the number of time steps. Instead, the memory and additional computational costs grow with the number of frequencies and determine the amount of subsampling artifacts and crosstalk. In contrast to optimal checkpointing, this offers the possibility to trade the memory and computational costs for image quality or a larger number of iterations and is advantageous in new computing environments such as the cloud, where computing is often cheaper than memory and data movement.},
}

@inproceedings{Lempitsky,
  author    = {Lempitsky, Victor and Vedaldi, Andrea and Ulyanov, Dmitry},
  title     = {Deep Image Prior},
  booktitle = {2018 {IEEE/CVF} Conference on Computer Vision and Pattern Recognition},
  year      = {2018},
  month     = jun,
  pages     = {9446--9454},
  doi       = {10.1109/CVPR.2018.00984},
  keywords  = {convolution;feedforward neural nets;image restoration;inverse problems;learning (artificial intelligence);neural net architecture;statistical analysis;deep convolutional networks;image generation;randomly-initialized neural network;deep neural representations;deep image prior;image statistics;inverse problems;image restoration;generator network architectures;realistic image priors learning;Image restoration;Image resolution;Noise reduction;Task analysis;Optimization;Generators;Image reconstruction},
}

@inproceedings{Cheng_2019_CVPR,
  author    = {Cheng, Zezhou and Gadelha, Matheus and Maji, Subhransu and Sheldon, Daniel},
  title     = {A {Bayesian} Perspective on the Deep Image Prior},
  booktitle = {The {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  month     = jun,
  year      = {2019},
  pages     = {5443--5451},
}

@inproceedings{welling2011bayesian,
  author    = {Welling, Max and Teh, Yee Whye},
  title     = {{Bayesian} Learning via Stochastic Gradient {Langevin} Dynamics},
  booktitle = {Proceedings of the 28th International Conference on International Conference on Machine Learning},
  series    = {ICML'11},
  year      = {2011},
  pages     = {681--688},
}

@article{devito-api,
  author  = {Louboutin, M. and Lange, M. and Luporini, F. and Kukreja, N. and Witte, P. A. and Herrmann, F. J. and Velesko, P. and Gorman, G. J.},
  title   = {{Devito} (v3.1.0): an embedded domain-specific language for finite differences and geophysical exploration},
  journal = {Geoscientific Model Development},
  volume  = {12},
  number  = {3},
  pages   = {1165--1187},
  year    = {2019},
  url     = {https://www.geosci-model-dev.net/12/1165/2019/},
  doi     = {10.5194/gmd-12-1165-2019},
}

@article{mosser2018stochastic,
  author  = {Mosser, Lukas and Dubrule, Olivier and Blunt, Martin J.},
  title   = {Stochastic Seismic Waveform Inversion Using Generative Adversarial Networks as a Geological Prior},
  journal = {Mathematical Geosciences},
  volume  = {52},
  number  = {1},
  pages   = {53--79},
  year    = {2020},
  doi     = {10.1007/s11004-019-09832-6},
  internal-note = {Original entry had volume 84, number 1, year 2019, apparently copied from a GEOPHYSICS entry; corrected to match DOI 10.1007/s11004-019-09832-6 (Math. Geosci. 52(1), 2020) -- please double-check.},
}

@inproceedings{herrmann2019NIPSliwcuc,
  author    = {Herrmann, Felix J. and Siahkoohi, Ali and Rizzuti, Gabrio},
  title     = {Learned imaging with constraints and uncertainty quantification},
  booktitle = {Neural Information Processing Systems ({NeurIPS}) 2019 Deep Inverse Workshop},
  year      = {2019},
  month     = dec,
  url       = {https://arxiv.org/pdf/1909.06473.pdf},
  keywords  = {constraint, deep learning, Imaging, Uncertainty quantification},
  abstract  = {We outline new approaches to incorporate ideas from convolutional networks into wave-based least-squares imaging. The aim is to combine hand-crafted constraints with deep convolutional networks allowing us to directly train a network capable of generating samples from the posterior. The main contributions include combination of weak deep priors with hard handcrafted constraints and a possible new way to sample the posterior.},
}

@article{siahkoohi2019transfer,
  author    = {Siahkoohi, Ali and Louboutin, Mathias and Herrmann, Felix J.},
  title     = {The importance of transfer learning in seismic modeling and imaging},
  journal   = geophysics,
  volume    = {84},
  number    = {6},
  pages     = {A47--A52},
  month     = nov,
  year      = {2019},
  doi       = {10.1190/geo2019-0056.1},
  publisher = seg,
}

@inproceedings{rizzuti2019EAGElis,
  author    = {Rizzuti, Gabrio and Siahkoohi, Ali and Herrmann, Felix J.},
  title     = {Learned iterative solvers for the {Helmholtz} equation},
  booktitle = {81st {EAGE} Conference and Exhibition 2019},
  year      = {2019},
  doi       = {10.3997/2214-4609.201901542},
  issn      = {2214-4609},
  url       = {https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2019/rizzuti2019EAGElis/rizzuti2019EAGElis.pdf},
  keywords  = {Helmholtz, Iterative, machine learning, private},
  abstract  = {We propose a {\textquoteleft}learned{\textquoteright} iterative solver for the Helmholtz equation, by combining traditional Krylov-based solvers with machine learning. The method is, in principle, able to circumvent the shortcomings of classical iterative solvers, and has clear advantages over purely data-driven approaches. We demonstrate the effectiveness of this approach under a 1.5-D assumption, when adequate a priori information about the velocity distribution is known.},
}

@article{wu2019parametric,
  author    = {Wu, Yulang and McMechan, George A.},
  title     = {Parametric convolutional neural network-domain full-waveform inversion},
  journal   = geophysics,
  volume    = {84},
  number    = {6},
  pages     = {R881--R896},
  year      = {2019},
  publisher = seg,
  doi       = {10.1190/geo2018-0224.1},
}

@incollection{NEURIPS2019_9015,
  author    = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
  title     = {{PyTorch}: An Imperative Style, High-Performance Deep Learning Library},
  booktitle = {Advances in Neural Information Processing Systems 32},
  year      = {2019},
  pages     = {8024--8035},
  url       = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf},
}

@article{fang2017uqfip,
  author  = {Fang, Zhilong and Da Silva, Curt and Kuske, Rachel and Herrmann, Felix J.},
  title   = {Uncertainty quantification for inverse problems with weak partial-differential-equation constraints},
  journal = geophysics,
  volume  = {83},
  number  = {6},
  pages   = {R629--R647},
  year    = {2018},
  doi     = {10.1190/geo2017-0824.1},
}