@incollection{baardman2010estimation, title={Estimation of primaries by sparse inversion using dual-sensor data}, author={Baardman, Rolf H and Verschuur, Dirk J and van Borselen, Roald G and Frijlink, Martijn O and Hegge, Rob F}, booktitle={SEG Technical Program Expanded Abstracts 2010}, pages={3468--3472}, year={2010}, publisher={Society of Exploration Geophysicists}}
@article{lin2013GEOPrepsi, title={Robust estimation of primaries by sparse inversion via one-norm minimization}, journal={Geophysics}, volume={78}, number={3}, year={2013}, month={05}, pages={R133--R150}, abstract={A recently proposed method called estimation of primaries by sparse inversion (EPSI) avoids the need for adaptive subtraction of approximate multiple predictions by directly inverting for the multiple-free subsurface impulse response as a collection of band-limited spikes. Although it can be shown that the correct primary impulse response is obtained through the sparsest possible solution, the original EPSI algorithm was not designed to take advantage of this result, and instead it relies on a multitude of inversion parameters, such as the level of sparsity per gradient update. We proposed and tested a new algorithm, named robust EPSI, in which we make obtaining the sparsest solution an explicit goal. Our approach remains a gradient-based approach like the original algorithm, but it is derived from a new biconvex optimization framework based on an extended basis-pursuit denoising formulation. Furthermore, because it is based on a general framework, robust EPSI can recover the impulse response in transform domains, such as sparsifying curvelet-based representations, without changing the underlying algorithm. We discovered that the sparsity-minimizing objective of our formulation enabled it to operate successfully on a variety of synthetic and field marine data sets without excessive tweaking of inversion parameters. We also found that recovering the solution in alternate sparsity domains can significantly improve the quality of the directly estimated primaries, especially for weaker late-arrival events. In addition, we found that robust EPSI produces a more artifact-free impulse response compared to the original algorithm.}, keywords={algorithm, biconvex, EPSI, multiples, Optimization, Pareto, sparsity, waveform inversion}, doi={10.1190/geo2012-0097.1}, url={https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2013/lin2013GEOPrepsi/lin2013GEOPrepsi.pdf}, author={Tim T.Y. Lin and Felix J. Herrmann}}
@article{wang2008GEOPbws, title={Bayesian wavefield separation by transform-domain sparsity promotion}, journal={Geophysics}, volume={73}, number={5}, year={2008}, month={07}, pages={1--6}, abstract={Successful removal of coherent noise sources greatly determines the quality of seismic imaging. Major advances were made in this direction, e.g., Surface-Related Multiple Elimination (SRME) and interferometric ground-roll removal. Still, moderate phase, timing, amplitude errors and clutter in the predicted signal components can be detrimental. Adopting a Bayesian approach along with the assumption of approximate curvelet-domain independence of the to-be-separated signal components, we construct an iterative algorithm that takes the predictions produced by for example SRME as input and separates these components in a robust fashion. In addition, the proposed algorithm controls the energy mismatch between the separated and predicted components. Such a control, which was lacking in earlier curvelet-domain formulations, produces improved results for primary-multiple separation on both synthetic and real data.}, keywords={curvelet transform, Geophysics, Optimization, Processing, SLIM}, doi={10.1190/1.2952571}, url={https://www.slim.eos.ubc.ca/Publications/Public/Journals/Geophysics/2008/wang08GEObws/wang08GEObws.pdf}, html_version={https://www.slim.eos.ubc.ca/Publications/Private/Journals/2008/wang08TRbss/paper_html/paper.html}, author={Deli Wang and Rayan Saab and Ozgur Yilmaz and Felix J. Herrmann}}
@article{guitton_verschuur_2004, title={Adaptive subtraction of multiples using the L1-norm}, volume={52}, doi={10.1046/j.1365-2478.2004.00401.x}, number={1}, journal={Geophysical Prospecting}, author={Guitton, A. and Verschuur, D. J.}, year={2004}, pages={27--38}}
@article{groenestijn_verschuur_2009, title={Estimating primaries by sparse inversion and application to near-offset data reconstruction}, volume={74}, doi={10.1190/1.3111115}, number={3}, journal={Geophysics}, author={van Groenestijn, G. J. and Verschuur, D. J.}, year={2009}}
@article{moseley2018fast, title={Fast approximate simulation of seismic waves with deep learning}, author={Moseley, Benjamin and Markham, Andrew and Nissen-Meyer, Tarje}, journal={arXiv preprint arXiv:1807.06873}, year={2018}}
@article{rizzuti2019EAGElis, title={Learned iterative solvers for the Helmholtz equation}, year={2019}, journal={81st EAGE Conference and Exhibition 2019}, abstract={We propose a {\textquoteleft}learned{\textquoteright} iterative solver for the Helmholtz equation, by combining traditional Krylov-based solvers with machine learning. The method is, in principle, able to circumvent the shortcomings of classical iterative solvers, and has clear advantages over purely data-driven approaches. We demonstrate the effectiveness of this approach under a 1.5-D assumption, when adequate a priori information about the velocity distribution is known.}, keywords={Helmholtz, Iterative, machine learning, private}, url={https://www.slim.eos.ubc.ca/Publications/Private/Submitted/2019/rizzuti2019EAGElis/rizzuti2019EAGElis.pdf}, author={Rizzuti, Gabrio and Siahkoohi, Ali and Herrmann, Felix J.}}
@incollection{das2018convolutional, title={Convolutional neural network for seismic impedance inversion}, author={Das, Vishal and Pollack, Ahinoam and Wollner, Uri and Mukerji, Tapan}, booktitle={SEG Technical Program Expanded Abstracts 2018}, pages={2071--2075}, year={2018}, publisher={Society of Exploration Geophysicists}}
@article{richardson2018seismic, title={Seismic full-waveform inversion using deep learning tools and techniques}, author={Richardson, Alan}, journal={arXiv preprint arXiv:1801.07232}, year={2018}}
@incollection{lewis2017deep, title={Deep learning prior models from seismic images for full-waveform inversion}, author={Lewis, Winston and Vigh, Denes}, booktitle={SEG Technical Program Expanded Abstracts 2017}, pages={1512--1517}, year={2017}, publisher={Society of Exploration Geophysicists}}
@article{araya2018deep, title={Deep-learning tomography}, author={Araya-Polo, Mauricio and Jennings, Joseph and Adler, Amir and Dahlke, Taylor}, journal={The Leading Edge}, volume={37}, number={1}, pages={58--66}, year={2018}, publisher={Society of Exploration Geophysicists}}
@article{siahkoohi2019transfer, author={Siahkoohi, Ali and Louboutin, Mathias and Herrmann, Felix J.}, title={The importance of transfer learning in seismic modeling and imaging}, year={2019}, note={Submitted to GEOPHYSICS in February 2019}}
@book{Goodfellow-et-al-2016, title={Deep Learning}, author={Ian Goodfellow and Yoshua Bengio and Aaron Courville}, publisher={MIT Press}, note={\url{http://www.deeplearningbook.org}}, year={2016}}
@article{bottou2018optimization, title={Optimization methods for large-scale machine learning}, author={Bottou, L{\'e}on and Curtis, Frank E and Nocedal, Jorge}, journal={SIAM Review}, volume={60}, number={2}, pages={223--311}, year={2018}, publisher={SIAM}}
@article{mao2016least, title={Least squares generative adversarial networks}, author={Mao, Xudong and Li, Qing and Xie, Haoran and Lau, Raymond YK and Wang, Zhen and Smolley, Stephen Paul}, journal={arXiv preprint arXiv:1611.04076}, year={2016}}
@article{goodfellow2016nips, title={NIPS 2016 Tutorial: Generative Adversarial Networks}, author={Goodfellow, Ian}, journal={arXiv preprint arXiv:1701.00160}, year={2016}}
@article{hornik1989multilayer, title={Multilayer feedforward networks are universal approximators}, author={Hornik, Kurt and Stinchcombe, Maxwell and White, Halbert}, journal={Neural Networks}, volume={2}, number={5}, pages={359--366}, year={1989}, publisher={Elsevier}}
@article{quan2016fusionnet, title={{FusionNet: A deep fully residual convolutional neural network for image segmentation in connectomics}}, author={Tran Minh Quan and David G. C. Hildebrand and Won-Ki Jeong}, journal={CoRR}, year={2016}, volume={abs/1612.05360}, url={https://arxiv.org/pdf/1612.05360.pdf}}
@inproceedings{ronneberger2015u, author={Ronneberger, Olaf and Fischer, Philipp and Brox, Thomas}, editor={Navab, Nassir and Hornegger, Joachim and Wells, William M. and Frangi, Alejandro F.}, title={{U-Net: Convolutional Networks for Biomedical Image Segmentation}}, booktitle={Medical Image Computing and Computer-Assisted Intervention -- MICCAI 2015}, year={2015}, month={Nov}, publisher={Springer International Publishing}, address={Cham}, pages={234--241}, isbn={978-3-319-24574-4}, url={https://link.springer.com/chapter/10.1007/978-3-319-24574-4_28}, doi={10.1007/978-3-319-24574-4_28}}
@article{berkhout97eom, abstract={A review has been given of the surface-related multiple problem by making use of the so-called feedback model. From the resulting equations it has been concluded that the proposed solution does not require any properties of the subsurface. However, source-detector and reflectivity properties of the surface need be specified. Those properties have been quantified in a surface operator and this operator is estimated as part of the multiple removal problem. The surface-related multiple removal algorithm has been formulated in terms of a Neumann series and in terms of an iterative equation. The Neumann formulation requires a nonlinear optimization process for the surface operator; while the iterative formulation needs a number of linear optimizations. The iterative formulation also has the advantage that it can be integrated easily with another multiple removal method. An algorithm for the removal of internal multiples has been proposed as well. This algorithm is an extension of the surface-related method. Removal of internal multiples requires knowledge of the macro velocity model between the surface and the upper boundary of the multiple generating layer. In part II (also published in this issue) the success of the proposed algorithms has been demonstrated on numerical experiments and field data examples.}, author={Berkhout, A. J. and Verschuur, D. J.}, doi={10.1190/1.1444261}, journal={Geophysics}, keywords={SRME}, url={http://dx.doi.org/10.1190/1.1444261}, eprint={http://dx.doi.org/10.1190/1.1444261}, number={5}, pages={1586--1595}, publisher={SEG}, title={{Estimation of multiple scattering by iterative inversion, Part I: Theoretical considerations}}, volume={62}, year={1997}}
@inproceedings{Goodfellow2014, title={{G}enerative {A}dversarial {N}ets}, author={Goodfellow, Ian and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua}, booktitle={Proceedings of the 27th International Conference on Neural Information Processing Systems}, series={NIPS'14}, pages={2672--2680}, location={Montreal, Canada}, year={2014}, url={http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf}, eprint={http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf}}
@inproceedings{gupta2018deep, title={Random mesh projectors for inverse problems}, author={Konik Kothari and Sidharth Gupta and Maarten V. de Hoop and Ivan Dokmanic}, booktitle={International Conference on Learning Representations}, year={2019}, url={https://openreview.net/forum?id=HyGcghRct7}, eprint={https://openreview.net/forum?id=HyGcghRct7}}
@inproceedings{he2016deep, author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, title={{D}eep {R}esidual {L}earning for {I}mage {R}ecognition}, doi={10.1109/CVPR.2016.90}, booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, month={June}, year={2016}, url={https://ieeexplore.ieee.org/document/7780459}, eprint={https://ieeexplore.ieee.org/document/7780459}, pages={770--778}}
@inproceedings{pix2pix2016, author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A.}, title={{I}mage-to-{I}mage {T}ranslation with {C}onditional {A}dversarial {N}etworks}, booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, doi={10.1109/CVPR.2017.632}, url={https://ieeexplore.ieee.org/document/8100115}, eprint={https://ieeexplore.ieee.org/document/8100115}, month={July}, year={2017}, pages={5967--5976}}
@inproceedings{johnson2016perceptual, author={Johnson, Justin and Alahi, Alexandre and Fei-Fei, Li}, editor={Leibe, Bastian and Matas, Jiri and Sebe, Nicu and Welling, Max}, title={{P}erceptual {L}osses for {R}eal-{T}ime {S}tyle {T}ransfer and {S}uper-{R}esolution}, booktitle={Computer Vision -- European Conference on Computer Vision (ECCV) 2016}, year={2016}, publisher={Springer International Publishing}, address={Cham}, pages={694--711}, doi={10.1007/978-3-319-46475-6_43}, url={https://link.springer.com/chapter/10.1007%2F978-3-319-46475-6_43}, eprint={https://link.springer.com/chapter/10.1007%2F978-3-319-46475-6_43}, abstract={We consider image transformation problems, where an input image is transformed into an output image. Recent methods for such problems typically train feed-forward convolutional neural networks using a per-pixel loss between the output and ground-truth images. Parallel work has shown that high-quality images can be generated by defining and optimizing perceptual loss functions based on high-level features extracted from pretrained networks. We combine the benefits of both approaches, and propose the use of perceptual loss functions for training feed-forward networks for image transformation tasks. We show results on image style transfer, where a feed-forward network is trained to solve the optimization problem proposed by Gatys et al. in real-time. Compared to the optimization-based method, our network gives similar qualitative results but is three orders of magnitude faster. We also experiment with single-image super-resolution, where replacing a per-pixel loss with a perceptual loss gives visually pleasing results.}, isbn={978-3-319-46475-6}}
@article{kingma2015adam, title={{A}dam: {A} {M}ethod for {S}tochastic {O}ptimization}, author={Diederik P. Kingma and Jimmy Ba}, journal={CoRR}, year={2014}, archivePrefix={arXiv}, arxivId={1412.6980}, eprint={1412.6980}, volume={abs/1412.6980}}
@article{devito-compiler, author={{Luporini}, F. and {Lange}, M. and {Louboutin}, M. and {Kukreja}, N. and {H{\"u}ckelheim}, J. and {Yount}, C. and {Witte}, P. and {Kelly}, P.~H.~J. and {Gorman}, G.~J. and {Herrmann}, F.~J.}, title={Architecture and performance of Devito, a system for automated stencil computation}, journal={CoRR}, volume={abs/1807.03032}, month={Jul}, year={2018}, url={http://arxiv.org/abs/1807.03032}, archivePrefix={arXiv}, eprint={1807.03032}}
@article{devito-api, author={{Louboutin}, M. and {Lange}, M. and {Luporini}, F. and {Kukreja}, N. and {Witte}, P.~A. and {Herrmann}, F.~J. and {Velesko}, P. and {Gorman}, G.~J.}, title={Devito: an embedded domain-specific language for finite differences and geophysical exploration}, journal={CoRR}, volume={abs/1808.01995}, month={Aug}, year={2018}, url={https://arxiv.org/abs/1808.01995}, archivePrefix={arXiv}, eprint={1808.01995}}
@article{lu2015separated, title={Separated-wavefield imaging using primary and multiple energy}, volume={34}, doi={10.1190/tle34070770.1}, url={http://dx.doi.org/10.1190/tle34070770.1}, eprint={http://dx.doi.org/10.1190/tle34070770.1}, number={7}, journal={The Leading Edge}, author={Lu, Shaoping and Whitmore, Dan N. and Valenciano, Alejandro A. and Chemingui, Nizar}, year={2015}, publisher={Society of Exploration Geophysicists}, pages={770--778}}
@article{mandelli2018, author={Sara Mandelli and Federico Borra and Vincenzo Lipari and Paolo Bestagini and Augusto Sarti and Stefano Tubaro}, title={Seismic data interpolation through convolutional autoencoder}, journal={SEG Technical Program Expanded Abstracts 2018}, pages={4101--4105}, year={2018}, doi={10.1190/segam2018-2995428.1}, url={https://library.seg.org/doi/abs/10.1190/segam2018-2995428.1}, eprint={https://library.seg.org/doi/pdf/10.1190/segam2018-2995428.1}}
@article{mikhailiuk2018deep, title={{D}eep {L}earning {A}pplied to {S}eismic {D}ata {I}nterpolation}, doi={10.3997/2214-4609.201800918}, url={http://www.earthdoc.org/publication/publicationdetails/?publication=92298}, journal={80th EAGE Conference and Exhibition 2018}, author={Mikhailiuk, A. and Faul, A.}, year={2018}, month={Nov}}
@article{mosser2018stochastic, title={{S}tochastic {S}eismic {W}aveform {I}nversion {U}sing {G}enerative {A}dversarial {N}etworks {A}s {A} {G}eological {P}rior}, doi={10.3997/2214-4609.201803018}, url={http://www.earthdoc.org/publication/publicationdetails/?publication=95011}, journal={First EAGE/PESGB Workshop Machine Learning}, author={Mosser, L. and Dubrule, O. and Blunt, M.}, year={2018}, month={Nov}}
@article{ovcharenko2018, title={{L}ow-{F}requency {D}ata {E}xtrapolation {U}sing a {F}eed-{F}orward {ANN}}, doi={10.3997/2214-4609.201801231}, url={http://www.earthdoc.org/publication/publicationdetails/?publication=92618}, journal={80th EAGE Conference and Exhibition 2018}, author={Ovcharenko, O. and Kazei, V. and Peter, D. and Zhang, X. and Alkhalifah, T.}, year={2018}}
@article{pathak2016context, title={{C}ontext {E}ncoders: {F}eature {L}earning by {I}npainting}, doi={10.1109/cvpr.2016.278}, url={https://ieeexplore.ieee.org/document/7780647}, eprint={https://ieeexplore.ieee.org/document/7780647}, journal={2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, author={Pathak, Deepak and Krahenbuhl, Philipp and Donahue, Jeff and Darrell, Trevor and Efros, Alexei A.}, pages={2536--2544}, year={2016}}
@article{richardson2018generative, title={{G}enerative {A}dversarial {N}etworks for {M}odel {O}rder {R}eduction in {S}eismic {F}ull-{W}aveform {I}nversion}, author={Richardson, Alan}, archivePrefix={arXiv}, arxivId={1806.00828}, eprint={1806.00828}, volume={abs/1806.00828}, keywords={preprint}, journal={arXiv}, month={Jun}, year={2018}, url={https://arxiv.org/abs/1806.00828}}
@article{siahkoohi2018seismic, title={{S}eismic {D}ata {R}econstruction with {G}enerative {A}dversarial {N}etworks}, doi={10.3997/2214-4609.201801393}, url={http://www.earthdoc.org/publication/publicationdetails/?publication=92782}, journal={80th EAGE Conference and Exhibition 2018}, author={Siahkoohi, Ali and Kumar, Rajiv and Herrmann, Felix J.}, year={2018}, month={Nov}}
@article{siahkoohi2018deep, author={Siahkoohi, Ali and Louboutin, Mathias and Kumar, Rajiv and Herrmann, Felix J.}, title={Deep-convolutional neural networks in prestack seismic: Two exploratory examples}, journal={SEG Technical Program Expanded Abstracts 2018}, pages={2196--2200}, year={2018}, doi={10.1190/segam2018-2998599.1}, url={https://library.seg.org/doi/abs/10.1190/segam2018-2998599.1}, eprint={https://library.seg.org/doi/pdf/10.1190/segam2018-2998599.1}}
@article{sun2018low, author={Hongyu Sun and Laurent Demanet}, title={Low-frequency extrapolation with deep learning}, journal={SEG Technical Program Expanded Abstracts 2018}, pages={2011--2015}, year={2018}, doi={10.1190/segam2018-2997928.1}, url={https://library.seg.org/doi/abs/10.1190/segam2018-2997928.1}, eprint={https://library.seg.org/doi/pdf/10.1190/segam2018-2997928.1}}
@inproceedings{szegedy2017inception, title={{I}nception-v4, {I}nception-{R}es{N}et and the {I}mpact of {R}esidual {C}onnections on {L}earning}, author={Szegedy, Christian and Ioffe, Sergey and Vanhoucke, Vincent and Alemi, Alexander A}, booktitle={Proceedings of the Thirty-First Association for the Advancement of Artificial Intelligence Conference on Artificial Intelligence (AAAI-17)}, volume={4}, pages={4278--4284}, year={2017}, url={http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14806}}
@article{tu2015fast, title={Fast least-squares imaging with surface-related multiples: Application to a North Sea data set}, volume={34}, doi={10.1190/tle34070788.1}, url={http://dx.doi.org/10.1190/tle34070788.1}, eprint={http://dx.doi.org/10.1190/tle34070788.1}, number={7}, journal={The Leading Edge}, publisher={Society of Exploration Geophysicists}, author={Tu, Ning and Herrmann, Felix J.}, year={2015}, pages={788--794}}
@article{veillard2018, title={{F}ast {3D} {S}eismic {I}nterpretation with {U}nsupervised {D}eep {L}earning: {A}pplication to a {P}otash {N}etwork in the {N}orth {S}ea}, doi={10.3997/2214-4609.201800738}, journal={80th EAGE Conference and Exhibition 2018}, author={Veillard, A. and Morère, O. and Grout, M. and Gruffeille, J.}, year={2018}}
@article{Verschuur1992, title={Adaptive surface-related multiple elimination}, author={Verschuur, Dirk J. and Berkhout, A. J. and Wapenaar, C. P. A.}, journal={Geophysics}, volume={57}, number={9}, pages={1166--1177}, year={1992}, publisher={Society of Exploration Geophysicists}, doi={10.1190/1.1443330}, url={http://dx.doi.org/10.1190/1.1443330}, eprint={http://dx.doi.org/10.1190/1.1443330}}
@article{verschuur97eom, abstract={A surface-related multiple-elimination method can be formulated as an iterative procedure: the output of one iteration step is used as input for the next iteration step (part I of this paper). In this paper (part II) it is shown that the procedure can be made very efficient if a good initial estimate of the multiple-free data set can be provided in the first iteration, and in many situations, the Radon-based multiple-elimination method may provide such an estimate. It is also shown that for each iteration, the inverse source wavelet can be accurately estimated by a linear (least-squares) inversion process. Optionally, source and detector variations and directivity effects can be included, although the examples are given without these options. The iterative multiple elimination process, together with the source wavelet estimation, are illustrated with numerical experiments as well as with field data examples. The results show that the surface-related multiple-elimination process is very effective in time gates where the moveout properties of primaries and multiples are very similar (generally deep data), as well as for situations with a complex multiple-generating system.}, author={Verschuur, D. J. and Berkhout, A. J.}, doi={10.1190/1.1444262}, url={http://dx.doi.org/10.1190/1.1444262}, eprint={http://dx.doi.org/10.1190/1.1444262}, journal={Geophysics}, keywords={SRME}, number={5}, pages={1596--1611}, publisher={SEG}, title={{Estimation of multiple scattering by iterative inversion, Part II: Practical aspects and examples}}, volume={62}, year={1997}}
@article{wang2018seismic, title={{S}eismic {D}ata {I}nterpolation {U}sing {D}eep {L}earning {B}ased {R}esidual {N}etworks}, doi={10.3997/2214-4609.201801394}, url={http://www.earthdoc.org/publication/publicationdetails/?publication=92783}, journal={80th EAGE Conference and Exhibition 2018}, author={Wang, B. F. and Zhang, N. and Lu, W. K. and Zhang, P. and Geng, J. H.}, year={2018}, month={Nov}}
@inproceedings{wang2018pix2pixHD, author={Wang, Ting-Chun and Liu, Ming-Yu and Zhu, Jun-Yan and Tao, Andrew and Kautz, Jan and Catanzaro, Bryan}, title={{H}igh-{R}esolution {I}mage {S}ynthesis and {S}emantic {M}anipulation with {C}onditional {GAN}s}, booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, month={June}, pages={8798--8807}, doi={10.1109/CVPR.2018.00917}, url={https://ieeexplore.ieee.org/document/8579015}, eprint={https://ieeexplore.ieee.org/document/8579015}, year={2018}}
@article{yang2016time, author={Mengmeng Yang and Philipp Witte and Zhilong Fang and Felix Herrmann}, title={Time-domain sparsity-promoting least-squares migration with source estimation}, journal={SEG Technical Program Expanded Abstracts 2016}, pages={4225--4229}, year={2016}, doi={10.1190/segam2016-13850609.1}, url={https://library.seg.org/doi/abs/10.1190/segam2016-13850609.1}, eprint={https://library.seg.org/doi/pdf/10.1190/segam2016-13850609.1}}
@inproceedings{yosinski2014transferable, author={Yosinski, Jason and Clune, Jeff and Bengio, Yoshua and Lipson, Hod}, title={How Transferable Are Features in Deep Neural Networks?}, booktitle={Proceedings of the 27th International Conference on Neural Information Processing Systems}, series={NIPS'14}, year={2014}, location={Montreal, Canada}, pages={3320--3328}, numpages={9}, url={http://dl.acm.org/citation.cfm?id=2969033.2969197}}
@article{CycleGAN2017, title={{U}npaired {I}mage-to-{I}mage {T}ranslation {U}sing {C}ycle-{C}onsistent {A}dversarial {N}etworks}, doi={10.1109/iccv.2017.244}, url={https://ieeexplore.ieee.org/document/8237506}, eprint={https://ieeexplore.ieee.org/document/8237506}, journal={2017 IEEE International Conference on Computer Vision (ICCV)}, author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A.}, year={2017}, pages={2242--2251}}
@misc{tensorflow2015, title={{TensorFlow}: {L}arge-{S}cale {M}achine {L}earning on {H}eterogeneous {S}ystems}, url={https://www.tensorflow.org/}, note={Software available from tensorflow.org}, author={Abadi, Mart{\'\i}n and Barham, Paul and Chen, Jianmin and Chen, Zhifeng and Davis, Andy and Dean, Jeffrey and Devin, Matthieu and Ghemawat, Sanjay and Irving, Geoffrey and Isard, Michael and others}, year={2015}}