<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="review-article">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Imaging.</journal-id>
<journal-title>Frontiers in Imaging</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Imaging.</abbrev-journal-title>
<issn pub-type="epub">2813-3315</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fimag.2024.1336829</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Imaging</subject>
<subj-group>
<subject>Review</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Computational optical imaging: challenges, opportunities, new trends, and emerging applications</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name><surname>Xiang</surname> <given-names>Meng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1620688/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Fei</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1703197/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Jinpeng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1669613/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Dong</surname> <given-names>Xue</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2649437/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
<contrib contrib-type="author">
<name><surname>Liu</surname> <given-names>Qianqian</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/2649439/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name><surname>Shao</surname> <given-names>Xiaopeng</given-names></name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x0002A;</sup></xref>
<uri xlink:href="http://loop.frontiersin.org/people/1842950/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>School of Optoelectronic Engineering, Xidian University</institution>, <addr-line>Xi&#x00027;an</addr-line>, <country>China</country></aff>
<aff id="aff2"><sup>2</sup><institution>Xi&#x00027;an Key Laboratory of Computational Imaging</institution>, <addr-line>Xi&#x00027;an</addr-line>, <country>China</country></aff>
<aff id="aff3"><sup>3</sup><institution>Chinese Academy of Sciences Key Laboratory of Space Precision Measurement Technology</institution>, <addr-line>Xi&#x00027;an</addr-line>, <country>China</country></aff>
<author-notes>
<fn fn-type="edited-by"><p>Edited by: Pasquale Imperatore, National Research Council (CNR), Italy</p></fn>
<fn fn-type="edited-by"><p>Reviewed by: Pasquale Memmolo, National Research Council (CNR), Italy</p>
<p>Rosa Scapaticci, National Research Council (CNR), Italy</p></fn>
<corresp id="c001">&#x0002A;Correspondence: Xiaopeng Shao <email>xpshao&#x00040;xidian.edu.cn</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>14</day>
<month>02</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>3</volume>
<elocation-id>1336829</elocation-id>
<history>
<date date-type="received">
<day>11</day>
<month>11</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>24</day>
<month>01</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x000A9; 2024 Xiang, Liu, Liu, Dong, Liu and Shao.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Xiang, Liu, Liu, Dong, Liu and Shao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p></license>
</permissions>
<abstract>
<p>Computational imaging technology (CIT), with its many variations, addresses the limitations of industrial design. CIT can effectively overcome the bottlenecks in physical information acquisition, model development, and resolution by being tightly coupled with mathematical calculations and signal processing in information acquisition, transmission, and interpretation. Qualitative improvements are achieved in the dimensions, scale, and resolution of the information. Therefore, in this review, the concepts and meaning of CIT are summarized before establishing a real CIT system. The basic common problems and relevant challenging technologies are analyzed, particularly the non-linear imaging model. The five typical imaging requirements&#x02013;distance, resolution, applicability, field of view, and system size&#x02013;are detailed. The corresponding key issues of super-large-aperture imaging systems, imaging beyond the diffraction limit, bionic optics, interpretation of light field information, computational optical system design, and computational detectors are also discussed. This review provides a global perspective for researchers to promote technological developments and applications.</p></abstract>
<kwd-group>
<kwd>computational imaging interpretation</kwd>
<kwd>imaging model</kwd>
<kwd>computational imaging system</kwd>
<kwd>optical system design</kwd>
<kwd>information transfer</kwd>
</kwd-group>
<counts>
<fig-count count="26"/>
<table-count count="1"/>
<equation-count count="0"/>
<ref-count count="148"/>
<page-count count="28"/>
<word-count count="15919"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Image Capture</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>Computational imaging technology (CIT) is a new imaging modality that has recently received considerable attention because of its novel physical characteristics. With the rapid development of optoelectronics (Zhang et al., <xref ref-type="bibr" rid="B143">2017</xref>), information processing (Wickens and Carswell, <xref ref-type="bibr" rid="B133">2021</xref>), photon integration (Fu et al., <xref ref-type="bibr" rid="B39">2022</xref>), and other technical capabilities, photoelectric imaging technology is urgently required in various fields, such as remote sensing, biomedicine, deep space exploration, artificial intelligence, and resource exploration. However, owing to theoretical limitations, such as the Abbe diffraction limit, mutual restriction of a large field of view and high resolution, independent imaging links, and exponential attenuation of ballistic light intensity with an increase in propagation distance, the imaging effect of traditional imaging technology based on the object-image conjugation mode in photoelectric detection is limited by aspects such as imaging media, optical systems, and signal interpretation.</p>
<p>1) The imaging medium: Owing to the existence of strong scattering media due to meteorological conditions, such as haze, rain, and snow, photoelectric imaging systems cannot effectively receive information from the target directly. Sometimes, the low signal-to-noise ratio (SNR) causes a scattered light field information distribution, increasing the difficulty of SNR interpretation and reducing the effective imaging distance.</p>
<p>2) Optical system: The optical system cannot always simultaneously resolve the mutual conflict between the resolution and field of view, because the traditional imaging technology of the ray-tracing mode is affected by the principle and structure of the system. In addition, overcoming extremely small aberrations leads to increased complexity, volume, weight, and power consumption of the system.</p>
<p>3) Interpretation of the optical field information: the imaging mode with single-intensity information as the main means of detection is affected by considerable background noise that reduces the contrast between the target and the background, ease of aliasing, and difficulty in effective detection and interpretation.</p>
<p>Therefore, traditional imaging methods experience different degrees of information loss at the space, physical, and information levels, greatly limiting the application of photoelectric imaging systems in certain applications. Due to limitations in industrial design thinking, the development of traditional photoelectric imaging technology with &#x0201C;object-image conjugation&#x0201D; mode as the core has reached a plateau, and it is difficult to advance further. However, the emergence of CIT, which focuses on information acquisition and interpretation, has led to potential new opportunities (Lukac and Radha, <xref ref-type="bibr" rid="B79">2011</xref>). CIT embraces many disciplines, such as optics, mathematics, and informatics, which makes imaging no longer solely reliant on optical hardware but also includes mathematical calculations and signal processing, breaking the limits of traditional photoelectric imaging technology. The development of optical CIT has led to the development of many new imaging technologies, such as ptychography imaging (Enders et al., <xref ref-type="bibr" rid="B32">2014</xref>), lensless imaging (Monakhova et al., <xref ref-type="bibr" rid="B87">2020</xref>), scattered light imaging (Cua et al., <xref ref-type="bibr" rid="B27">2017</xref>), synthetic aperture imaging (Tian et al., <xref ref-type="bibr" rid="B125">2023</xref>), and quantum imaging (Bogdanski et al., <xref ref-type="bibr" rid="B15">2004</xref>), and has quickly become an important research direction in the imaging field globally. These developments play an important role in photoelectric detection.</p>
<p>Several publications analyze and condense computational imaging methods from varying standpoints. Ozcan provided a summary of recent research on emerging techniques in computational imaging, sensing, and diagnostics, as well as complementary non-computational methods that have the potential to revolutionize global healthcare delivery (Coskun and Ozcan, <xref ref-type="bibr" rid="B25">2014</xref>). The IEEE even launched a journal, IEEE Transactions on Computational Imaging, in 2015 dedicated to the topic. Qionghai Dai investigated the most recent and most promising progress in computational imaging, considering the various dimensions of visual signals including spatial, temporal, angular, spectral, and phase dimensions (Hu et al., <xref ref-type="bibr" rid="B51">2017</xref>). Ravindra A. Athale discussed the progress made in Computational Imaging since the mid-1990s and identified three motivations for using Computational Imaging: when a direct measurement of the desired parameter is physically impossible, when the dimensionality of the desired parameter is incompatible with present technology, and when making an indirect measurement is more advantageous than making a direct one (Mait et al., <xref ref-type="bibr" rid="B81">2012</xref>).</p>
<p>The purpose of this paper is to investigate the potential advancements of computational imaging technology through five future application perspectives: &#x0201C;higher,&#x0201D; &#x0201C;farther,&#x0201D; &#x0201C;smaller,&#x0201D; &#x0201C;wider,&#x0201D; and &#x0201C;stronger,&#x0201D; for promoting the continuous and comprehensive development of CIT and rapid application transformation of the technology. In this study, the concept of computational imaging is described from broad and narrow perspectives, and its components are analyzed and key technologies existing in the process of technological development are summarized. This study provides a new perspective on the status quo, development, and future of CIT, which can help the development of corresponding imaging technology research and promote the further development of CIT.</p></sec>
<sec id="s2">
<title>2 Concept, components, and development status of computational imaging</title>
<sec>
<title>2.1 Concept and components of computational imaging</title>
<p>CIT was born following the rapid development of information processing technology (Lee et al., <xref ref-type="bibr" rid="B67">2022</xref>), micro-nano fabrication technology (Qian and Wang, <xref ref-type="bibr" rid="B101">2010</xref>), artificial intelligence technology (Suo et al., <xref ref-type="bibr" rid="B123">2021</xref>), and high-speed computing power (Ying et al., <xref ref-type="bibr" rid="B139">2020</xref>), and is an innovation in photoelectric imaging technology. In a broad sense, all optical imaging methods introduced in the imaging process can be considered computational imaging. In addition, the use of the processing speed of powerful computers to assist or directly participate in the improvement of the imaging effect, such as image processing, belongs to computational imaging. In a narrow sense, CIT is driven by information and uses information acquisition, transmission, and interpretation to describe the optical imaging process; it is a new multidisciplinary imaging technology that combines optics, mathematics, and information technology.</p>
<p>Further, traditional optical imaging is &#x0201C;what you see is what you get, and what you get is what you see.&#x0201D; The information-centered computational imaging method combines a full-link imaging process with mathematical calculations and signal processing through information acquisition, transmission, and interpretation. In terms of the information dimension, scale, and resolution, information transmission is combined with mathematical analysis to overcome the bottleneck problems of difficult physical information acquisition, model development, and resolution in the imaging process, to achieve the imaging &#x0201C;get more than what you see, and get better than what you see&#x0201D; results.</p>
<p>CIT comprehensively considers the physical nature of the imaging process, promotes the movement of imaging system design from the traditional aberration-driven to information-driven methods, considers the full-link imaging process, and realizes the change in the information transmission mechanism. Its components can be divided into three aspects:</p>
<p>1) CIT designs imaging systems from the perspective of information transmission, improves the degrees of freedom of imaging, and fully excavates the light field information for the purpose of accurate information acquisition and transmission. This enables CIT to achieve a breakthrough in the &#x0201C;invisible,&#x0201D; &#x0201C;incomplete,&#x0201D; and other problems of traditional optical imaging technology.</p>
<p>2) From the perspective of the entire imaging process, CIT decomposes the traditional photoelectric imaging-independent optimization concept. It migrates the light field representation model, the transmission medium, and the imaging system into an imaging model driven by information transfer, giving full play to the characteristics and advantages of the medium, the optical system, and information processing in the imaging link, breaking the limitations of traditional imaging.</p>
<p>3) CIT introduces the idea of information coding to broaden the information channel and increase the capacity of the information required for imaging in the form of active or passive coding. Through active coding methods such as light sources and optical system modulation, CIT expands the method of information acquisition and improves the efficiency of information collection. In addition, considering the encoding effect of the transmission medium on information, through the joint multiplexing of multi-dimensional physical quantities such as amplitude, phase, polarization, and spectrum, the interpretation ability of information is improved, and breakthroughs in imaging resolution, field of view, and action distance are achieved, turning &#x0201C;impossible&#x0201D; imaging into &#x0201C;possible.&#x0201D; However, CIT cannot process information that has not yet been acquired. Instead, it actively discards nonessential dimensional information and increases the amount of necessary dimensional information to improve the imaging performance and overcome the limitations of traditional imaging.</p></sec>
<sec>
<title>2.2 CIT history and development</title>
<p>The concept of computational imaging has existed since the beginning of image processing; however, it was not until the 1990s that Athale first introduced it (Mait et al., <xref ref-type="bibr" rid="B81">2012</xref>). Subsequently, Stanford University, Columbia University, the Massachusetts Institute of Technology, Boston University, and others formally initiated research on CIT, establishing the Media Lab Camera Culture Group, Computational Imaging Lab, Hybrid Imaging Lab, Electrical and Computer Engineering Lab, and other laboratories dedicated to CIT research. In the USA, almost all top universities and research institutes have quickly established relevant laboratories and research centers. At the same time, many enterprises represented by the GelSight Company in the USA also quickly followed up and developed a series of products (Juyang et al., <xref ref-type="bibr" rid="B53">1992</xref>). In the military field, the US Defense Advanced Research Projects Agency (DARPA) has set up several computational imaging related projects since 2007, such as &#x0201C;ARGUS-IS (Leninger et al., <xref ref-type="bibr" rid="B68">2008</xref>),&#x0201D; &#x0201C;SCENICC (Sprague et al., <xref ref-type="bibr" rid="B120">2012</xref>),&#x0201D; and &#x0201C;AWARE (Bar-Noy et al., <xref ref-type="bibr" rid="B11">2011</xref>).&#x0201D; The North Atlantic Treaty Organization Science and Technology Agency also established a computational imaging task force in 2016, with defense units such as the US Army, Navy, Lockheed Martin, and the UK Ministry of Defense as the main members, and launched several projects such as SET-232 (Bosq et al., <xref ref-type="bibr" rid="B16">2018</xref>).</p>
<p>In China, research on computational optical imaging is consistent with international activities. Corresponding laboratories and research centers were set up by Xidian University, Tsinghua University, Beijing Institute of Technology, and other universities, China Aerospace Science and Technology Corporation, the Aerospace Information Research Institute, Xi&#x00027;an Institute of Optics and Precision Mechanics, Changchun Institute of Optics, Fine Mechanics and Physics, and Chinese Academy of Sciences. To conduct research on computational optical imaging, the Computational Optical Imaging Technology Laboratory of the Institute of Aerospace Information Innovation, Chinese Academy of Sciences, has conducted extensive research in the fields of computational spectrum, light field, and active three dimensional (3D) imaging. The computational optical remote-sensor team at the Aerospace Information Research Institute developed and launched the world&#x00027;s first spaceborne computational spectral imaging payload (Liu et al., <xref ref-type="bibr" rid="B77">2020</xref>). The National Information Laboratory and Institute of Optoelectronics Engineering at Tsinghua University have made important contributions (Cao et al., <xref ref-type="bibr" rid="B19">2020</xref>). The Institute of Computational Imaging of Xidian University relies on the Key Laboratory of Computational Imaging of Xi&#x00027;an to conduct research based on technologies such as scattered light imaging, polarization imaging, and wide area high-resolution computational imaging, and has obtained internationally recognized research results (Fei et al., <xref ref-type="bibr" rid="B35">2019</xref>). The Optical Imaging and Computing Laboratory and the Measurement and Imaging Laboratory of the Beijing Institute of Technology have also proposed optimized solutions (Xinquan, <xref ref-type="bibr" rid="B137">2007</xref>) for computational display and computational spectral imaging. 
The Intelligent Computational Imaging Laboratory of Nanjing University of Science and Technology has achieved excellent results in quantitative phase imaging, digital holographic imaging, and computational 3D imaging (Zhang et al., <xref ref-type="bibr" rid="B142">2018</xref>). There are some notable research organizations in Europe that have been actively involved in computational imaging: Imperial College Computational Imaging Group (Imperial College London), Computational Imaging Group (University College London), Image and Video Analysis Group (Trinity College Dublin), Computer Vision Laboratory (ETH Zurich), Computer Graphics and Visualization Group (University of Zaragoza), Centre for Vision, Speech, and Signal Processing (University of Surrey), Max Planck Institute for Intelligent Systems (Germany), and Computer Vision and Image Processing Group (University of Verona).</p>
<p>Although CIT research continues, and many new imaging technologies have been derived, fragmented research has led to the difficulty of global systematic consideration of CIT, weak theoretical foundational support, and unclear application requirements. At the same time, the rapid development of Graphics Processing Unit (GPU) technology and advancements in Artificial Intelligence (AI) (Sinha et al., <xref ref-type="bibr" rid="B119">2017</xref>; Barbastathis et al., <xref ref-type="bibr" rid="B10">2019</xref>) have significantly contributed to the progress and application of computational imaging. In addition, as a research field covering many individual technologies, the current development ideas of CIT are disorganized. The complexity and breadth of the system make it difficult to present a clear research context, and the common basic problems and key technologies lack in-depth thinking. CIT is a type of target-oriented research technology, and its related research serves to develop or improve specific performance indicators and improve the target imaging quality by sacrificing other non-essential dimensions.</p>
<p>In summary, based on the demand orientation of CIT, the five application perspectives of &#x0201C;higher,&#x0201D; &#x0201C;farther,&#x0201D; &#x0201C;smaller,&#x0201D; &#x0201C;wider,&#x0201D; and &#x0201C;stronger&#x0201D; in future development are analyzed in this study. The characteristics, application prospects, and mutual relations of CIT are clearly defined, and new theories and ideas of CIT are discussed from another perspective, to promote the development of CIT in an orderly, systematic, and continuous manner.</p></sec></sec>
<sec id="s3">
<title>3 Higher (resolution)</title>
<p>The optical resolution indicates the fineness of the images. Generally, the higher the resolution of an image, the more information it contains. This is an important performance indicator for imaging applications. It is difficult for photoelectric imaging systems to obtain an ideal image point from a point target in conventional optical imaging systems due to the diffraction effect of light waves, which results in a diffuse spot being obtained. The larger the size of the spot, the lower the resolution. The Abbe diffraction limit indicates that the resolution of an ideal optical system is determined by the angular radius of an Airy spot. When the diffraction limit is exceeded, the image cannot be clearly observed, limiting the resolution of the system. The angular radius of the Airy spot is given by sin&#x003B8; = 1.22 &#x003BB;<italic>/D</italic>, while the microscope resolution limit is <italic>d</italic> = 0.61 &#x003BB;/(<italic>n</italic> sin&#x003D5;) (Abbe, <xref ref-type="bibr" rid="B1">1873</xref>) [aperture diameter: D, wavelength: &#x003BB;, n: refractive index of the working medium of the lens, &#x003D5;: half of the maximum cone angle (objective lens aperture angle) at which light enters the lens]. In diffraction limited systems, the larger the aperture of the optical system, the higher the imaging resolution. However, owing to the limitations of the production technology, cost, and application scenarios, the aperture of the optical system cannot be unlimited. To achieve effective resolution improvement, CIT must explore new oversized-aperture optical systems and novel imaging techniques that exceed diffraction limits and improve image reconstruction methods.</p>
<sec>
<title>3.1 Oversized aperture imaging technology</title>
<p>The finiteness of the aperture of a photoelectric imaging system limits the imaging resolution. Traditional large-aperture photoelectric imaging systems under industrial design suffer from long development cycles, difficult processing and installation, high development costs, and poor environmental adaptability. In addition, they are prone to deterioration of the imaging quality owing to a decrease in the surface accuracy of the optical system. To solve this problem, it is necessary to develop a new type of ultra-large aperture optical imaging system, which is mainly realized using two synthetic aperture imaging technologies: primary mirror splicing and array complementary imaging. Compared with the traditional single-aperture photoelectric imaging system, the imaging resolution is higher, the mirror processing difficulty is lower, and the system is lighter.</p>
<p>In a primary mirror splicing space-based telescope, the primary mirror of the single-aperture telescope is divided into small pieces, with the pieces of sub-mirror spliced into an equivalent primary mirror, and folded in the fairing of the launch vehicle. After launch, the pieces are unfolded and reassembled through precision confocal adjustment and the resolution of the equivalent large-aperture telescope is achieved. As shown in <xref ref-type="fig" rid="F1">Figure 1</xref>, the James Webb Space Telescope, which is the successor to the Hubble Telescope, was designed to use 18 hexagonal sub-mirrors with a diagonal distance of 1.5 m (Sabelhaus, <xref ref-type="bibr" rid="B112">2004</xref>). However, even if primary mirror splicing technology is adopted, it is difficult to support a space telescope larger than 8&#x02013;10 m in the short term until the splicing and folding technology has been improved.</p>
<fig id="F1" position="float">
<label>Figure 1</label>
<caption><p>James Webb Space Telescope (Sabelhaus, <xref ref-type="bibr" rid="B112">2004</xref>).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0001.tif"/>
</fig>
<p>Array complementary image telescope technology is also known as distributed synthetic aperture interferometry imaging technology (Changsheng et al., <xref ref-type="bibr" rid="B21">2017</xref>). It collects light based on multiple small-aperture telescopes and then performs complementary imaging on the optical imaging telescope through an optical path delay line, baseline matching, beam pointing, and other relay optical systems. An imaging baseline is formed to achieve equivalent large-aperture effects. The larger the baseline, the higher the image resolution. These distributed devices can be launched separately, and assembled into a common frame structure in space. In theory, the array complementary image method can realize a larger aperture than the primary mirror splicing technique.</p>
<p>Xiang et al. (<xref ref-type="bibr" rid="B136">2021</xref>) proposed a coherent synthetic aperture imaging system. <xref ref-type="fig" rid="F2">Figure 2</xref> shows a schematic diagram of the system in a scene of two synchronous orbit satellites, one of which is equipped with a camera, and the other with a laser source for angle-changing illumination.</p>
<fig id="F2" position="float">
<label>Figure 2</label>
<caption><p>Array coherent synthetic aperture imaging (CSAI) system. <bold>(A)</bold> Schematic of CSAI system, <bold>(B)</bold> simplified CSAI system, and <bold>(C)</bold> photograph of CSAI system.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0002.tif"/>
</fig></sec>
<sec>
<title>3.2 Imaging beyond the diffraction limit</title>
<p>To achieve imaging beyond the diffraction limit, novel imaging methods, such as structure illumination microscopy (Gustafsson, <xref ref-type="bibr" rid="B43">2000</xref>), stochastic optical reconstruction microscopy (Rust et al., <xref ref-type="bibr" rid="B111">2006</xref>), negative refractive super lenses (Pendry, <xref ref-type="bibr" rid="B94">2000</xref>), and ptychography imaging (Wang et al., <xref ref-type="bibr" rid="B132">2022</xref>), have been proposed. The ptychography imaging concept was first proposed by Hoppe in the 1970s (Hegerl and Hoppe, <xref ref-type="bibr" rid="B47">1972</xref>). The core of this method is to search for a unique complex solution to satisfy the constraints of multiple far-field diffracted intensity images in overlapping scanning modes. An iterative algorithm of phase restoration is used, which sacrifices the time dimension and thus achieves an imaging result with an ultra-diffractive resolution limit. Zheng et al. (<xref ref-type="bibr" rid="B146">2013</xref>) proposed the Fourier ptychographic microscopy technique that uses an objective lens with lower magnification to simulate the performance of an objective lens with higher calculated magnification. Subsequently, a reconstruction algorithm was adopted to recover the complex amplitude information of the object and obtain high-resolution images. A conventional Fourier ptychographic imaging system uses a light emitting diode (LED) plate as an illumination source. <xref ref-type="fig" rid="F3">Figure 3</xref> shows a schematic of a conventional Fourier ptychographic imaging system (Jiasong et al., <xref ref-type="bibr" rid="B52">2016</xref>) and the reconstructed images, which have a much higher imaging resolution than conventional optical microscopic imaging. Although imaging beyond the diffraction limit has been accomplished in the field of microscopic imaging, there has been no similar breakthrough in large-scale macroscopic imaging applications. 
This is also an urgent problem that needs to be solved using computational imaging in pursuit of higher imaging resolution.</p>
<fig id="F3" position="float">
<label>Figure 3</label>
<caption><p>Fourier ptychographic microscopy system based on light emitting diode (LED) array, its imaging reconstructed results (Jiasong et al., <xref ref-type="bibr" rid="B52">2016</xref>). <bold>(A)</bold> Fourier ptychographic microscopy system based on LED array, and <bold>(B)</bold> reconstructed images.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0003.tif"/>
</fig>
<p>In addition to the non-interference computational imaging microscopy techniques introduced above, interferometric computational imaging techniques (Park et al., <xref ref-type="bibr" rid="B93">2018</xref>) such as synthetic aperture quantitative phase imaging (Cotte et al., <xref ref-type="bibr" rid="B26">2013</xref>) have doubled the maximum spatial frequency as shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. Alexandrov et al. (<xref ref-type="bibr" rid="B5">2006</xref>) introduced a novel synthetic aperture optical microscopy technique that generates high-resolution, wide-field images in both amplitude and phase using Fourier holograms. The spatial and spectral qualities of the illumination field, as well as the collection and solid angles, determine the part of the complex two-dimensional spatial frequency spectrum of an object that is captured by each hologram. They showcased the use of synthetic microscopic imaging to capture spatial frequencies that are beyond the modulation transfer function of the collection optical system, all while maintaining a long working distance and wide field of view. However, its capabilities are restricted by the numerical aperture of the objective lens.</p>
<fig id="F4" position="float">
<label>Figure 4</label>
<caption><p><bold>(A)</bold> Reconstructed image for azimuthal angle 0&#x000B0;; <bold>(B</bold>, <bold>C)</bold> phase images of selected areas of synthesized image; <bold>(D)</bold> confocal microscope image.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0004.tif"/>
</fig></sec>
<sec>
<title>3.3 Super-resolution reconstruction</title>
<p>Super-resolution reconstruction is a type of information processing technique that uses low-resolution image recovery to obtain high-resolution images, and was first proposed by Harris (<xref ref-type="bibr" rid="B46">1964</xref>). Depending on the number of raw low-resolution images, it can be classified as single-image or multi-image super-resolution. Dong et al. (<xref ref-type="bibr" rid="B29">2014</xref>) applied deep learning to a natural image super-resolution reconstruction procedure and proposed a super-resolution convolutional neural network (SRCNN) model, as shown in <xref ref-type="fig" rid="F5">Figure 5</xref>. To address the problem of the weak learning ability of the SRCNN shallow model, Kim et al. (<xref ref-type="bibr" rid="B60">2016</xref>) proposed a very deep super-resolution network model that included 20 convolutional layers. The use of a deeper network model improved the reconstruction effect. However, a deeper network model results in slower convergence speed. Lim et al. (<xref ref-type="bibr" rid="B74">2017</xref>) proposed an enhanced deep super-resolution reconstruction network model with 69 convolutional layers. This method reduced the memory requirements by approximately 40% and improved the convergence speed by improving the residual. The restoration results are shown in <xref ref-type="fig" rid="F6">Figure 6</xref>.</p>
<fig id="F5" position="float">
<label>Figure 5</label>
<caption><p>Comparison of super-resolution reconstruction effect of convolutional neural network. <bold>(A)</bold> Image super-resolution results, and <bold>(B)</bold> peak SNR (PSNR) curve.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0005.tif"/>
</fig>
<fig id="F6" position="float">
<label>Figure 6</label>
<caption><p>Enhanced deep super-resolution (EDSR) model restoration results.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0006.tif"/>
</fig>
<p>However, because the super-resolution reconstruction technology relies only on subsequent data processing, the result of the super-resolution reconstruction is different from the real value. The next research direction is to organically combine super-resolution reconstruction with the imaging process to achieve real-time super-resolution.</p></sec></sec>
<sec id="s4">
<title>4 Further (imaging distance)</title>
<p>The traditional industrial design of optoelectronic imaging relies on ballistic light. Owing to the exponential decay of the energy intensity of ballistic light in imaging environments, such as clouds, smoke, and haze, the traditional imaging distance is severely limited. Moreover, for long-distance imaging, the target information presents a low SNR. For signals below 1 dB, current recovery methods cannot effectively extract the target information. To pursue longer detection distances, new imaging methods that are suitable for extreme imaging environments are needed to mine and interpret imaging information.</p>
<sec>
<title>4.1 Scattered light imaging technology</title>
<p>An optical imaging system that is exposed to bad weather conditions such as fog, haze, rain, or snow, or even to underwater imaging conditions, cannot obtain the target information directly owing to the random transmission of photons, which can only result in an irregular distribution of the scattered light field. Imaging through scattering technology is a mainstream imaging technology that can recover clear target information through the deep interpretation of scattered light images carrying hidden target information. Currently, imaging through scattering technology includes wavefront shaping (Katz et al., <xref ref-type="bibr" rid="B56">2011</xref>), optical memory effect (OME)-based nonvisual imaging (Osnabrugge et al., <xref ref-type="bibr" rid="B90">2017</xref>), and deep learning methods.</p>
<sec>
<title>4.1.1 Wavefront shaping</title>
<p>Vellekoop and Mosk (<xref ref-type="bibr" rid="B130">2007</xref>) proposed a scattered light imaging technique based on wavefront shaping (Vellekoop, <xref ref-type="bibr" rid="B128">2010</xref>). The experimental setup and imaging results are shown in <xref ref-type="fig" rid="F7">Figure 7</xref>. When light passes through a strong scattering medium, the phase at each position behind the scattering layer is randomly distributed, forming a speckle image as shown in <xref ref-type="fig" rid="F7">Figure 7B</xref>. However, when a feedback signal is introduced, as shown in <xref ref-type="fig" rid="F7">Figure 7C</xref>, the brightness of the target behind the scattering layer is three times higher than that of the speckle image, and the focusing effect is improved beyond that of an optical lens. Vellekoop et al. (<xref ref-type="bibr" rid="B129">2010</xref>) used feedback-based wavefront shaping imaging technology to focus light at a plane 6 &#x003BC;m behind a traditional optical system. It had a spot diameter 1/10 that of traditional imaging, which significantly improved the resolution of the optical system. Subsequently, Katz et al. (<xref ref-type="bibr" rid="B57">2012</xref>) used a feedback-based wavefront-shaping method to achieve real-time imaging through scattering media using incoherent light sources. The imaging effect is shown in <xref ref-type="fig" rid="F7">Figures 7E</xref>, <xref ref-type="fig" rid="F7">F</xref>, which significantly promotes the engineering application of feedback-based wavefront shaping technology.</p>
<fig id="F7" position="float">
<label>Figure 7</label>
<caption><p>Principle of wavefront shaping based on feedback and imaging results. <bold>(A, C)</bold> Experimental diagrams, <bold>(B)</bold> speckle image, and <bold>(D)</bold> single-point focusing result. <bold>(E)</bold> Camera image with incoherent light before correction, and <bold>(F)</bold> optimized phase pattern.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0007.tif"/>
</fig>
<p>Popoff et al. (<xref ref-type="bibr" rid="B96">2010a</xref>) first proposed a method based on transfer matrix (TM) measurement of the scattering medium as shown in <xref ref-type="fig" rid="F8">Figure 8A</xref>. The core connects the incident light field to the outgoing light field through a complex matrix (Pendry, <xref ref-type="bibr" rid="B94">2000</xref>). By measuring this complex matrix and combining it with optical phase conjugation (OPC) technology, focused imaging at any position or time can be achieved (Popoff et al., <xref ref-type="bibr" rid="B95">2010b</xref>; Dr&#x000E9;meau et al., <xref ref-type="bibr" rid="B31">2015</xref>). Liutkus et al. (<xref ref-type="bibr" rid="B78">2014</xref>) introduced compressed sensing in TM measurements, which significantly reduced the measurement difficulty of the transmission matrix. In the same year, Andreoli et al. (<xref ref-type="bibr" rid="B7">2015</xref>) proposed a method for measuring TM at different wavelengths, which solved the problem of wideband-focused imaging by establishing a 3D multispectral TM. In multispectral research, Dong et al. (<xref ref-type="bibr" rid="B30">2018</xref>) successfully achieved transparent scattering medium imaging using a multiplexing phase-inversion method, as shown in <xref ref-type="fig" rid="F8">Figure 8B</xref>.</p>
<fig id="F8" position="float">
<label>Figure 8</label>
<caption><p>Optical TM of scattering imaging and experimental results. <bold>(A)</bold> Optical TM of scattering imaging, <bold>(B)</bold> initial speckle pattern and single point focus.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0008.tif"/>
</fig>
<p>The imaging through scattering technology based on optical phase conjugation obtains the original incident light-field information from the reverse light path through the reciprocity of turbid media and the invariance of the time reversal path, as shown in <xref ref-type="fig" rid="F9">Figure 9</xref> (Feld et al., <xref ref-type="bibr" rid="B36">2008</xref>). Compared with feedback-based wavefront shaping technology, the measurable number of channels in this method is not a single channel but multiple channels, making it suitable for real-time measurement. Shen et al. (<xref ref-type="bibr" rid="B118">2016</xref>) successfully used the digital OPC (DOPC) technology to achieve light focusing through biological tissues with a thickness of 9.6 cm and imaging of chicken breast tissue <italic>in vitro</italic> with a thickness of 2.5 cm, effectively expanding the sample thickness in OPC technology. The imaging effect (shown in <xref ref-type="fig" rid="F9">Figure 9C</xref>) has a huge advantage in non-invasive optical imaging, manipulation, and treatment of deep tissues. Subsequently, Ruan et al. (<xref ref-type="bibr" rid="B110">2017</xref>) achieved precise control of neurons through focused imaging of 2 mm thick live brain tissue. Wavefront shaping technology has great application potential in fields such as endoscopy, super-resolution imaging, nano positioning, and cryptography.</p>
<fig id="F9" position="float">
<label>Figure 9</label>
<caption><p>Optical phase conjugation (OPC) of imaging through scattering. <bold>(A)</bold> Tissue turbidity information, and <bold>(B)</bold> OPC light field reconstruction. <bold>(C)</bold> Result of 2.5 cm isolated chicken breast tissue.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0009.tif"/>
</fig></sec>
<sec>
<title>4.1.2 OME</title>
<p>Feng et al. (<xref ref-type="bibr" rid="B37">1988</xref>) and Freund et al. (<xref ref-type="bibr" rid="B38">1988</xref>) first proposed the OME. Specifically, the intensity of the speckle pattern obtained after scattering in the medium does not change significantly; it just changes by a small amount when the incident angle of the light wave changes within a small range. By using the OME of scattering media, Bertolotti et al. (<xref ref-type="bibr" rid="B14">2012</xref>) proposed a scanning-based speckle correlation imaging technique. The incident light scans the imaging target within the OME range to obtain speckle images at different angles. The target is reconstructed using a phase-recovery algorithm, as shown in <xref ref-type="fig" rid="F10">Figure 10A</xref>. Although this method can achieve single-frame imaging, the complex data collection process cannot meet the requirements of real-time detection.</p>
<fig id="F10" position="float">
<label>Figure 10</label>
<caption><p><bold>(A)</bold> Schematic of scanning-based speckle correlation imaging principle. <bold>(B)</bold> Rotation tracking results of different objects.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0010.tif"/>
</fig>
<p>In addition, Katz et al. (<xref ref-type="bibr" rid="B55">2014</xref>) proposed a non-invasive imaging method based on single-shot speckle correlation (SSC) that does not need to consider spatial scanning and is suitable for non-invasive imaging through scattering. Through the long-term efforts of researchers, the SSC technology has emerged in multiple fields. Stasio et al. (<xref ref-type="bibr" rid="B121">2015</xref>) proposed a light manipulation method for multi-core fibers by combining DOPC and OME in fiber imaging. The following year, Porat et al. (<xref ref-type="bibr" rid="B99">2016</xref>) also introduced SSC into fiber optic imaging, breaking the constraint that the input and output ends of traditional fiber optic endoscopes need to fit with the imaging object and image plane. Qiao et al. (<xref ref-type="bibr" rid="B102">2017</xref>) proposed a non-invasive 3D light field manipulation method based on OME. The following year, Chengfei et al. (<xref ref-type="bibr" rid="B22">2018</xref>) tracked 3D targets hidden behind scattering media by studying the correlation of imaging objects through different positions and postures, as shown in <xref ref-type="fig" rid="F10">Figure 10B</xref>.</p>
<p>The size of the image object in the scattered light imaging technique based on the OME is constrained by the OME range, which limits its field of view. In actual scene imaging, OME is not applicable when the scattering medium is thick and the imaging object is too large.</p></sec>
<sec>
<title>4.1.3 Non-line-of-sight imaging technology</title>
<p>In a specific combat environment, such as urban street combat and military counterterrorism, it is necessary to be able to observe terrorist activities and master the initiative of counterterrorism by circumnavigating obstacles, such as streets and walls, over a long distance. Therefore, there is a need to view objects hidden by obstacles, which cannot be achieved using traditional optics. In the non-visual field scenario, the information carried in back-scattered light can be interpreted to realize real-time monitoring of multiple targets bypassing obstacles.</p>
<p>Ramesh and Davis (<xref ref-type="bibr" rid="B103">2008</xref>) proposed the non-line-of-sight (NLOS) imaging technique. Based on in-depth exploration, current NLOS imaging is divided mainly into active and passive NLOS imaging. The difference between the two methods is that active NLOS imaging uses actively modulated lasers, captures photons after three scattering cycles, and completes the reconstruction of the target object by calculating the time-of-flight information of the photons. Passive NLOS imaging uses natural ambient light as the light source, with no need for modulation, to achieve the reconstruction of the hidden object.</p>
<p>Raskar (<xref ref-type="bibr" rid="B105">2012</xref>) first used a streak tube camera to realize the 3D imaging of hidden targets. The experimental setup is shown in <xref ref-type="fig" rid="F11">Figure 11</xref>. It can reconstruct images of targets at different depths with a resolution of up to centimeters and high temporal and spatial resolutions. Gariepy et al. (<xref ref-type="bibr" rid="B41">2016</xref>), and later Chan et al. (<xref ref-type="bibr" rid="B20">2017</xref>), achieved NLOS detection within a few seconds and realized dynamic tracking of moving targets as shown in <xref ref-type="fig" rid="F12">Figure 12</xref>. Wu et al. (<xref ref-type="bibr" rid="B134">2021</xref>) increased the imaging distance by three orders of magnitude for the first time, achieving 1.43 km of NLOS detection imaging and real-time tracking of hidden target objects. They designed a near-infrared, high-efficiency NLOS imaging system and improved algorithm models to solve the problems of optical attenuation and spatiotemporal information mixing caused by diffuse reflection. This system is expected to be used in real scenarios such as daily transportation, national defense, and security.</p>
<fig id="F11" position="float">
<label>Figure 11</label>
<caption><p>Active NLOS imaging technique based on striped-tube camera. <bold>(A)</bold> Imaging light path, <bold>(B)</bold> striped image, and <bold>(C)</bold> reconstruction result.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0011.tif"/>
</fig>
<fig id="F12" position="float">
<label>Figure 12</label>
<caption><p>NLOS imaging principle and reconstruction result based on occlusion. <bold>(A)</bold> Experimental optical path, <bold>(B)</bold> raw data without occlusion, <bold>(C)</bold> original data with occlusion, <bold>(D)</bold> reconstruction result without occlusion, and <bold>(E)</bold> reconstruction result with occlusion.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0012.tif"/>
</fig>
<p>Active NLOS imaging has good resolution and high accuracy. Ambient light has a small influence, and can be used for 3D reconstruction with the help of a modulated light source. However, passive NLOS imaging can achieve the reconstruction of hidden targets using natural ambient light, which makes it more suitable for practical applications. Currently, passive NLOS imaging is mainly based on spatial coherence or intensity coherence. Batarseh et al. (<xref ref-type="bibr" rid="B12">2018</xref>) used a dual-phase Sagnac interferometer to reconstruct and estimate the position of hidden objects; the imaging results are shown in <xref ref-type="fig" rid="F13">Figure 13A</xref>. Saunders et al. (<xref ref-type="bibr" rid="B113">2019</xref>) achieved passive NLOS imaging of two-dimensional scenes based on the intensity coherence theory by incorporating occlusions of estimated positions in their experiments to obtain the spatial information of photons. The image reconstruction results are shown in <xref ref-type="fig" rid="F13">Figure 13B</xref>. Although passive NLOS imaging is suitable for practical scenarios, because its light source does not require modulation, the detector receives less photon information, resulting in a low SNR and imaging resolution (Batarseh et al., <xref ref-type="bibr" rid="B12">2018</xref>).</p>
<fig id="F13" position="float">
<label>Figure 13</label>
<caption><p><bold>(A, B)</bold> The intensity distribution across the DuPSaI field of view corresponding to the square and equilateral triangle objects, respectively. <bold>(C, D)</bold> Plots of real and imaginary components of SCF measured for the square and equilateral triangle objects, respectively. The imaginary component is color coded and superposed on the 3D representation of the real part of SCF. <bold>(E, F)</bold> Variations of real and imaginary SCF components at y = 0. The corresponding apodizing function &#x00393;A(s) is also indicated by dashed lines. <bold>(G, H)</bold> The 1D projection of the intensity distributions recovered from SCF measurements (solid lines) together with the actual intensity profiles evaluated across the targets (dotted lines).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0013.tif"/>
</fig>
<p>In general, non-visual imaging can effectively detect scenes outside the visual domain, which is of great significance for applications such as military operations, public transportation safety, hostage rescue, anti-terrorism street fighting, and biomedical imaging.</p></sec>
<sec>
<title>4.1.4 Imaging based on deep learning</title>
<p>With the continuous development of computer technology, many studies report on the use of deep learning to solve relevant problems in imaging through scattering. If traditional scattered light imaging is regarded as a forward propagation process, scattered light imaging based on deep learning is a reverse solution process in which the input light field information is obtained by building a suitable neural network based on the output light field intensity. Ando et al. (<xref ref-type="bibr" rid="B6">2015</xref>) introduced deep learning in imaging through scattering for the first time and used a support vector machine to classify the scattered intensity maps of collected face data and non-face data. Lyu et al. (<xref ref-type="bibr" rid="B80">2019</xref>) established a hybrid neural network (HNN) model to recover hidden targets in strongly scattering media, as shown in <xref ref-type="fig" rid="F14">Figure 14</xref>. Although the HNN reconstruction results are similar to those of the original image, the reconstruction results based on OME do not recover the image under the same conditions. The recovery range of scattered light imaging based on deep learning is wider than that of OME. Subsequently, Li et al. (<xref ref-type="bibr" rid="B72">2018</xref>) trained scattering maps with different scattering media, and their network structure autonomously used the statistical features in the training data to realize the image recovery of different types of objects under different scattering media. Lai et al. (<xref ref-type="bibr" rid="B63">2021</xref>) introduced the idea of transfer learning to the problem of recovering different types of objects by training images from multimode fibers (MMFs) and scattering media. MMF data was migrated to a scattering medium to achieve image recovery for different objects and scattering media.</p>
<fig id="F14" position="float">
<label>Figure 14</label>
<caption><p>Character reconstruction results. <bold>(A)</bold> Speckle pattern, <bold>(B)</bold> hybrid neural network (HNN) reconstruction results, <bold>(C)</bold> original image, and <bold>(D)</bold> reconstruction results of optical memory effect.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0014.tif"/>
</fig>
<p>Scattered light imaging based on deep learning has several advantages compared with traditional scattered light imaging. For example, scattered light imaging can be realized through intensity measurement, and for strong scattering media, imaging can obtain a larger field of view. Nevertheless, it is undeniable that the deep learning method still has shortcomings such as a heavy computing burden, long computing time, high cost, and weak flexibility. Moreover, it cannot explain the physical laws of light propagation in the scattering medium. In addition, the better trained network is not well adapted to other systems, and the network structure cannot automatically adjust the parameters according to the changes in the imaging environment.</p></sec></sec>
<sec>
<title>4.2 Polarization imaging technology</title>
<p>Polarization plays an irreplaceable role in the study of descattering. The current research shows that the polarization distribution characteristics of a scattered light field are closely related to the imaging distance. The polarization statistical characteristics of a scattering medium, such as water, were studied, and the intensity and polarization characteristic distribution of the scattered light field were considered globally to address the long-distance imaging problem affected by the medium.</p>
<sec>
<title>4.2.1 Polarization descattering imaging technique</title>
<p>Tyo et al. (<xref ref-type="bibr" rid="B127">1996</xref>) investigated the ability of polarization difference images for recovering target information at different scattering levels, and Tyo (<xref ref-type="bibr" rid="B126">2000</xref>) analyzed the point spread functions (PSFs) of polarization difference and summation images, and investigated the PSFs in single-scattering and multiple-scattering media using the Monte Carlo algorithm. The PSFs of the polarization difference images were found to be much narrower than those of the polarization summation images, as shown in <xref ref-type="fig" rid="F15">Figure 15A</xref>, implying that the use of the polarization difference technique in transmission scattered light imaging can acquire target images with more high-frequency information and better imaging results.</p>
<fig id="F15" position="float">
<label>Figure 15</label>
<caption><p><bold>(A)</bold> PSF comparison between polarization difference and polarization summing images. <bold>(B)</bold> De-hazing results in a real scene (Liu et al., <xref ref-type="bibr" rid="B75">2015</xref>).</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0015.tif"/>
</fig>
<p>Schechner et al. (<xref ref-type="bibr" rid="B115">2003</xref>) proposed a polarization haze imaging model that showed that the two types of spectral intensities received by the detector (background-scattered light and target-information light) exhibited obvious polarization differences. The distribution of target information light can be effectively resolved by the extraction and interpretation of the polarization difference information. Panigrahi et al. (<xref ref-type="bibr" rid="B92">2015</xref>) obtained polarization images from snapshot polarization cameras and proposed a linear representation method for optimizing the polarization image contrast, which effectively enhanced the visual effect of long-distance imaging over 1 km. To date, polarization differential imaging technology has developed relatively maturely; however, the use of this method is limited when the haze concentration is high. To reduce the complexity of the algorithm, this method assumes that the reflected light of the target is unpolarized, which is not applicable in many scenarios. In addition, this technique is highly dependent on the sky region and is not universal.</p>
<p>To address these problems, Fang et al. (<xref ref-type="bibr" rid="B33">2014</xref>) proposed a haze-removal algorithm. For no-sky background imaging, Zhang et al. (<xref ref-type="bibr" rid="B144">2016</xref>) combined polarization imaging technology with dark channel prior technology and proposed a new haze removal method. Although the above methods can achieve image defogging, they do not consider the difference in the frequency domain between the target and haze. Liu et al. (<xref ref-type="bibr" rid="B75">2015</xref>) analyzed the characteristics of haze images based on the frequency domain distribution of the target information and haze information, and proposed a multi-scale polarization defogging technology, greatly improving the details of the image shown in <xref ref-type="fig" rid="F15">Figure 15B</xref>.</p>
<p>Schechner and Karpel (<xref ref-type="bibr" rid="B114">2006</xref>) proposed an underwater polarized imaging model based on a polarized differential imaging defogging technique combined with an image processing defogging technique that can achieve clear underwater imaging. Liu et al. (<xref ref-type="bibr" rid="B75">2015</xref>) achieved clear transmission scattering imaging. They combined the differences in the spatial frequency distribution of the scattering medium and atmospheric molecules with multiscale image processing based on polarized differential imaging. This was followed by further studies on underwater long-range polarization extended-range imaging (Han et al., <xref ref-type="bibr" rid="B45">2017</xref>; Liu et al., <xref ref-type="bibr" rid="B76">2018</xref>), as shown in <xref ref-type="fig" rid="F16">Figure 16</xref>. Hu et al. (<xref ref-type="bibr" rid="B50">2021</xref>) proposed a polarization differential underwater imaging technique with three degrees of freedom.</p>
<fig id="F16" position="float">
<label>Figure 16</label>
<caption><p>Underwater imaging results of polarization differential imaging technology. <bold>(A)</bold> CP (correlation peak) value curve, <bold>(B)</bold> traditional and polarization imaging results, <bold>(C)</bold> passive polarization imaging simulated results, and <bold>(D)</bold> underwater reconstructed image.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0016.tif"/>
</fig>
<p>Although polarization imaging technology can effectively remove the adverse effects of scattering media, the use of polarizers and other components leads to the loss of light intensity energy, which is not conducive to remote imaging; therefore, it is necessary to develop a high-efficiency polarization detection method. In addition, the precise interpretation of the polarization information is a topic for future research focusing on polarization imaging technology.</p></sec>
<sec>
<title>4.2.2 Single photon imaging technology</title>
<p>To achieve clear imaging and target detection under harsh conditions at long or even at very long distances, signal processing needs to be optimized. According to the results of signal attenuation quantification, the signal-to-background ratio interval of the current detected signal has been analyzed and studies conducted on the extraction and recovery of weak signals from chaotic systems. Imaging targets with low SNRs at long distances usually requires a long time for information accumulation and processing. Li et al. (<xref ref-type="bibr" rid="B73">2020</xref>) designed a single-photon light detection and ranging (LIDAR) imaging system for the detection of very weak single-photon signals, achieving 3D imaging of targets at a distance of 45 km. In 2021, this imaging system was further optimized to achieve 3D imaging of mountain targets at a distance of 200 km (Li J. Y. et al., <xref ref-type="bibr" rid="B69">2021</xref>; Li X. et al., <xref ref-type="bibr" rid="B70">2021</xref>), as shown in <xref ref-type="fig" rid="F17">Figure 17</xref>.</p>
<fig id="F17" position="float">
<label>Figure 17</label>
<caption><p>Illustration of single photon long-range imaging over 200 km. <bold>(A)</bold> Photograph of mountains, <bold>(B)</bold> experimental setup, <bold>(C)</bold> experimental hardware, <bold>(D)</bold> experimental environment, and <bold>(E)</bold> 3D profile of mountains.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0017.tif"/>
</fig></sec></sec></sec>
<sec id="s5">
<title>5 Smaller (weight, volume, and power consumption)</title>
<p>The traditional optical system design, under the guidance of industrial design ideas, is driven by aberrations. To satisfy the requirements of the imaging field of view, focal length, and image quality, multiple lens combinations made of different materials are required to eliminate aberrations. Such systems are often complex in structure, large in size, and heavy, and an increase in the number of lenses causes difficulties in processing technology. It is difficult to realize miniaturized and lightweight photoelectric imaging systems.</p>
<sec>
<title>5.1 Optical imaging technology imitating the compound eye</title>
<p>Insect compound eyes are small, have a large field-of-view angle, and are sensitive to high-speed moving objects. Inspired by their unique imaging modes, the compound eye-like optical system achieves wide-area high-resolution imaging by mimicking biological visual mechanisms, which significantly improves the imaging performance of optoelectronic imaging and detection equipment. Brady and Hagen (<xref ref-type="bibr" rid="B17">2009</xref>) proposed the TOMBO compound eye imaging system, which was designed using the juxtaposed compound eye structure of dragonflies as a reference, as shown in <xref ref-type="fig" rid="F18">Figure 18</xref>. Each aperture images the full field of view. Compared with traditional single-aperture imaging, the multi-aperture imaging method effectively improves the information capacity of the system. Although multi-scale imaging is obviously different from the multi-aperture imaging approach, the multi-scale system adopts a multi-stage system cascade to achieve high-performance imaging, and most of the more mature imaging systems currently contain only two stages (<xref ref-type="fig" rid="F18">Figure 18C</xref>), large-scale primary optics and small-scale secondary optics. The large-scale primary optics are used mainly to collect as much light energy as possible and to perform the initial aberration correction, whereas small-scale secondary optics are used mainly to secondarily transmit the light passing through the primary optics and image it onto the detector behind the secondary optics. Overall, however, the compound eye-like optical imaging technology not only effectively solves the problem of constraints between a large field of view and high resolution but also significantly reduces the size, weight, and power consumption of the imaging system.</p>
<fig id="F18" position="float">
<label>Figure 18</label>
<caption><p>Principle of multi-aperture imaging. <bold>(A)</bold> Traditional imaging, <bold>(B)</bold> 3 &#x000D7; 3 multi-aperture imaging, and <bold>(C)</bold> 5 &#x000D7; 5 multi-aperture imaging.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0018.tif"/>
</fig>
<p>Since 2012, the Swiss Federal Institute of Technology in Lausanne has successively developed Panoptic (Afshari et al., <xref ref-type="bibr" rid="B2">2012</xref>; Popovic et al., <xref ref-type="bibr" rid="B98">2014</xref>), OMNI-R (Akin et al., <xref ref-type="bibr" rid="B4">2013</xref>), GigaEye-1 (Cogal et al., <xref ref-type="bibr" rid="B23">2014</xref>), GigaEye-2 (Popovic, <xref ref-type="bibr" rid="B97">2016</xref>), and other multi-aperture imaging systems, among which, the OMNI-R has a full-field-of-view angle as high as 360&#x000B0; &#x000D7; 100&#x000B0;, and its structure is similar to that of GigaEye-1. However, GigaEye-1 supports two imaging modes and exhibits good imaging effects for both static and dynamic scenes. In 2017, an ultra-compact high-definition imitation compound eye system was developed (Cogal and Leblebici, <xref ref-type="bibr" rid="B24">2016</xref>), which has a pixel count of up to 1.1 million pixels while achieving full-field-of-view of 180&#x000B0; &#x000D7; 180&#x000B0; imaging, and the radius of the whole system is only 5 mm. The system is equipped with a distributed illumination system, which is able to achieve dark-environmental imaging. In 2014, a new astronomical telescope was designed by Law et al. (<xref ref-type="bibr" rid="B65">2014</xref>, <xref ref-type="bibr" rid="B64">2015</xref>) that can image an area of 384 square degrees and detect up to 16 magnitudes, which greatly improves the telescope&#x00027;s imaging range and detection capability.</p></sec>
<sec>
<title>5.2 Design of computational optical system</title>
<p>In a traditional optical system design, the imaging link has a one-way design and independent optimization. This means that the optical design and image-processing algorithms are independent of each other, and the imaging link cannot be considered as a whole. Hence, it is easy to miss the optimal scheme for the joint design of an optical system and image processing. The minimalist optical system comprehensively considers the entire imaging link and achieves the purpose of simplifying the optical system structure and reducing costs based on the idea of global optimization to better promote the engineering application of the optical system. A comparison of the two design processes is shown in <xref ref-type="fig" rid="F19">Figure 19A</xref> (Stork and Robinson, <xref ref-type="bibr" rid="B122">2008</xref>).</p>
<fig id="F19" position="float">
<label>Figure 19</label>
<caption><p><bold>(A)</bold> Traditional and global designs compared. <bold>(B)</bold> Restoration images of traditional and joint designs compared. <bold>(C)</bold> Image quality of three lens joint design and six lens traditional design compared.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0019.tif"/>
</fig>
<p>Robinson and Stork (<xref ref-type="bibr" rid="B107">2006</xref>) proposed a method for the joint design of an optical system, detector, and image processing for document scanners and other instruments. The method uses the mean square error (MSE) of the predicted recovered image and the optically blurred image as an evaluation index and combines it with the Wiener filtering-based image recovery algorithm to recover optically blurred images. The effect of the recovered image was better than that of the traditional design (Robinson and Stork, <xref ref-type="bibr" rid="B108">2007</xref>, <xref ref-type="bibr" rid="B109">2008</xref>), as shown in <xref ref-type="fig" rid="F19">Figure 19B</xref>. In 2008, Robinson and Stork proposed the idea of co-designing an optical design with image restoration, which achieved end-to-end optimization by combining an optical transfer function with an image processing system using the MSE between the restored and original target images. Li J. Y. et al. (<xref ref-type="bibr" rid="B69">2021</xref>) combined Zemax software and image processing through data exchange dynamic link communication technology to form a closed-loop link for optical-algorithmic joint design optimization, which reduced the difficulty of the optical system design along with the system volume, weight, and cost, as shown in <xref ref-type="fig" rid="F19">Figure 19C</xref>. The imaging quality of a jointly designed simple lens can be comparable to that of a traditionally designed complex lens.</p>
<p>Computational optical system design technology effectively solves the shortcomings of the traditional optical system with complex internal structure, large volume, and high cost through the whole link integration optimization design and provides strong technical support for the miniaturization, light weight, and portability of photoelectric imaging equipment.</p></sec>
<sec>
<title>5.3 Computational detector technology</title>
<p>Traditional photodetectors are based on photosensitive semiconductors. The thermal sensitivity, negative resistivity, temperature characteristics of semiconductors, and quantum effects generated for detectors of small sizes affect the detection efficiency, thus affecting the photoelectric imaging ability. Therefore, it is necessary to find new photosensitive materials and create new photosensitive components to overcome the material limitations of silicon-based semiconductors to improve the detection sensitivity and reduce the detection threshold. At present, photodetectors can only respond to light intensity. With the attenuation of light waves through long-distance transmission, the light intensity information reaching the detector is limited, and more is needed to retain the physical quantity information of other dimensions. The development of a detector with multi-dimensional physical quantity response is of great significance for achieving remote imaging.</p>
<p>A traditional photodetector has a fixed plane structure and depends on the correction compensation of the optical system to obtain the ability of the focusing plane target surface. This can be achieved by using different lens combinations, resulting in a complex system, image distortion, and quality degradation problems. The retinal structure of the human eye is concave, allowing imaging to be realized by relying only on the relatively simple optical structure of the lens. Inspired by this, if an imaging detector with a flexible curved surface is used, as shown in <xref ref-type="fig" rid="F20">Figure 20</xref>, and the adaptive layout is conducted according to the focal plane shape of the optical system, the correction pressure of the optical system&#x00027;s imaging distortion can be reduced, the optical system design and complexity can be simplified, and high-resolution performance can be achieved (Zhang et al., <xref ref-type="bibr" rid="B143">2017</xref>). However, current processing technology limits the preparation of curved surface detectors, and improving the process is a major difficulty.</p>
<fig id="F20" position="float">
<label>Figure 20</label>
<caption><p>Curved computational detector. <bold>(A)</bold> Photograph of hemispherical FPA based on original silicon optoelectronics, <bold>(B)</bold> ray patterns traced, <bold>(C)</bold> imaging setup, and <bold>(D)</bold> image result of &#x0201C;W&#x0201D; on curved detector.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0020.tif"/>
</fig>
<p>In the process of converting incident photons into discrete digital signals that can be processed by a computer, it is necessary to perform signal sampling; Nyquist sampling used by traditional detectors causes considerable data redundancy and time-consuming data processing. Therefore, it is necessary to design a new non-uniform sampling computing detector and establish a matching computational optical system design scheme so that the significance of detection and sampling focuses on the target information to improve the imaging resolution and signal processing speed. In addition, it is necessary to develop an optical processing method that can transfer the information calculated from the electrical signal processing at the back end of the detector to the front end of the detector so that the detector itself can preprocess the imaging information.</p></sec></sec>
<sec id="s6">
<title>6 Wider (imaging field of view)</title>
<p>The amount of information obtained by an optical imaging system is determined by its field of view and the resolution of the optical system. A large field of view can cover a larger observation range, and a high resolution can provide more detailed information. The amount of information focused through the lens and collected by the imaging equipment is always limited because the spatial bandwidth product cannot be improved, similar to that used to determine the performance of a traditional imaging system. This results in a pair of irreconcilable contradictions between the spatial resolution and the imaging field of view. Therefore, through computational imaging, the system coding mode is used to upgrade the dimensions of the information, improve the utilization rate of the light-field information, and achieve a large field of view and high resolution.</p>
<sec>
<title>6.1 Single-scale and multi-aperture imaging technology</title>
<p>Single-scale multi-aperture imaging is a technique used to improve and realize the function of an imaging system by mimicking biological visual mechanisms, such as the widely used fisheye lens. To achieve a large field of view and high-resolution imaging, a biomimetic multi-aperture imaging system was developed by drawing on the structure of the compound eye of arthropods, which was first proposed by Brady and Hagen (<xref ref-type="bibr" rid="B17">2009</xref>), to some extent solving the problem of the incompatibility of a large field of view and high resolution.</p>
<p>Akin et al. (<xref ref-type="bibr" rid="B4">2013</xref>) developed a high-resolution imaging system inspired by the panoramic optics approach, capable of omnidirectional video recording at 30 frames per second and a resolution of 9,000 &#x000D7; 2,400. The imaging field of view of the system reached 360&#x000B0; &#x000D7; 100&#x000B0;, and the physical and photographic samples of the system are shown in <xref ref-type="fig" rid="F21">Figure 21A</xref>.</p>
<fig id="F21" position="float">
<label>Figure 21</label>
<caption><p><bold>(A)</bold> High resolution omni-directional light field imaging system and results. <bold>(B)</bold> ARGUS-IS system and imaging renderings. <bold>(C)</bold> Multi-aperture system prototype and imaging results.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0021.tif"/>
</fig>
<p>ARGUS-IS (Leninger et al., <xref ref-type="bibr" rid="B68">2008</xref>), an aerial camera system jointly developed by DARPA in the USA and Aerospace Systems in the UK in 2013, has a strong imaging reconnaissance capability. With 1.8 billion pixels, the system can monitor an area of more than 24 square kilometers from an altitude of 5.4 km, and detect more than 40,000 moving objects, including the identification and calibration of moving people and cars. It also has powerful information storage capabilities, as shown in <xref ref-type="fig" rid="F21">Figure 21B</xref>. Its imaging resolution is sufficient to identify and track vehicles and pedestrians from an altitude of 6,500 m, ground resolution of 0.15 m, instantaneous field of view angle of 23 &#x003BC;rad, and can simultaneously track at least 65 targets.</p>
<p>Fu et al. (<xref ref-type="bibr" rid="B40">2015</xref>) designed the first generation of a bionic compound eye optical system with a large field of view and low edge aperture resolution. However, the central aperture had the opposite effect (Kitamura et al., <xref ref-type="bibr" rid="B61">2004</xref>). The 31 components detected the target using the edge aperture and accurately identified the target using the center aperture with a full field of view of 53.9&#x000B0;. The research group of Shao et al. (<xref ref-type="bibr" rid="B117">2020</xref>) designed a multi-aperture system with a full field of view of 123.5&#x000B0; &#x000D7; 38.5&#x000B0;. The system also supported real-time viewing and other functions, such as images, videos, and other information. The prototype and imaging results are shown in <xref ref-type="fig" rid="F21">Figure 21C</xref>.</p>
<sec>
<title>6.2 Concentric multi-scale imaging technology</title>
<p>Since Brady and Hagen (<xref ref-type="bibr" rid="B17">2009</xref>) proposed the theory of multi-scale imaging in 2009, this imaging method has received extensive attention from researchers worldwide. Since 2012, the AWARE-2 (Golish et al., <xref ref-type="bibr" rid="B42">2012</xref>; Youn et al., <xref ref-type="bibr" rid="B140">2014</xref>), AWARE-10 (Nakamura et al., <xref ref-type="bibr" rid="B89">2013</xref>; Marks et al., <xref ref-type="bibr" rid="B82">2014</xref>), and AWARE-40 (Nakamura et al., <xref ref-type="bibr" rid="B89">2013</xref>) were developed. It takes only 18 s to actually shoot a single photograph, which effectively realizes high-resolution imaging with a large field of view. As an improved version of AWARE-2, AWARE-10 has a field of view of 100&#x000B0; &#x000D7; 60&#x000B0;, two billion pixels, and a resolution of 12.5 cm &#x00040; 5 km, representing a significant improvement in the number of pixels and resolution compared to AWARE-2.</p>
<p>Shao et al.&#x00027;s (<xref ref-type="bibr" rid="B117">2020</xref>) team developed a prototype multi-scale wide-area high-resolution computational optical imaging system based on the design principle of the secondary imaging system, as shown in <xref ref-type="fig" rid="F22">Figure 22A</xref> (Fei et al., <xref ref-type="bibr" rid="B35">2019</xref>). It had an imaging field of view of 120&#x000B0; &#x000D7; 90&#x000B0;, a system pixel count of 3.2 billion, and a resolution of 5 cm &#x00040; 5 km, which was capable of clearly resolving target objects within a range of 5 km. It was suitable for applications such as key area defense, border patrol, long-distance detection, and social activity surveillance. In the future, such systems can also play an important role in airborne, ground-based, and super-converged real-view reconnaissance. In the same year, Shao Xiaopeng&#x00027;s team designed a multi-scale system in the infrared band with a range of 8&#x02013;12 &#x003BC;m. The system had a magnification ratio of 2 &#x000D7; and a focal length of 68&#x02013;136 mm. The system resolution was 0.179 mrad in the telephoto mode, and 0.36 mrad in the short focal length, which was capable of effectively realizing the acquisition of targets in a large field of view and the identification of targets in a small field of view with high precision. The following year, the team miniaturized the multi-scale computational optics system, taking advantage of the compact structure of a Galilean telescope to reduce the volume and complexity of the computational optics system. This resulted in a significant reduction in cost and energy consumption, and greater applicability when the system was engineered for application.</p>
<fig id="F22" position="float">
<label>Figure 22</label>
<caption><p><bold>(A)</bold> Multi-scale computational optical imaging system and its imaging effect. <bold>(B)</bold> Optical imaging system and imaging effect of AWARE-40.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0022.tif"/>
</fig>
<p>On the basis of AWARE-2 (Golish et al., <xref ref-type="bibr" rid="B42">2012</xref>; Youn et al., <xref ref-type="bibr" rid="B140">2014</xref>) and AWARE-10 (Nakamura et al., <xref ref-type="bibr" rid="B89">2013</xref>), Brady developed an AWARE-40 (Nakamura et al., <xref ref-type="bibr" rid="B89">2013</xref>; Marks et al., <xref ref-type="bibr" rid="B82">2014</xref>) imaging system. Unlike AWARE-2 and AWARE-10, the primary mirror in AWARE-40 adopted a double Gaussian-like structure instead of a spherical lens, and its pixel count was up to 3.6 billion, with a resolution of up to 5.4 cm &#x00040; 5 km. The system had a superior imaging performance and could clearly detect and identify long-distance targets. The prototype and the imaging results are shown in <xref ref-type="fig" rid="F22">Figure 22B</xref>. Wu et al. (<xref ref-type="bibr" rid="B135">2016</xref>) comprehensively considered light, light field information, sensors, and image reconstruction in the computational imaging process and used 4D deconvolution algorithms based on a multi-scale imaging design scheme to obtain a large-field-of-view high-definition image suitable for biomedical applications.</p>
<sec id="s7">
<title>7 Stronger (universality)</title>
<p>Extreme imaging conditions are an urgent problem in photoelectric imaging technology and are also the key to improving the universality of the system. However, traditional imaging methods are limited by single-intensity detection and information interpretation. It is difficult to obtain and calculate the target information effectively under the interference of a strong background and noise, resulting in imaging failure. CIT fully mines the polarization, spectrum, and other information of the light field, conducts high-dimensional constraints through upgraded light field information, and effectively solves the target information in extreme scenes to achieve high-universality photoelectric imaging.</p>
<sec>
<title>7.1 Spectral imaging technology</title>
<p>Hyperspectral imaging uses narrow and continuous spectra for the continuous remote sensing imaging of targets, which has great advantages in the fine classification, matching identification, and analysis of distant targets. However, the global scanning of multiple spectral bands results in tens or even hundreds of gigabytes of data per spectral data cube. Hence, rapidly locating the target information of interest from a large amount of data is a problem that needs to be solved by hyperspectral imaging technology in the future. In addition, the traditional scanning imaging method of hyperspectral imaging results in poor real-time imaging.</p>
<p>Kumar et al. (<xref ref-type="bibr" rid="B62">2017</xref>) proposed a single-exposure multispectral imaging technique using a single camera. It sacrificed spatial dimensional information using spatial correlation and spectral decorrelation of scattered images to achieve multispectral imaging of simple structures, as shown in <xref ref-type="fig" rid="F23">Figure 23</xref>.</p>
<fig id="F23" position="float">
<label>Figure 23</label>
<caption><p><bold>(A)</bold> Schematic of multispectral imaging technique with scattering medium and monochromatic camera, <bold>(B)</bold> snapshot hyperspectral imaging.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0023.tif"/>
</fig>
<p>Monakhova et al. (<xref ref-type="bibr" rid="B87">2020</xref>) presented a system comprising an array of tiled spectral filters placed directly on an image sensor and a diffusion plate placed close to the sensor. Each point on the diffusion plate plane was mapped to a unique pseudorandom pattern on the spectrum filter array, which encoded multiplexed spatial spectral information. By solving the sparse constraint inverse problem, hyperspectral images were recovered at a sub-super-pixel resolution.</p>
<p>A spectral detector based on a monochrome camera can achieve spectral imaging with a compact and simple structure, and its cost is significantly less than that of a traditional spectral imaging system. However, the trade-off between spectral resolution and spectral range and the energy transmittance problem still exists, and the related theoretical supplement is the key research direction of spectral imaging technology in the next step.</p></sec>
<sec>
<title>7.2 Polarization 3D imaging technology</title>
<p>The existing 3D imaging methods are limited by the means of interpretation and imaging equipment, and cannot satisfy the increasing demand for 3D imaging in many different application scenarios. It is difficult to achieve the universal high-precision imaging of long-range targets. Polarized 3D imaging technology, through the study of the object surface morphology and polarization characteristics of the reflected light, interprets the multi-physical information of the optical field and achieves high-precision reconstruction of the target (Miyazaki et al., <xref ref-type="bibr" rid="B86">2016</xref>). The separation of the specular-diffuse reflection and the multi-value of the azimuth angle in the polarized 3D imaging method have been the core problems restricting its development, as shown in <xref ref-type="fig" rid="F24">Figure 24A</xref>. Miyazaki et al. (<xref ref-type="bibr" rid="B85">2002a</xref>) proposed a rotational measurement method to eliminate the multi-value of the incidence angle in specular reflection and then proposed visible and infrared wavelength measurement methods (Miyazaki et al., <xref ref-type="bibr" rid="B84">2002b</xref>), which effectively avoided the multi-value of the incidence angle. With the help of the polarization of far-infrared wavelengths and high-precision measurement of the angle of incidence, they solved the multi-value problem of the angle of incidence. However, methods using far-infrared and visible light band measurement are complex and expensive. Moreover, different bands must be resolved under image matching and other issues, making the process cumbersome and complex. Many studies have proposed effective solutions to the multi-value problem of the incidence angle. On the basis of accurately obtaining the target angle of incidence, eliminating the multi-value problem of azimuth has become another challenge for researchers. Morel et al. 
(<xref ref-type="bibr" rid="B88">2006</xref>) proposed an active illumination method for the multi-value azimuth. This method solves the multi-value azimuth problem by modulating the light source in different directions; however, it cannot measure the azimuth of a moving target. Zhou et al. (<xref ref-type="bibr" rid="B147">2013</xref>) proposed a method to diffuse the azimuth information from the high-frequency region to the low-frequency region to solve the multi-value problem in the low-frequency region, which can achieve high-precision 3D reconstruction of the target on complex surfaces.</p>
<fig id="F24" position="float">
<label>Figure 24</label>
<caption><p><bold>(A)</bold> Results of specular-diffuse reflection separation and 3D reconstruction of different targets. <bold>(B)</bold> 3D reconstruction results in different environments. <bold>(C)</bold> 3D reconstruction results with millimeter-level 3D imaging accuracy at a long distance.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0024.tif"/>
</fig>
<p>Kadambi et al. (<xref ref-type="bibr" rid="B54">2017</xref>) acquired a surface depth map of a target object using Kinect and then fused it with the polarized 3D imaging technique. They effectively recovered the detailed information of the object&#x00027;s surface depth map while solving the problem of a non-unique azimuthal angle and realized high-precision 3D imaging under different target conditions. The reconstruction results are shown in <xref ref-type="fig" rid="F24">Figure 24B</xref>. However, this method is limited by the distance of the Kinect device and it is difficult to achieve high-precision imaging over long distances. Subsequently, Han et al. (<xref ref-type="bibr" rid="B44">2022</xref>) combined a deep learning approach with a monocular polarization camera to simultaneously achieve specular-diffuse reflection separation and azimuthal correction to achieve millimeter-level 3D imaging accuracy at a long distance, as shown in <xref ref-type="fig" rid="F24">Figure 24C</xref>. In addition, Li X. et al. (<xref ref-type="bibr" rid="B70">2021</xref>) proposed a near-infrared monocular 3D computational polarization imaging method to improve the material universality. They introduced a reference gradient field in the weight constraints to globally correct the surface normal blurring of the target with inhomogeneous reflectivity, which realized the direct shape reconstruction of inhomogeneous surfaces with inhomogeneous reflectivity. This method is simple, robust, and effectively avoids the influence of changes in the reflectivity.</p>
<p>By increasing the dimensions of the light field information, polarization 3D imaging technology solves the problem of mutual restriction between the imaging distance and 3D accuracy in 3D imaging and effectively improves the universality of 3D imaging technology.</p></sec>
<sec>
<title>7.3 Extremely low SNR interpretation technology</title>
<p>This improvement in the physical dimensions can relax, and achieve a universal expansion of, the imaging conditions. For extremely low-SNR decoding technology under extreme imaging conditions, an increase in the mathematical dimension is also a major development idea. A typical example is the sparse low-rank decomposition of signals, and its core introduces a sparse low-rank matrix decomposition model in mathematics for signal detection. The slow-varying background term is regarded as a low-rank term, and the weak man-made target term is regarded as a sparse term. Accordingly, with the help of the optimization model the SNR of the target signal can be greatly enhanced. Then, through the separation of the information and inverse transformation, the detection of the weak target and optical field disturbance signal in a specific domain can be achieved, which greatly improves the imaging universality. This principle is illustrated in <xref ref-type="fig" rid="F25">Figure 25A</xref> (Zutao et al., <xref ref-type="bibr" rid="B148">2016</xref>).</p>
<fig id="F25" position="float">
<label>Figure 25</label>
<caption><p><bold>(A)</bold> Decomposition principle of low-rank and sparse matrix. <bold>(B)</bold> Recovery result of low-rank sparse decomposition model based on the truncation norm (LRSD-TNN). <bold>(C)</bold> Face restoration results of LRSD-TNN. <bold>(D)</bold> Reconstruction results of original image under different concentration conditions. <bold>(E)</bold> Defogging results in different scenes.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0025.tif"/>
</fig>
<p>Because the solution of the sparse low-rank problem is ill-posed, Cao et al. (<xref ref-type="bibr" rid="B18">2017</xref>) proposed a low-rank sparse decomposition model based on the truncation norm to detect the background and foreground of datasets from different video databases. The recovered background and foreground contain no noise, which meets practical requirements. Part of the processing effect is shown in <xref ref-type="fig" rid="F25">Figure 25B</xref>. This idea was then used to process the face image to effectively remove shadows and reflections from the image, and the processing effect is shown in <xref ref-type="fig" rid="F25">Figure 25C</xref>.</p>
<p>Subsequently, Fei et al. (<xref ref-type="bibr" rid="B34">2021</xref>) applied sparse-low-rank decomposition to underwater imaging for the first time and proposed an underwater polarimetric imaging technique based on sparse-low-rank characteristics. They established a sparse low-rank decomposition model for underwater images in the polarimetric domain, effectively separated the target and background information, and reconstructed a high-definition target image to achieve high-quality recovery of the image under the conditions of a low SNR. The imaging results are shown in <xref ref-type="fig" rid="F25">Figure 25D</xref>. In the same year, Daubechies et al. (<xref ref-type="bibr" rid="B28">2004</xref>) and Berman et al. (<xref ref-type="bibr" rid="B13">2016</xref>) proposed a low-rank and dictionary expression decomposition method for haze removal in dense fog scenes, by constructing a low-rank and dictionary expression decomposition model to obtain a low-rank &#x0201C;haze&#x0201D; map. They then recovered a clear image using double and triple interpolation, producing a haze removal effect in different scenes, as shown in <xref ref-type="fig" rid="F25">Figure 25E</xref>. This proves that the proposed method yields satisfactory results for hazy images in different scenes.</p></sec></sec>
<sec id="s8">
<title>8 More skills</title>
<p>&#x0201C;Higher,&#x0201D; &#x0201C;farther,&#x0201D; &#x0201C;smaller,&#x0201D; &#x0201C;wider,&#x0201D; and &#x0201C;stronger&#x0201D; are the urgent needs of the next development of photoelectric imaging technology, as well as the core of the development of CIT. In many cases, CIT allows for several skills to be achieved simultaneously. For example, computational microscopy techniques (McLeod and Ozcan, <xref ref-type="bibr" rid="B83">2016</xref>; Aidukas et al., <xref ref-type="bibr" rid="B3">2019</xref>) able to guarantee compact, portable and low-cost imaging systems with very large field of view and super-resolution capabilities have recently been demonstrated, as shown in <xref ref-type="fig" rid="F26">Figure 26</xref>. Polarized 3D imaging (Han et al., <xref ref-type="bibr" rid="B44">2022</xref>) not only obtains target polarization information for high contrast imaging, but also achieves high-precision 3D imaging and restores the true 3D information of the target.</p>
<fig id="F26" position="float">
<label>Figure 26</label>
<caption><p><bold>(A)</bold> Experimental setup of the low-cost computational microscopy, <bold>(B)</bold> Bayer color filter array, <bold>(C)</bold> FPM imaging scheme.</p></caption>
<graphic mimetype="image" mime-subtype="tiff" xlink:href="fimag-03-1336829-g0026.tif"/>
</fig>
<p>In CIT imaging, the imaging process was comprehensively considered in the entire link, and the encoding effect of the transmission medium on the imaging information was analyzed. The joint multiplexing of multi-dimensional physical quantities such as amplitude, phase, polarization, and spectrum can improve the interpretation ability of information, improve imaging resolution, imaging field of view, imaging distance, imaging equipment, and imaging universality, and finally achieve a breakthrough in imaging limits.</p></sec>
<sec id="s9">
<title>9 Summary and prospect</title>
<p>In the age of information technology, the emergence of powerful computational capabilities, constantly innovating information theories, new detector structures, and new technologies, such as quantum optics, have brought broader development space for optoelectronic imaging and promoted the emergence of CIT that combines traditional optics and signal processing technology. The contribution of GPUs and AI (Kellman et al., <xref ref-type="bibr" rid="B59">2020</xref>) to computational imaging is profound and has played a crucial role in advancing various aspects of image processing, computer vision, and related fields. GPUs contribute through their parallel processing power, enabling accelerated image processing algorithms, supporting deep learning applications for image recognition and reconstruction, facilitating real-time image and video processing, and efficiently handling large datasets. Researchers have found that AI algorithms (Schmidhuber, <xref ref-type="bibr" rid="B116">2015</xref>; Horisaki et al., <xref ref-type="bibr" rid="B49">2016</xref>) can provide new perspectives and significant enhancements compared to traditional CIT methods, while in certain cases, specific difficulties in CIT have spurred advancements in AI architectures themselves. The symbiotic relationship between these two disciplines is expected to yield mutual benefits, particularly due to their close ties to optimization theory and application.</p>
<p>CIT introduces mathematical computation into the imaging physical process, is driven by imaging information transfer, integrates the design of the entire link, enhances information utilization and interpretation, and achieves revolutionary advantages that are difficult to obtain using traditional optical imaging technology. The advantages include improvement in imaging resolution, extension of imaging distance, increased imaging field of view, and reduction in the size of the optical system. CIT is expected to realize imaging of clouds and fog, living organisms, tissue imaging, NLOS imaging, and other subversive imaging applications.</p>
<p>The development of CIT is not only a reliable way to overcome the limitations of traditional photoelectric imaging but also an inevitable choice for the future development of photoelectric imaging technology. However, as an emerging cutting-edge crosscutting technology, many challenges remain in the development of CIT. On the one hand, the lack of basic theories leads to a lack of guidance for the interpretation of information and system design. On the other hand, the direction of development is unclear, resulting in fragmented and mutually independent studies and technologies. <xref ref-type="table" rid="T1">Table 1</xref> lists the advantages and disadvantages of the typical CITs.</p>
<table-wrap position="float" id="T1">
<label>Table 1</label>
<caption><p>Comparison of the advantages and disadvantages of typical computational imaging techniques.</p></caption>
<table frame="box" rules="all">
<thead>
<tr style="background-color:#919498;color:#ffffff">
<th/>
<th valign="top" align="left"><bold>Scattered light imaging</bold></th>
<th valign="top" align="left"><bold>Polarization imaging</bold></th>
<th valign="top" align="left"><bold>Bionic compound eye optical system</bold></th>
<th valign="top" align="left"><bold>Minimal optical system</bold></th>
<th valign="top" align="left"><bold>Super-resolution reconstruction imaging</bold></th>
<th valign="top" align="left"><bold>Low SNR signal recovery</bold></th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top" align="left">Advantage</td>
<td valign="top" align="left">Solve the problem of penetrating clouds and fog during the detection process</td>
<td valign="top" align="left">Effectively eliminate the influence of imaging media on reconstruction results</td>
<td valign="top" align="left">Overcome the mutual restriction of large field of view and high resolution</td>
<td valign="top" align="left">Light weight, low cost, and portability</td>
<td valign="top" align="left">The reconstruction effect is greatly improved</td>
<td valign="top" align="left">Effectively realize the detection of long-distance targets with low signal-to-noise ratio</td>
</tr> <tr>
<td valign="top" align="left">Disadvantage</td>
<td valign="top" align="left">Difficulty in imaging through dynamic scattering media; unable to achieve multi-scattering scene imaging</td>
<td valign="top" align="left">Long-distance imaging is limited</td>
<td valign="top" align="left">Data processing pressure is high</td>
<td valign="top" align="left">Immature technology</td>
<td valign="top" align="left">Large amount of calculation; algorithm has limited application effect</td>
<td valign="top" align="left">Long processing time; heavy information processing burden</td>
</tr></tbody>
</table>
</table-wrap>
<p>The core of the future development of CIT is the efficient deciphering of high-dimensional light field information, which cannot be separated from the promotion of the following technologies: (1) The development of high-performance system components is the foundation of computational imaging technology, such as free-form surface optical systems (Li and Gu, <xref ref-type="bibr" rid="B71">2004</xref>; Ye et al., <xref ref-type="bibr" rid="B138">2017</xref>), high-performance detectors (Tan and Mohseni, <xref ref-type="bibr" rid="B124">2018</xref>; Wang et al., <xref ref-type="bibr" rid="B131">2020</xref>), and related fields. (2) Light field control devices such as meta-surface (Zhao et al., <xref ref-type="bibr" rid="B145">2021</xref>; Yu et al., <xref ref-type="bibr" rid="B141">2022</xref>; Arbabi and Faraon, <xref ref-type="bibr" rid="B9">2023</xref>) technology will utilize nanostructures to introduce more physical quantities into the imaging process, achieving higher performance imaging while also solving the problem of large system volume and mass. (3) The improvement of computing power [hardware and compressive sensing theory (Qaisar et al., <xref ref-type="bibr" rid="B100">2013</xref>; Rani et al., <xref ref-type="bibr" rid="B104">2018</xref>), new models such as deep learning (LeCun et al., <xref ref-type="bibr" rid="B66">2015</xref>; Kelleher, <xref ref-type="bibr" rid="B58">2019</xref>), quantum computing methods (Hidary and Hidary, <xref ref-type="bibr" rid="B48">2019</xref>; Rawat et al., <xref ref-type="bibr" rid="B106">2022</xref>), photon computing (Antonik et al., <xref ref-type="bibr" rid="B8">2019</xref>; Pammi et al., <xref ref-type="bibr" rid="B91">2019</xref>), etc.] is still a necessary condition for CIT to further enhance. The above methods will be conducive to achieving the ambitious goals of computational imaging development, including higher, farther, smaller, wider, and stronger. 
In addition, CIT will offer a more systematic and integrated solution to meet imaging needs for future development. By combining the respective advantages of a variety of imaging methods, it can achieve imaging effects and application scenarios that are not possible for traditional photoelectric imaging technology, realizing imaging applications such as remote sensing imaging in space, biomedical imaging, underwater imaging, and military counter-imaging. We believe that with the continuous development of CIT and theory, computational imaging systems will be richer, more 3D, and more effective; thus, CIT can become a future-oriented imaging technology to support forward-looking, strategic scientific, and technological research areas.</p></sec>
<sec sec-type="author-contributions" id="s10">
<title>Author contributions</title>
<p>MX: Conceptualization, Writing &#x02013; original draft, Writing &#x02013; review &#x00026; editing. FL: Writing &#x02013; review &#x00026; editing. JL: Investigation, Writing &#x02013; review &#x00026; editing. XD: Investigation, Writing &#x02013; review &#x00026; editing. QL: Writing &#x02013; original draft. XS: Conceptualization, Funding acquisition, Validation, Writing &#x02013; review &#x00026; editing.</p></sec>
</body>
<back>
<sec sec-type="funding-information" id="s11">
<title>Funding</title>
<p>The author(s) declare financial support was received for the research, authorship, and/or publication of this article. This research was funded by National Natural Science Foundation of China, grant numbers 62205259, 62075175, 61975254, and 62105254, the Open Research Fund of CAS Key Laboratory of Space Precision Measurement Technology, grant number B022420004, and National Key Laboratory of Infrared Detection Technologies grant number IRDT-23-06.</p>
</sec>
<sec sec-type="COI-statement" id="conf1">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s12">
<title>Publisher&#x00027;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Abbe</surname> <given-names>E.</given-names></name></person-group> (<year>1873</year>). <article-title>Beitr&#x000E4;ge zur theorie des mikroskops und der mikroskopischen wahrnehmung</article-title>. <source>Arch. F&#x000FC;r. Mikrosk. Anat.</source> <volume>9</volume>, <fpage>413</fpage>&#x02013;<lpage>468</lpage>. <pub-id pub-id-type="doi">10.1007/BF02956173</pub-id></citation>
</ref>
<ref id="B2">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Afshari</surname> <given-names>H.</given-names></name> <name><surname>Popovic</surname> <given-names>V.</given-names></name> <name><surname>Tasci</surname> <given-names>T.</given-names></name> <name><surname>Schmid</surname> <given-names>A.</given-names></name> <name><surname>Leblebici</surname> <given-names>Y.</given-names></name></person-group> (<year>2012</year>). <article-title>A spherical multi-camera system with real-time omnidirectional video acquisition capability</article-title>. <source>IEEE Trans. Consum. Electron.</source> <volume>58</volume>, <fpage>1110</fpage>&#x02013;<lpage>1118</lpage>. <pub-id pub-id-type="doi">10.1109/TCE.2012.6414975</pub-id></citation>
</ref>
<ref id="B3">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aidukas</surname> <given-names>T.</given-names></name> <name><surname>Eckert</surname> <given-names>R.</given-names></name> <name><surname>Harvey</surname> <given-names>A. R.</given-names></name> <name><surname>Waller</surname> <given-names>L.</given-names></name> <name><surname>Konda</surname> <given-names>P. C.</given-names></name></person-group> (<year>2019</year>). <article-title>Low-cost, sub-micron resolution, wide-field computational microscopy using opensource hardware</article-title>. <source>Sci. Rep.</source> <volume>9</volume>, <fpage>1</fpage>&#x02013;<lpage>12</lpage>. <pub-id pub-id-type="doi">10.1038/s41598-019-43845-9</pub-id><pub-id pub-id-type="pmid">31092867</pub-id></citation></ref>
<ref id="B4">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Akin</surname> <given-names>A.</given-names></name> <name><surname>Cogal</surname> <given-names>O.</given-names></name> <name><surname>Seyid</surname> <given-names>K.</given-names></name> <name><surname>Afshari</surname> <given-names>H.</given-names></name> <name><surname>Schmid</surname> <given-names>A.</given-names></name> <name><surname>Leblebici</surname> <given-names>Y.</given-names></name></person-group> (<year>2013</year>). <article-title>Hemispherical multiple camera system for high resolution omni-directional light field imaging</article-title>. <source>Emerg. Sel. Top Circ. Syst. IEEE J.</source> <volume>3</volume>, <fpage>137</fpage>&#x02013;<lpage>144</lpage>. <pub-id pub-id-type="doi">10.1109/JETCAS.2013.2256831</pub-id></citation>
</ref>
<ref id="B5">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Alexandrov</surname> <given-names>S. A.</given-names></name> <name><surname>Hillman</surname> <given-names>T. R.</given-names></name> <name><surname>Gutzler</surname> <given-names>T.</given-names></name> <name><surname>Sampson</surname> <given-names>D. D.</given-names></name></person-group> (<year>2006</year>). <article-title>Synthetic aperture fourier holographic optical microscopy</article-title>. <source>Phys. Rev. Lett.</source> <volume>97</volume>:<fpage>168102</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.97.168102</pub-id><pub-id pub-id-type="pmid">17155439</pub-id></citation></ref>
<ref id="B6">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ando</surname> <given-names>T.</given-names></name> <name><surname>Horisaki</surname> <given-names>R.</given-names></name> <name><surname>Tanida</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Speckle-learning-based object recognition through scattering media</article-title>. <source>Opt. Expr.</source> <volume>23</volume>:<fpage>33902</fpage>. <pub-id pub-id-type="doi">10.1364/OE.23.033902</pub-id><pub-id pub-id-type="pmid">26832049</pub-id></citation></ref>
<ref id="B7">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Andreoli</surname> <given-names>D.</given-names></name> <name><surname>Volpe</surname> <given-names>G.</given-names></name> <name><surname>Popoff</surname> <given-names>S.</given-names></name> <name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Gr&#x000E9;sillon</surname> <given-names>S.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Deterministic control of broadband light through a multiply scattering medium via the multispectral transmission matrix</article-title>. <source>Sci. Rep.</source> <volume>5</volume>:<fpage>10347</fpage>. <pub-id pub-id-type="doi">10.1038/srep10347</pub-id><pub-id pub-id-type="pmid">25965944</pub-id></citation></ref>
<ref id="B8">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Antonik</surname> <given-names>P.</given-names></name> <name><surname>Marsal</surname> <given-names>N.</given-names></name> <name><surname>Brunner</surname> <given-names>D.</given-names></name> <name><surname>Rontani</surname> <given-names>D.</given-names></name></person-group> (<year>2019</year>). <article-title>Human action recognition with a large-scale brain-inspired photonic computer</article-title>. <source>Nat. Mach. Intell.</source> <volume>1</volume>, <fpage>530</fpage>&#x02013;<lpage>537</lpage>. <pub-id pub-id-type="doi">10.1038/s42256-019-0110-8</pub-id></citation>
</ref>
<ref id="B9">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Arbabi</surname> <given-names>A.</given-names></name> <name><surname>Faraon</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Advances in optical metalenses</article-title>. <source>Nat. Photon.</source> <volume>17</volume>, <fpage>16</fpage>&#x02013;<lpage>25</lpage>. <pub-id pub-id-type="doi">10.1038/s41566-022-01108-6</pub-id></citation>
</ref>
<ref id="B10">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Barbastathis</surname> <given-names>G.</given-names></name> <name><surname>Ozcan</surname> <given-names>A.</given-names></name> <name><surname>Situ</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>On the use of deep learning for computational imaging</article-title>. <source>Optica</source> <volume>6</volume>, <fpage>921</fpage>&#x02013;<lpage>943</lpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.6.000921</pub-id></citation>
</ref>
<ref id="B11">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Bar-Noy</surname> <given-names>A.</given-names></name> <name><surname>Cirincione</surname> <given-names>G.</given-names></name> <name><surname>Govindan</surname> <given-names>R.</given-names></name> <name><surname>Krishnamurthy</surname> <given-names>S.</given-names></name> <name><surname>Yener</surname> <given-names>A.</given-names></name></person-group> (<year>2011</year>). <article-title>&#x0201C;Quality-of-information aware networking for tactical military networks,&#x0201D;</article-title> in <source>2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>2</fpage>&#x02013;<lpage>7</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bibandpaperid=1j4304j0ad080jh0q41d0t50qk380364">https://xueshu.baidu.com/u/citation?type=bibandpaperid=1j4304j0ad080jh0q41d0t50qk380364</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B12">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Batarseh</surname> <given-names>M.</given-names></name> <name><surname>Sukhov</surname> <given-names>S.</given-names></name> <name><surname>Shen</surname> <given-names>Z.</given-names></name> <name><surname>Gemar</surname> <given-names>H.</given-names></name> <name><surname>Rezvani</surname> <given-names>R.</given-names></name> <name><surname>Dogariu</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Passive sensing around the corner using spatial coherence</article-title>. <source>Nat. Commun.</source> <volume>9</volume>, <fpage>3629</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-018-05985-w</pub-id><pub-id pub-id-type="pmid">30194292</pub-id></citation></ref>
<ref id="B13">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Berman</surname> <given-names>D.</given-names></name> <name><surname>Treibitz</surname> <given-names>T.</given-names></name> <name><surname>Avidan</surname> <given-names>S.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Non-local image dehazing,&#x0201D;</article-title> in <source>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source> (<publisher-loc>Las Vegas, NV, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>1674</fpage>&#x02013;<lpage>1682</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2016.185</pub-id></citation>
</ref>
<ref id="B14">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bertolotti</surname> <given-names>J.</given-names></name> <name><surname>van Putten</surname> <given-names>E. G.</given-names></name> <name><surname>Blum</surname> <given-names>C.</given-names></name> <name><surname>Lagendijk</surname> <given-names>A.</given-names></name> <name><surname>Vos</surname> <given-names>W. L.</given-names></name> <name><surname>Mosk</surname> <given-names>A. P.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Non-invasive imaging through opaque scattering layers</article-title>. <source>Nature</source> <volume>491</volume>, <fpage>232</fpage>&#x02013;<lpage>234</lpage>. <pub-id pub-id-type="doi">10.1038/nature11578</pub-id><pub-id pub-id-type="pmid">23135468</pub-id></citation></ref>
<ref id="B15">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Bogdanski</surname> <given-names>J.</given-names></name> <name><surname>Bjork</surname> <given-names>G.</given-names></name> <name><surname>Karlsson</surname> <given-names>A.</given-names></name></person-group> (<year>2004</year>). <source>Quantum and classical correlated imaging</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/quant-ph/0407127">http://arxiv.org/abs/quant-ph/0407127</ext-link> (accessed June 26, 2023).</citation>
</ref>
<ref id="B16">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bosq</surname> <given-names>T. W. D.</given-names></name> <name><surname>Agarwal</surname> <given-names>S.</given-names></name> <name><surname>Dijk</surname> <given-names>J.</given-names></name> <name><surname>Gungor</surname> <given-names>A.</given-names></name> <name><surname>Leonard</surname> <given-names>K.</given-names></name></person-group> (<year>2018</year>). <article-title>An overview of joint activities on computational imaging and compressive sensing systems by NATO SET-232</article-title>. <source>Computat. Imag. III</source> <volume>10669</volume>, <fpage>54</fpage>&#x02013;<lpage>72</lpage>. <pub-id pub-id-type="doi">10.1117/12.2307852</pub-id></citation>
</ref>
<ref id="B17">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brady</surname> <given-names>D. J.</given-names></name> <name><surname>Hagen</surname> <given-names>N.</given-names></name></person-group> (<year>2009</year>). <article-title>Multiscale lens design</article-title>. <source>Opt. Expr.</source> <volume>17</volume>, <fpage>10659</fpage>&#x02013;<lpage>10674</lpage>. <pub-id pub-id-type="doi">10.1364/OE.17.010659</pub-id></citation>
</ref>
<ref id="B18">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>F.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Ye</surname> <given-names>H.</given-names></name> <name><surname>Zhao</surname> <given-names>J.</given-names></name> <name><surname>Zhou</surname> <given-names>Z.</given-names></name></person-group> (<year>2017</year>). <article-title>Recovering low-rank and sparse matrix based on the truncated nuclear norm</article-title>. <source>Neural Netw.</source> <volume>85</volume>, <fpage>10</fpage>&#x02013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2016.09.005</pub-id><pub-id pub-id-type="pmid">27814461</pub-id></citation></ref>
<ref id="B19">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cao</surname> <given-names>L.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <name><surname>Brady</surname> <given-names>D. J.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name></person-group> (<year>2020</year>). <article-title>Noise suppression for ballistic-photons based compressive in-line holographic imaging through inhomogeneous medium</article-title>. <source>Opt. Expr.</source> <volume>28</volume>, <fpage>10337</fpage>&#x02013;<lpage>10349</lpage>. <pub-id pub-id-type="doi">10.1364/OE.385992</pub-id><pub-id pub-id-type="pmid">32225621</pub-id></citation></ref>
<ref id="B20">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chan</surname> <given-names>S.</given-names></name> <name><surname>Warburton</surname> <given-names>R. E.</given-names></name> <name><surname>Gariepy</surname> <given-names>G.</given-names></name> <name><surname>Leach</surname> <given-names>J.</given-names></name> <name><surname>Faccio</surname> <given-names>D.</given-names></name></person-group> (<year>2017</year>). <article-title>Non-line-of-sight tracking of people at long range</article-title>. <source>Opt. Expr.</source> <volume>25</volume>, <fpage>10109</fpage>&#x02013;<lpage>10117</lpage>. <pub-id pub-id-type="doi">10.1364/OE.25.010109</pub-id><pub-id pub-id-type="pmid">28468386</pub-id></citation></ref>
<ref id="B21">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Changsheng</surname> <given-names>S.</given-names></name> <name><surname>Yongtian</surname> <given-names>Z.</given-names></name> <name><surname>Zhongwen</surname> <given-names>H.</given-names></name> <name><surname>Teng</surname> <given-names>X.</given-names></name> <name><surname>Zhen</surname> <given-names>W.</given-names></name></person-group> (<year>2017</year>). <article-title>Astronomy optical interferometric telescope array optimization based on modified UV sampling method</article-title>. <source>J. Appl. Opt.</source> <volume>38</volume>, <fpage>532</fpage>&#x02013;<lpage>536</lpage>. <pub-id pub-id-type="doi">10.5768/JAO201738.0401007</pub-id></citation>
</ref>
<ref id="B22">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Chengfei</surname> <given-names>G.</given-names></name> <name><surname>Jietao</surname> <given-names>L.</given-names></name> <name><surname>Tengfei</surname> <given-names>W.</given-names></name> <name><surname>Lei</surname> <given-names>Z.</given-names></name> <name><surname>Xiaopeng</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Tracking moving targets behind a scattering medium via speckle correlation</article-title>. <source>Appl. Opt.</source> <volume>57</volume>:<fpage>905</fpage>. <pub-id pub-id-type="doi">10.1364/AO.57.000905</pub-id><pub-id pub-id-type="pmid">29400766</pub-id></citation></ref>
<ref id="B23">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Cogal</surname> <given-names>O.</given-names></name> <name><surname>Akin</surname> <given-names>A.</given-names></name> <name><surname>Seyid</surname> <given-names>K.</given-names></name> <name><surname>Popovic</surname> <given-names>V.</given-names></name> <name><surname>Schmid</surname> <given-names>A.</given-names></name> <name><surname>Ott</surname> <given-names>B.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>&#x0201C;A new omni-directional multi-camera system for high resolution surveillance,&#x0201D;</article-title> in <source>Mobile Multimedia/Image Processing, Security, and Applications 2014</source> (<publisher-loc>SPIE</publisher-loc>), <fpage>179</fpage>&#x02013;<lpage>187</lpage>. <pub-id pub-id-type="doi">10.1117/12.2049698</pub-id></citation>
</ref>
<ref id="B24">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cogal</surname> <given-names>O.</given-names></name> <name><surname>Leblebici</surname> <given-names>Y.</given-names></name></person-group> (<year>2016</year>). <article-title>An insect eye inspired miniaturized multi-camera system for endoscopic imaging</article-title>. <source>IEEE Trans. Biomed. Circ. Syst.</source> <volume>11</volume>, <fpage>212</fpage>&#x02013;<lpage>224</lpage>. <pub-id pub-id-type="doi">10.1109/TBCAS.2016.2547388</pub-id><pub-id pub-id-type="pmid">27249836</pub-id></citation></ref>
<ref id="B25">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Coskun</surname> <given-names>A. F.</given-names></name> <name><surname>Ozcan</surname> <given-names>A.</given-names></name></person-group> (<year>2014</year>). <article-title>Computational imaging, sensing and diagnostics for global health applications</article-title>. <source>Curr. Opin. Biotechnol.</source> <volume>25</volume>, <fpage>8</fpage>&#x02013;<lpage>16</lpage>. <pub-id pub-id-type="doi">10.1016/j.copbio.2013.08.008</pub-id><pub-id pub-id-type="pmid">24484875</pub-id></citation></ref>
<ref id="B26">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cotte</surname> <given-names>Y.</given-names></name> <name><surname>Toy</surname> <given-names>F.</given-names></name> <name><surname>Jourdain</surname> <given-names>P.</given-names></name> <name><surname>Pavillon</surname> <given-names>N.</given-names></name> <name><surname>Boss</surname> <given-names>D.</given-names></name> <name><surname>Magistretti</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Marker-free phase nanoscopy</article-title>. <source>Nat. Photon.</source> <volume>7</volume>, <fpage>113</fpage>&#x02013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2012.329</pub-id></citation>
</ref>
<ref id="B27">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Cua</surname> <given-names>M.</given-names></name> <name><surname>Zhou</surname> <given-names>E.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name></person-group> (<year>2017</year>). <article-title>Imaging moving targets through scattering media</article-title>. <source>Opt. Expr.</source> <volume>25</volume>:<fpage>3935</fpage>. <pub-id pub-id-type="doi">10.1364/OE.25.003935</pub-id><pub-id pub-id-type="pmid">28241603</pub-id></citation></ref>
<ref id="B28">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Daubechies</surname> <given-names>I.</given-names></name> <name><surname>Defrise</surname> <given-names>M.</given-names></name> <name><surname>De Mol</surname> <given-names>C.</given-names></name></person-group> (<year>2004</year>). <article-title>An iterative thresholding algorithm for linear inverse problems with a sparsity constraint</article-title>. <source>Commun. Pure Appl. Mathem.</source> <volume>57</volume>, <fpage>1413</fpage>&#x02013;<lpage>1457</lpage>. <pub-id pub-id-type="doi">10.1002/cpa.20042</pub-id></citation>
</ref>
<ref id="B29">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>C.</given-names></name> <name><surname>Loy</surname> <given-names>C. C.</given-names></name> <name><surname>He</surname> <given-names>K.</given-names></name> <name><surname>Tang</surname> <given-names>X.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;Learning a deep convolutional network for image super-resolution,&#x0201D;</article-title> in <source>Computer Vision&#x02013;ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part IV 13</source> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>184</fpage>&#x02013;<lpage>199</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-319-10593-2_13</pub-id></citation>
</ref>
<ref id="B30">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Dong</surname> <given-names>J.</given-names></name> <name><surname>Krzakala</surname> <given-names>F.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>&#x0201C;Spectral method for multiplexed phase retrieval and application in optical imaging in complex media,&#x0201D;</article-title> in <source>ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</source> (<publisher-loc>IEEE</publisher-loc>), <fpage>4963</fpage>&#x02013;<lpage>4967</lpage>. <pub-id pub-id-type="doi">10.1109/ICASSP.2019.8682329</pub-id></citation>
</ref>
<ref id="B31">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Dr&#x000E9;meau</surname> <given-names>A.</given-names></name> <name><surname>Liutkus</surname> <given-names>A.</given-names></name> <name><surname>Martina</surname> <given-names>D.</given-names></name> <name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Sch&#x000FC;lke</surname> <given-names>C.</given-names></name> <name><surname>Krzakala</surname> <given-names>F.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Reference-less measurement of the transmission matrix of a highly scattering material using a DMD and phase retrieval techniques</article-title>. <source>Opt. Expr.</source> <volume>23</volume>, <fpage>11898</fpage>&#x02013;<lpage>11911</lpage>. <pub-id pub-id-type="doi">10.1364/OE.23.011898</pub-id><pub-id pub-id-type="pmid">25969280</pub-id></citation></ref>
<ref id="B32">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Enders</surname> <given-names>B.</given-names></name> <name><surname>Dierolf</surname> <given-names>M.</given-names></name> <name><surname>Cloetens</surname> <given-names>P.</given-names></name> <name><surname>Stockmar</surname> <given-names>M.</given-names></name> <name><surname>Pfeiffer</surname> <given-names>F.</given-names></name> <name><surname>Thibault</surname> <given-names>P.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Ptychography with broad-bandwidth radiation</article-title>. <source>Appl. Phys. Lett.</source> <volume>104</volume>:<fpage>171104</fpage>. <pub-id pub-id-type="doi">10.1063/1.4874304</pub-id></citation>
</ref>
<ref id="B33">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fang</surname> <given-names>S.</given-names></name> <name><surname>Xia</surname> <given-names>X.</given-names></name> <name><surname>Huo</surname> <given-names>X.</given-names></name> <name><surname>Chen</surname> <given-names>C.</given-names></name></person-group> (<year>2014</year>). <article-title>Image dehazing using polarization effects of objects and airlight</article-title>. <source>Opt. Expr.</source> <volume>22</volume>, <fpage>19523</fpage>&#x02013;<lpage>19537</lpage>. <pub-id pub-id-type="doi">10.1364/OE.22.019523</pub-id><pub-id pub-id-type="pmid">25321035</pub-id></citation></ref>
<ref id="B34">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fei</surname> <given-names>L.</given-names></name> <name><surname>Shao-Jie</surname> <given-names>S.</given-names></name> <name><surname>Ping-Li</surname> <given-names>H.</given-names></name> <name><surname>Lin</surname> <given-names>Z.</given-names></name> <name><surname>Xiao-Peng</surname> <given-names>S.</given-names></name></person-group> (<year>2021</year>). <article-title>Clear underwater vision in non-uniform scattering field by low-rank-and-sparse-decomposition-based polarization imaging</article-title>. <source>Acta Phys. Sinica</source> <volume>70</volume>:<fpage>385</fpage>. <pub-id pub-id-type="doi">10.7498/aps.70.20210314</pub-id></citation>
</ref>
<ref id="B35">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fei</surname> <given-names>L.</given-names></name> <name><surname>Yazhe</surname> <given-names>W.</given-names></name> <name><surname>Pingli</surname> <given-names>H.</given-names></name> <name><surname>Jiawei</surname> <given-names>L.</given-names></name> <name><surname>Xiaopeng</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Design of monocentric wide field-of-view and high-resolution computational imaging system</article-title>. <source>Acta Phys. Sin.</source> <volume>68</volume>:<fpage>1447</fpage>. <pub-id pub-id-type="doi">10.7498/aps.68.20182229</pub-id></citation>
</ref>
<ref id="B36">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feld</surname> <given-names>M. S.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name> <name><surname>Psaltis</surname> <given-names>D.</given-names></name> <name><surname>Yaqoob</surname> <given-names>Z.</given-names></name></person-group> (<year>2008</year>). <article-title>Optical phase conjugation for turbidity suppression in biological samples</article-title>. <source>Nat. Photon.</source> <volume>2</volume>, <fpage>110</fpage>&#x02013;<lpage>115</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2007.297</pub-id><pub-id pub-id-type="pmid">19492016</pub-id></citation></ref>
<ref id="B37">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Feng</surname> <given-names>S.</given-names></name> <name><surname>Kane</surname> <given-names>C.</given-names></name> <name><surname>Lee</surname> <given-names>P. A.</given-names></name> <name><surname>Stone</surname> <given-names>A. D.</given-names></name></person-group> (<year>1988</year>). <article-title>Correlations and fluctuations of coherent wave transmission through disordered media</article-title>. <source>Phys. Rev. Lett.</source> <volume>61</volume>, <fpage>834</fpage>&#x02013;<lpage>837</lpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.61.834</pub-id><pub-id pub-id-type="pmid">10039442</pub-id></citation></ref>
<ref id="B38">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Freund</surname> <given-names>I.</given-names></name> <name><surname>Rosenbluh</surname> <given-names>M.</given-names></name> <name><surname>Feng</surname> <given-names>S.</given-names></name></person-group> (<year>1988</year>). <article-title>Memory effects in propagation of optical waves through disordered media</article-title>. <source>Phys. Rev. Lett.</source> <volume>61</volume>, <fpage>2328</fpage>&#x02013;<lpage>2331</lpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.61.2328</pub-id><pub-id pub-id-type="pmid">10039084</pub-id></citation></ref>
<ref id="B39">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fu</surname> <given-names>W.</given-names></name> <name><surname>Zhao</surname> <given-names>D.</given-names></name> <name><surname>Li</surname> <given-names>Z.</given-names></name> <name><surname>Liu</surname> <given-names>S.</given-names></name> <name><surname>Tian</surname> <given-names>C.</given-names></name> <name><surname>Huang</surname> <given-names>K.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Ultracompact meta-imagers for arbitrary all-optical convolution</article-title>. <source>Light Sci. Appl.</source> <volume>11</volume>:<fpage>62</fpage>. <pub-id pub-id-type="doi">10.1038/s41377-022-00752-5</pub-id><pub-id pub-id-type="pmid">35304870</pub-id></citation></ref>
<ref id="B40">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fu</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>Z.</given-names></name> <name><surname>Dong</surname> <given-names>Z.</given-names></name></person-group> (<year>2015</year>). <article-title>Design of bionic compound eye optical system based on field of view stitching method</article-title>. <source>China J. Sci. Instrum.</source> <volume>36</volume>, <fpage>422</fpage>&#x02013;<lpage>429</lpage>.</citation>
</ref>
<ref id="B41">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gariepy</surname> <given-names>G.</given-names></name> <name><surname>Tonolini</surname> <given-names>F.</given-names></name> <name><surname>Henderson</surname> <given-names>R.</given-names></name> <name><surname>Leach</surname> <given-names>J.</given-names></name> <name><surname>Faccio</surname> <given-names>D.</given-names></name></person-group> (<year>2016</year>). <article-title>Detection and tracking of moving objects hidden from view</article-title>. <source>Nat. Photon.</source> <volume>10</volume>, <fpage>23</fpage>&#x02013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2015.234</pub-id></citation>
</ref>
<ref id="B42">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Golish</surname> <given-names>D. R.</given-names></name> <name><surname>Vera</surname> <given-names>E. M.</given-names></name> <name><surname>Kelly</surname> <given-names>K. J.</given-names></name> <name><surname>Gong</surname> <given-names>Q.</given-names></name> <name><surname>Jansen</surname> <given-names>P. A.</given-names></name> <name><surname>Hughes</surname> <given-names>J. M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>Development of a scalable image formation pipeline for multiscale gigapixel photography</article-title>. <source>Opt. Expr.</source> <volume>20</volume>:<fpage>22048</fpage>. <pub-id pub-id-type="doi">10.1364/OE.20.022048</pub-id><pub-id pub-id-type="pmid">23037355</pub-id></citation></ref>
<ref id="B43">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gustafsson</surname> <given-names>M. G. L.</given-names></name></person-group> (<year>2000</year>). <article-title>Surpassing the lateral resolution limit by a factor of two using structured illumination microscopy</article-title>. <source>J. Microsc.</source> <volume>198</volume>, <fpage>82</fpage>&#x02013;<lpage>87</lpage>. <pub-id pub-id-type="doi">10.1046/j.1365-2818.2000.00710.x</pub-id><pub-id pub-id-type="pmid">10810003</pub-id></citation></ref>
<ref id="B44">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>P.</given-names></name> <name><surname>Cai</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Liang</surname> <given-names>R.</given-names></name> <name><surname>Yan</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Computational polarization 3D: new solution for monocular shape recovery in natural conditions</article-title>. <source>Opt. Lasers Eng.</source> <volume>151</volume>:<fpage>106925</fpage>. <pub-id pub-id-type="doi">10.1016/j.optlaseng.2021.106925</pub-id></citation>
</ref>
<ref id="B45">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Han</surname> <given-names>P.</given-names></name> <name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Yang</surname> <given-names>K.</given-names></name> <name><surname>Ma</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Shao</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Active underwater descattering and image recovery</article-title>. <source>Appl. Opt.</source> <volume>56</volume>:<fpage>6631</fpage>. <pub-id pub-id-type="doi">10.1364/AO.56.006631</pub-id><pub-id pub-id-type="pmid">29047955</pub-id></citation></ref>
<ref id="B46">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Harris</surname> <given-names>J. L.</given-names></name></person-group> (<year>1964</year>). <article-title>Diffraction and resolving power</article-title>. <source>J. Opt. Soc. Am.</source> <volume>54</volume>:<fpage>931</fpage>. <pub-id pub-id-type="doi">10.1364/JOSA.54.000931</pub-id></citation>
</ref>
<ref id="B47">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Hegerl</surname> <given-names>R.</given-names></name> <name><surname>Hoppe</surname> <given-names>W.</given-names></name></person-group> (<year>1972</year>). <source>Phase evaluation in generalized diffraction (ptychography).</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=1b9f8cfe7b9b3ac2bac3983574af9864">https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=1b9f8cfe7b9b3ac2bac3983574af9864</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B48">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hidary</surname> <given-names>J. D.</given-names></name></person-group> (<year>2019</year>). <source>Quantum Computing: An Applied Approach</source>. New York: Springer. <pub-id pub-id-type="doi">10.1007/978-3-030-23922-0</pub-id></citation>
</ref>
<ref id="B49">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Horisaki</surname> <given-names>R.</given-names></name> <name><surname>Takagi</surname> <given-names>R.</given-names></name> <name><surname>Tanida</surname> <given-names>J.</given-names></name></person-group> (<year>2016</year>). <article-title>Learning-based imaging through scattering media</article-title>. <source>Opt. Expr.</source> <volume>24</volume>, <fpage>13738</fpage>&#x02013;<lpage>13743</lpage>. <pub-id pub-id-type="doi">10.1364/OE.24.013738</pub-id><pub-id pub-id-type="pmid">27410537</pub-id></citation></ref>
<ref id="B50">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>H. F.</given-names></name> <name><surname>Li</surname> <given-names>J. Q.</given-names></name> <name><surname>Li</surname> <given-names>X. B.</given-names></name></person-group> (<year>2021</year>). <article-title>Underwater polarization difference imaging with three degrees of freedom</article-title>. <source>Acta Opt. Sin.</source> <volume>41</volume>:<fpage>0329001</fpage>. <pub-id pub-id-type="doi">10.3788/AOS202141.0329001</pub-id></citation>
</ref>
<ref id="B51">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hu</surname> <given-names>X.</given-names></name> <name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Suo</surname> <given-names>J.</given-names></name> <name><surname>Dai</surname> <given-names>Q.</given-names></name></person-group> (<year>2017</year>). <article-title>Emerging theories and technologies on computational imaging</article-title>. <source>Front. Inf. Technol. Electron. Eng.</source> <volume>18</volume>, <fpage>1207</fpage>&#x02013;<lpage>1221</lpage>. <pub-id pub-id-type="doi">10.1631/FITEE.1700211</pub-id></citation>
</ref>
<ref id="B52">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jiasong</surname> <given-names>S.</given-names></name> <name><surname>Yuzhen</surname> <given-names>Z.</given-names></name> <name><surname>Qian</surname> <given-names>C.</given-names></name> <name><surname>Chao</surname> <given-names>Z.</given-names></name></person-group> (<year>2016</year>). <article-title>Fourier ptychographic microscopy: theory, advances, and applications</article-title>. <source>Acta Opt. Sin.</source> <volume>36</volume>:<fpage>1011005</fpage>. <pub-id pub-id-type="doi">10.3788/AOS201636.1011005</pub-id></citation>
</ref>
<ref id="B53">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Juyang</surname> <given-names>W.</given-names></name> <name><surname>Paul</surname> <given-names>C.</given-names></name> <name><surname>Marc</surname> <given-names>H.</given-names></name></person-group> (<year>1992</year>). <article-title>Camera calibration with distortion models and accuracy evaluation</article-title>. <source>IEEE Trans. Pattern. Anal. Mach. Intell.</source> <volume>14</volume>, <fpage>965</fpage>&#x02013;<lpage>980</lpage>. <pub-id pub-id-type="doi">10.1109/34.159901</pub-id></citation>
</ref>
<ref id="B54">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kadambi</surname> <given-names>A.</given-names></name> <name><surname>Taamazyan</surname> <given-names>V. A.</given-names></name> <name><surname>Shi</surname> <given-names>B.</given-names></name> <name><surname>Raskar</surname> <given-names>R.</given-names></name></person-group> (<year>2017</year>). <article-title>Depth sensing using geometrically constrained polarization normals</article-title>. <source>Int. J. Comput. Vis.</source> <volume>125</volume>, <fpage>34</fpage>&#x02013;<lpage>51</lpage>. <pub-id pub-id-type="doi">10.1007/s11263-017-1025-7</pub-id></citation>
</ref>
<ref id="B55">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Heidmann</surname> <given-names>P.</given-names></name> <name><surname>Fink</surname> <given-names>M.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name></person-group> (<year>2014</year>). <article-title>Non-invasive single-shot imaging through scattering layers and around corners via speckle correlations</article-title>. <source>Nat. Photon.</source> <volume>8</volume>, <fpage>784</fpage>&#x02013;<lpage>790</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2014.189</pub-id></citation>
</ref>
<ref id="B56">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Small</surname> <given-names>E.</given-names></name> <name><surname>Bromberg</surname> <given-names>Y.</given-names></name> <name><surname>Silberberg</surname> <given-names>Y.</given-names></name></person-group> (<year>2011</year>). <article-title>Focusing and compression of ultrashort pulses through scattering media</article-title>. <source>Nat. Photon.</source> <volume>5</volume>, <fpage>372</fpage>&#x02013;<lpage>377</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2011.72</pub-id></citation>
</ref>
<ref id="B57">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Small</surname> <given-names>E.</given-names></name> <name><surname>Silberberg</surname> <given-names>Y.</given-names></name></person-group> (<year>2012</year>). <article-title>Looking around corners and through thin turbid layers in real time with scattered incoherent light</article-title>. <source>Nat. Photon.</source> <volume>6</volume>, <fpage>549</fpage>&#x02013;<lpage>553</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2012.150</pub-id></citation>
</ref>
<ref id="B58">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kelleher</surname> <given-names>J. D.</given-names></name></person-group> (<year>2019</year>). <source>Deep Learning</source>. London: MIT Press. <pub-id pub-id-type="doi">10.7551/mitpress/11171.001.0001</pub-id></citation>
</ref>
<ref id="B59">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kellman</surname> <given-names>M.</given-names></name> <name><surname>Zhang</surname> <given-names>K.</given-names></name> <name><surname>Markley</surname> <given-names>E.</given-names></name> <name><surname>Tamir</surname> <given-names>J.</given-names></name> <name><surname>Bostan</surname> <given-names>E.</given-names></name> <name><surname>Lustig</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Memory-efficient learning for large-scale computational imaging</article-title>. <source>IEEE Trans. Comput. Imag.</source> <volume>6</volume>, <fpage>1403</fpage>&#x02013;<lpage>1414</lpage>. <pub-id pub-id-type="doi">10.1109/TCI.2020.3025735</pub-id></citation>
</ref>
<ref id="B60">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kim</surname> <given-names>J.</given-names></name> <name><surname>Lee</surname> <given-names>J. K.</given-names></name> <name><surname>Lee</surname> <given-names>K. M.</given-names></name></person-group> (<year>2016</year>). <article-title>&#x0201C;Accurate image super-resolution using very deep convolutional networks,&#x0201D;</article-title> in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source>, 1646&#x02013;1654. <pub-id pub-id-type="doi">10.1109/CVPR.2016.182</pub-id></citation>
</ref>
<ref id="B61">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kitamura</surname> <given-names>Y.</given-names></name> <name><surname>Shogenji</surname> <given-names>R.</given-names></name> <name><surname>Yamada</surname> <given-names>K.</given-names></name> <name><surname>Miyatake</surname> <given-names>S.</given-names></name> <name><surname>Miyamoto</surname> <given-names>M.</given-names></name> <name><surname>Morimoto</surname> <given-names>T.</given-names></name> <etal/></person-group>. (<year>2004</year>). <article-title>Reconstruction of a high-resolution image on a compound-eye image-capturing system</article-title>. <source>Appl. Opt.</source> <volume>43</volume>, <fpage>1719</fpage>&#x02013;<lpage>1727</lpage>. <pub-id pub-id-type="doi">10.1364/AO.43.001719</pub-id><pub-id pub-id-type="pmid">15046176</pub-id></citation></ref>
<ref id="B62">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kumar</surname> <given-names>S. S.</given-names></name> <name><surname>Dongliang</surname> <given-names>T.</given-names></name> <name><surname>Cuong</surname> <given-names>D.</given-names></name></person-group> (<year>2017</year>). <article-title>Single-shot multispectral imaging with a monochromatic camera</article-title>. <source>Optica</source> <volume>4</volume>:<fpage>1209</fpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.4.001209</pub-id></citation>
</ref>
<ref id="B63">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lai</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>Q.</given-names></name> <name><surname>Wu</surname> <given-names>X.</given-names></name> <name><surname>Liu</surname> <given-names>G.</given-names></name> <name><surname>Pu</surname> <given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>Mutual transfer learning of reconstructing images through a multimode fiber or a scattering medium</article-title>. <source>IEEE Access</source> <volume>9</volume>, <fpage>68387</fpage>&#x02013;<lpage>68395</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2021.3077560</pub-id></citation>
</ref>
<ref id="B64">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Law</surname> <given-names>N. M.</given-names></name> <name><surname>Fors</surname> <given-names>O.</given-names></name> <name><surname>Ratzloff</surname> <given-names>J.</given-names></name> <name><surname>Wulfken</surname> <given-names>P.</given-names></name> <name><surname>Kavanaugh</surname> <given-names>D.</given-names></name> <name><surname>Sitar</surname> <given-names>D. J.</given-names></name> <etal/></person-group>. (<year>2015</year>). <article-title>Evryscope science: exploring the potential of all-sky gigapixel-scale telescopes</article-title>. <source>Publ. Astron. Soc. Pac.</source> <volume>127</volume>, <fpage>234</fpage>&#x02013;<lpage>249</lpage>. <pub-id pub-id-type="doi">10.1086/680521</pub-id></citation>
</ref>
<ref id="B65">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Law</surname> <given-names>N. M.</given-names></name> <name><surname>Fors</surname> <given-names>O.</given-names></name> <name><surname>Wulfken</surname> <given-names>P.</given-names></name> <name><surname>Ratzloff</surname> <given-names>J.</given-names></name> <name><surname>Kavanaugh</surname> <given-names>D.</given-names></name></person-group> (<year>2014</year>). <article-title>&#x0201C;The Evryscope: the first full-sky gigapixel-scale telescope,&#x0201D;</article-title> in <source>Ground-based and Airborne Telescopes V</source> (<publisher-loc>SPIE</publisher-loc>), <fpage>357</fpage>&#x02013;<lpage>365</lpage>. <pub-id pub-id-type="doi">10.1117/12.2057031</pub-id></citation>
</ref>
<ref id="B66">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>LeCun</surname> <given-names>Y.</given-names></name> <name><surname>Bengio</surname> <given-names>Y.</given-names></name> <name><surname>Hinton</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Deep learning</article-title>. <source>Nature</source> <volume>521</volume>, <fpage>436</fpage>&#x02013;<lpage>444</lpage>. <pub-id pub-id-type="doi">10.1038/nature14539</pub-id></citation>
</ref>
<ref id="B67">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Lee</surname> <given-names>D. D.</given-names></name> <name><surname>Pham</surname> <given-names>P.</given-names></name> <name><surname>Largman</surname> <given-names>Y.</given-names></name> <name><surname>Ng</surname> <given-names>A.</given-names></name></person-group> (<year>2022</year>). <article-title>&#x0201C;Advances in neural information processing systems 22,&#x0201D;</article-title> in <source>Proceedings of the 26th Annual Conference on Neural Information Processing Systems 2012</source> (<publisher-loc>NIPS</publisher-loc>).</citation>
</ref>
<ref id="B68">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leninger</surname> <given-names>B.</given-names></name> <name><surname>Edwards</surname> <given-names>J.</given-names></name> <name><surname>Antoniades</surname> <given-names>J.</given-names></name> <name><surname>Chester</surname> <given-names>D.</given-names></name> <name><surname>Haas</surname> <given-names>D.</given-names></name> <name><surname>Liu</surname> <given-names>E.</given-names></name> <etal/></person-group>. (<year>2008</year>). <article-title>Autonomous real-time ground ubiquitous surveillance-imaging system (ARGUS-IS)</article-title>. <source>Proc. SPIE &#x02013; Int. Soc. Opt. Eng.</source> <volume>6981</volume>, <fpage>69810H</fpage>&#x02013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1117/12.784724</pub-id></citation>
</ref>
<ref id="B69">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>J. Y.</given-names></name> <name><surname>Feng</surname> <given-names>W. X.</given-names></name> <name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Wei</surname> <given-names>Y. Z.</given-names></name> <name><surname>Shao</surname> <given-names>X. P.</given-names></name></person-group> (<year>2021</year>). <article-title>Design of airborne multi-scale wide-field-of-view and high-resolution imaging system</article-title>. <source>Acta Opt. Sin.</source> <volume>41</volume>, <fpage>50</fpage>&#x02013;<lpage>60</lpage>. <pub-id pub-id-type="doi">10.3788/AOS202141.0208002</pub-id></citation>
</ref>
<ref id="B70">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Han</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>S.</given-names></name> <name><surname>Shao</surname> <given-names>X.</given-names></name></person-group> (<year>2021</year>). <article-title>Near-infrared monocular 3D computational polarization imaging of surfaces exhibiting nonuniform reflectance</article-title>. <source>Opt. Expr.</source> <volume>29</volume>, <fpage>15616</fpage>&#x02013;<lpage>15630</lpage>. <pub-id pub-id-type="doi">10.1364/OE.423790</pub-id><pub-id pub-id-type="pmid">33985259</pub-id></citation></ref>
<ref id="B71">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Gu</surname> <given-names>P.</given-names></name></person-group> (<year>2004</year>). <article-title>Free-form surface inspection techniques state of the art review</article-title>. <source>Comput.-Aided Des.</source> <volume>36</volume>, <fpage>1395</fpage>&#x02013;<lpage>1417</lpage>. <pub-id pub-id-type="doi">10.1016/j.cad.2004.02.009</pub-id></citation>
</ref>
<ref id="B72">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Y.</given-names></name> <name><surname>Xue</surname> <given-names>Y.</given-names></name> <name><surname>Tian</surname> <given-names>L.</given-names></name></person-group> (<year>2018</year>). <article-title>Deep speckle correlation: a deep learning approach toward scalable imaging through scattering media</article-title>. <source>Optica</source> <volume>5</volume>, <fpage>1181</fpage>&#x02013;<lpage>1190</lpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.5.001181</pub-id></citation>
</ref>
<ref id="B73">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Li</surname> <given-names>Z. P.</given-names></name> <name><surname>Huang</surname> <given-names>X.</given-names></name> <name><surname>Cao</surname> <given-names>Y.</given-names></name> <name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Li</surname> <given-names>Y. H.</given-names></name> <name><surname>Jin</surname> <given-names>W.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Single-photon computational 3D imaging at 45 km</article-title>. <source>Photon. Res.</source> <volume>8</volume>, <fpage>1532</fpage>&#x02013;<lpage>1540</lpage>. <pub-id pub-id-type="doi">10.1364/PRJ.390091</pub-id></citation>
</ref>
<ref id="B74">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lim</surname> <given-names>B.</given-names></name> <name><surname>Son</surname> <given-names>S.</given-names></name> <name><surname>Kim</surname> <given-names>H.</given-names></name> <name><surname>Nah</surname> <given-names>S.</given-names></name> <name><surname>Mu Lee</surname> <given-names>K.</given-names></name></person-group> (<year>2017</year>). <article-title>&#x0201C;Enhanced deep residual networks for single image super-resolution,&#x0201D;</article-title> in <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops</source>, 136&#x02013;144. <pub-id pub-id-type="doi">10.1109/CVPRW.2017.151</pub-id></citation>
</ref>
<ref id="B75">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Cao</surname> <given-names>L.</given-names></name> <name><surname>Shao</surname> <given-names>X.</given-names></name> <name><surname>Han</surname> <given-names>P.</given-names></name> <name><surname>Bin</surname> <given-names>X.</given-names></name></person-group> (<year>2015</year>). <article-title>Polarimetric dehazing utilizing spatial frequency segregation of images</article-title>. <source>Appl. Opt.</source> <volume>54</volume>:<fpage>8116</fpage>. <pub-id pub-id-type="doi">10.1364/AO.54.008116</pub-id><pub-id pub-id-type="pmid">26406513</pub-id></citation></ref>
<ref id="B76">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Han</surname> <given-names>P.</given-names></name> <name><surname>Wei</surname> <given-names>Y.</given-names></name> <name><surname>Yang</surname> <given-names>K.</given-names></name> <name><surname>Huang</surname> <given-names>S.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <etal/></person-group>. (<year>2018</year>). <article-title>Deeply seeing through highly turbid water by active polarization imaging</article-title>. <source>Opt. Lett.</source> <volume>43</volume>, <fpage>4903</fpage>&#x02013;<lpage>4906</lpage>. <pub-id pub-id-type="doi">10.1364/OL.43.004903</pub-id><pub-id pub-id-type="pmid">30320779</pub-id></citation></ref>
<ref id="B77">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Shen</surname> <given-names>D.</given-names></name> <name><surname>Xu</surname> <given-names>J.</given-names></name> <name><surname>Yu</surname> <given-names>X.</given-names></name></person-group> (<year>2020</year>). <article-title>&#x0201C;Research on snapshot infrared computational spectral imaging technology,&#x0201D;</article-title> in <source>Conference on Infrared Device and Infrared Technology</source>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=1w120am0pa3n08609d6d0rh0vx586617">https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=1w120am0pa3n08609d6d0rh0vx586617</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B78">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Liutkus</surname> <given-names>A.</given-names></name> <name><surname>Martina</surname> <given-names>D.</given-names></name> <name><surname>Popoff</surname> <given-names>S.</given-names></name> <name><surname>Chardon</surname> <given-names>G.</given-names></name> <name><surname>Katz</surname> <given-names>O.</given-names></name> <name><surname>Lerosey</surname> <given-names>G.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Imaging with nature: compressive imaging using a multiply scattering medium</article-title>. <source>Sci. Rep.</source> <volume>4</volume>:<fpage>5552</fpage>. <pub-id pub-id-type="doi">10.1038/srep05552</pub-id><pub-id pub-id-type="pmid">25005695</pub-id></citation></ref>
<ref id="B79">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Lukac</surname> <given-names>R.</given-names></name> <name><surname>Radha</surname> <given-names>H.</given-names></name></person-group> (<year>2011</year>). <article-title>Computational photography: methods and applications</article-title>. <source>J. Electron Imaging.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=2f3dad1aea4b8e3aac3614987d4abc97">https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=2f3dad1aea4b8e3aac3614987d4abc97</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B80">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Lyu</surname> <given-names>M.</given-names></name> <name><surname>Wang</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>G.</given-names></name> <name><surname>Zheng</surname> <given-names>S.</given-names></name> <name><surname>Situ</surname> <given-names>G.</given-names></name></person-group> (<year>2019</year>). <article-title>Learning-based lensless imaging through optically thick scattering media</article-title>. <source>Adv. Photon.</source> <volume>1</volume>:<fpage>10</fpage>. <pub-id pub-id-type="doi">10.1117/1.AP.1.3.036002</pub-id></citation>
</ref>
<ref id="B81">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Mait</surname> <given-names>J. N.</given-names></name> <name><surname>Euliss</surname> <given-names>G. W.</given-names></name> <name><surname>Athale</surname> <given-names>R. A.</given-names></name></person-group> (<year>2012</year>). <source>Computational imaging</source>. IEEE. Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bibandpaperid=ec226e2abc1ca0a55496cbe565da0209">https://xueshu.baidu.com/u/citation?type=bibandpaperid=ec226e2abc1ca0a55496cbe565da0209</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B82">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Marks</surname> <given-names>D. L.</given-names></name> <name><surname>Llull</surname> <given-names>P. R.</given-names></name> <name><surname>Phillips</surname> <given-names>Z.</given-names></name> <name><surname>Anderson</surname> <given-names>J. G.</given-names></name> <name><surname>Feller</surname> <given-names>S. D.</given-names></name> <name><surname>Vera</surname> <given-names>E. M.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Characterization of the AWARE 10 two-gigapixel wide-field-of-view visible imager</article-title>. <source>Appl. Opt.</source> <volume>53</volume>, <fpage>C54</fpage>&#x02013;<lpage>C63</lpage>. <pub-id pub-id-type="doi">10.1364/AO.53.000C54</pub-id><pub-id pub-id-type="pmid">24921890</pub-id></citation></ref>
<ref id="B83">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>McLeod</surname> <given-names>E.</given-names></name> <name><surname>Ozcan</surname> <given-names>A.</given-names></name></person-group> (<year>2016</year>). <article-title>Unconventional methods of imaging: computational microscopy and compact implementations</article-title>. <source>Rep. Prog. Phys.</source> <volume>79</volume>:<fpage>076001</fpage>. <pub-id pub-id-type="doi">10.1088/0034-4885/79/7/076001</pub-id><pub-id pub-id-type="pmid">27214407</pub-id></citation></ref>
<ref id="B84">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miyazaki</surname> <given-names>D.</given-names></name> <name><surname>Kagesawa</surname> <given-names>M.</given-names></name> <name><surname>Ikeuchi</surname> <given-names>K.</given-names></name></person-group> (<year>2002b</year>). <article-title>&#x0201C;Determining shapes of transparent objects from two polarization images,&#x0201D;</article-title> in <source>MVA</source>, <fpage>26</fpage>&#x02013;<lpage>31</lpage>.</citation>
</ref>
<ref id="B85">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miyazaki</surname> <given-names>D.</given-names></name> <name><surname>Saito</surname> <given-names>M.</given-names></name> <name><surname>Sato</surname> <given-names>Y.</given-names></name> <name><surname>Ikeuchi</surname> <given-names>K.</given-names></name></person-group> (<year>2002a</year>). <article-title>Determining surface orientations of transparent objects based on polarization degrees in visible and infrared wavelengths</article-title>. <source>JOSA A</source> <volume>19</volume>, <fpage>687</fpage>&#x02013;<lpage>694</lpage>. <pub-id pub-id-type="doi">10.1364/JOSAA.19.000687</pub-id><pub-id pub-id-type="pmid">11934161</pub-id></citation></ref>
<ref id="B86">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Miyazaki</surname> <given-names>D.</given-names></name> <name><surname>Shigetomi</surname> <given-names>T.</given-names></name> <name><surname>Baba</surname> <given-names>M.</given-names></name> <name><surname>Furukawa</surname> <given-names>R.</given-names></name> <name><surname>Hiura</surname> <given-names>S.</given-names></name> <name><surname>Asada</surname> <given-names>N.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Surface normal estimation of black specular objects from multiview polarization images</article-title>. <source>Opt. Eng.</source> <volume>56</volume>:<fpage>041303</fpage>. <pub-id pub-id-type="doi">10.1117/1.OE.56.4.041303</pub-id></citation>
</ref>
<ref id="B87">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Monakhova</surname> <given-names>K.</given-names></name> <name><surname>Yanny</surname> <given-names>K.</given-names></name> <name><surname>Aggarwal</surname> <given-names>N.</given-names></name> <name><surname>Waller</surname> <given-names>L.</given-names></name></person-group> (<year>2020</year>). <article-title>Spectral DiffuserCam: lensless snapshot hyperspectral imaging with a spectral filter array</article-title>. <source>Optica</source> <volume>7</volume>, <fpage>1298</fpage>&#x02013;<lpage>1307</lpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.397214</pub-id></citation>
</ref>
<ref id="B88">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Morel</surname> <given-names>O.</given-names></name> <name><surname>Stolz</surname> <given-names>C.</given-names></name> <name><surname>Meriaudeau</surname> <given-names>F.</given-names></name> <name><surname>Gorria</surname> <given-names>P.</given-names></name></person-group> (<year>2006</year>). <article-title>Active lighting applied to three-dimensional reconstruction of specular metallic surfaces by polarization imaging</article-title>. <source>Appl. Opt.</source> <volume>45</volume>, <fpage>4062</fpage>&#x02013;<lpage>4068</lpage>. <pub-id pub-id-type="doi">10.1364/AO.45.004062</pub-id><pub-id pub-id-type="pmid">16761046</pub-id></citation></ref>
<ref id="B89">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Nakamura</surname> <given-names>T.</given-names></name> <name><surname>Kittle</surname> <given-names>D. S.</given-names></name> <name><surname>Youn</surname> <given-names>S. H.</given-names></name> <name><surname>Feller</surname> <given-names>S. D.</given-names></name> <name><surname>Tanida</surname> <given-names>J.</given-names></name> <name><surname>Brady</surname> <given-names>D. J.</given-names></name> <etal/></person-group>. (<year>2013</year>). <article-title>Autofocus for a multiscale gigapixel camera</article-title>. <source>Appl. Opt.</source> <volume>52</volume>:<fpage>8146</fpage>. <pub-id pub-id-type="doi">10.1364/AO.52.008146</pub-id><pub-id pub-id-type="pmid">24513771</pub-id></citation></ref>
<ref id="B90">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Osnabrugge</surname> <given-names>G.</given-names></name> <name><surname>Horstmeyer</surname> <given-names>R.</given-names></name> <name><surname>Papadopoulos</surname> <given-names>I. N.</given-names></name> <name><surname>Judkewitz</surname> <given-names>B.</given-names></name> <name><surname>Vellekoop</surname> <given-names>I. M.</given-names></name></person-group> (<year>2017</year>). <article-title>Generalized optical memory effect</article-title>. <source>Optica</source> <volume>4</volume>:<fpage>886</fpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.4.000886</pub-id></citation>
</ref>
<ref id="B91">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pammi</surname> <given-names>V. A.</given-names></name> <name><surname>Alfaro-Bittner</surname> <given-names>K.</given-names></name> <name><surname>Clerc</surname> <given-names>M. G.</given-names></name> <name><surname>Barbay</surname> <given-names>S.</given-names></name></person-group> (<year>2019</year>). <article-title>Photonic computing with single and coupled spiking micropillar lasers</article-title>. <source>IEEE J. Sel. Top. Quantum Electron.</source> <volume>26</volume>, <fpage>1</fpage>&#x02013;<lpage>7</lpage>. <pub-id pub-id-type="doi">10.1109/JSTQE.2019.2929187</pub-id></citation>
</ref>
<ref id="B92">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Panigrahi</surname> <given-names>S.</given-names></name> <name><surname>Fade</surname> <given-names>J.</given-names></name> <name><surname>Alouini</surname> <given-names>M.</given-names></name></person-group> (<year>2015</year>). <article-title>Adaptive polarimetric image representation for contrast optimization of a polarized beacon through fog</article-title>. <source>J. Opt.</source> <volume>17</volume>:<fpage>252</fpage>. <pub-id pub-id-type="doi">10.1088/2040-8978/17/6/065703</pub-id></citation>
</ref>
<ref id="B93">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Park</surname> <given-names>Y.</given-names></name> <name><surname>Depeursinge</surname> <given-names>C.</given-names></name> <name><surname>Popescu</surname> <given-names>G.</given-names></name></person-group> (<year>2018</year>). <article-title>Quantitative phase imaging in biomedicine</article-title>. <source>Nat. Photon.</source> <volume>12</volume>, <fpage>578</fpage>&#x02013;<lpage>589</lpage>. <pub-id pub-id-type="doi">10.1038/s41566-018-0253-x</pub-id><pub-id pub-id-type="pmid">26648557</pub-id></citation></ref>
<ref id="B94">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pendry</surname> <given-names>J. B.</given-names></name></person-group> (<year>2000</year>). <article-title>Negative refraction makes a perfect lens</article-title>. <source>Phys. Rev. Lett.</source> <volume>85</volume>:<fpage>3966</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.85.3966</pub-id></citation>
</ref>
<ref id="B95">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Popoff</surname> <given-names>S.</given-names></name> <name><surname>Lerosey</surname> <given-names>G.</given-names></name> <name><surname>Fink</surname> <given-names>M.</given-names></name> <name><surname>Boccara</surname> <given-names>A. C.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name></person-group> (<year>2010b</year>). <article-title>Image transmission through an opaque material</article-title>. <source>Nat. Commun.</source> <volume>1</volume>:<fpage>81</fpage>. <pub-id pub-id-type="doi">10.1038/ncomms1078</pub-id><pub-id pub-id-type="pmid">20865799</pub-id></citation></ref>
<ref id="B96">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Popoff</surname> <given-names>S. M.</given-names></name> <name><surname>Lerosey</surname> <given-names>G.</given-names></name> <name><surname>Carminati</surname> <given-names>R.</given-names></name> <name><surname>Fink</surname> <given-names>M.</given-names></name> <name><surname>Boccara</surname> <given-names>A. C.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2010a</year>). <article-title>Measuring the transmission matrix in optics: an approach to the study and control of light propagation in disordered media</article-title>. <source>Phys. Rev. Lett.</source> <volume>104</volume>:<fpage>100601</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.104.100601</pub-id><pub-id pub-id-type="pmid">20366410</pub-id></citation></ref>
<ref id="B97">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Popovic</surname> <given-names>V.</given-names></name></person-group> (<year>2016</year>). <source>Real-time computational gigapixel multi-camera systems.</source> Thesis, EPFL. <pub-id pub-id-type="doi">10.1007/978-3-319-59057-8_4</pub-id></citation>
</ref>
<ref id="B98">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Popovic</surname> <given-names>V.</given-names></name> <name><surname>Seyid</surname> <given-names>K.</given-names></name> <name><surname>Akin</surname> <given-names>A.</given-names></name> <name><surname>Cogal</surname> <given-names>&#x000D6;.</given-names></name> <name><surname>Afshari</surname> <given-names>H.</given-names></name> <name><surname>Schmid</surname> <given-names>A.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Image blending in a high frame rate FPGA-based multi-camera system</article-title>. <source>J. Sign. Proc. Syst.</source> <volume>76</volume>, <fpage>169</fpage>&#x02013;<lpage>184</lpage>. <pub-id pub-id-type="doi">10.1007/s11265-013-0858-8</pub-id></citation>
</ref>
<ref id="B99">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Porat</surname> <given-names>A.</given-names></name> <name><surname>Andresen</surname> <given-names>E. R.</given-names></name> <name><surname>Rigneault</surname> <given-names>H.</given-names></name> <name><surname>Oron</surname> <given-names>D.</given-names></name> <name><surname>Gigan</surname> <given-names>S.</given-names></name> <name><surname>Katz</surname> <given-names>O.</given-names></name></person-group> (<year>2016</year>). <article-title>Widefield lensless imaging through a fiber bundle via speckle correlations</article-title>. <source>Opt. Expr.</source> <volume>24</volume>, <fpage>16835</fpage>&#x02013;<lpage>16855</lpage>. <pub-id pub-id-type="doi">10.1364/OE.24.016835</pub-id><pub-id pub-id-type="pmid">27464136</pub-id></citation></ref>
<ref id="B100">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qaisar</surname> <given-names>S.</given-names></name> <name><surname>Bilal</surname> <given-names>R. M.</given-names></name> <name><surname>Iqbal</surname> <given-names>W.</given-names></name> <name><surname>Naureen</surname> <given-names>M.</given-names></name> <name><surname>Lee</surname> <given-names>S.</given-names></name></person-group> (<year>2013</year>). <article-title>Compressive sensing: from theory to applications, a survey</article-title>. <source>J. Commun. Netw.</source> <volume>15</volume>, <fpage>443</fpage>&#x02013;<lpage>456</lpage>. <pub-id pub-id-type="doi">10.1109/JCN.2013.000083</pub-id></citation>
</ref>
<ref id="B101">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qian</surname> <given-names>T.</given-names></name> <name><surname>Wang</surname> <given-names>Y.</given-names></name></person-group> (<year>2010</year>). <article-title>Micro/nano-fabrication technologies for cell biology</article-title>. <source>Med. Biol. Eng. Comput.</source> <volume>48</volume>, <fpage>1023</fpage>&#x02013;<lpage>1032</lpage>. <pub-id pub-id-type="doi">10.1007/s11517-010-0632-z</pub-id><pub-id pub-id-type="pmid">20490938</pub-id></citation></ref>
<ref id="B102">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Qiao</surname> <given-names>M.</given-names></name> <name><surname>Liu</surname> <given-names>H.</given-names></name> <name><surname>Pang</surname> <given-names>G.</given-names></name> <name><surname>Han</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). <article-title>Non-invasive three-dimension control of light between turbid layers using a surface quasi-point light source for precorrection</article-title>. <source>Sci. Rep.</source> <volume>7</volume>:<fpage>9792</fpage>. <pub-id pub-id-type="doi">10.1038/s41598-017-10450-7</pub-id><pub-id pub-id-type="pmid">28852142</pub-id></citation></ref>
<ref id="B103">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Ramesh</surname> <given-names>R.</given-names></name> <name><surname>Davis</surname> <given-names>J.</given-names></name></person-group> (<year>2008</year>). <article-title>5D time-light transport matrix: what can we reason about scene properties?</article-title> <italic>Acta Leprol</italic>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://dspace.mit.edu/handle/1721.1/67888?show=full">https://dspace.mit.edu/handle/1721.1/67888?show=full</ext-link></citation>
</ref>
<ref id="B104">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rani</surname> <given-names>M.</given-names></name> <name><surname>Dhok</surname> <given-names>S. B.</given-names></name> <name><surname>Deshmukh</surname> <given-names>R. B.</given-names></name></person-group> (<year>2018</year>). <article-title>A systematic review of compressive sensing: concepts, implementations and applications</article-title>. <source>IEEE Access</source> <volume>6</volume>, <fpage>4875</fpage>&#x02013;<lpage>4894</lpage>. <pub-id pub-id-type="doi">10.1109/ACCESS.2018.2793851</pub-id></citation>
</ref>
<ref id="B105">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Raskar</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). <article-title>Recovering three-dimensional shape around a corner using ultrafast time-of-flight imaging</article-title>. <source>Nat. Commun.</source> <volume>3</volume>:<fpage>745</fpage>. <pub-id pub-id-type="doi">10.1038/ncomms1747</pub-id><pub-id pub-id-type="pmid">22434188</pub-id></citation></ref>
<ref id="B106">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rawat</surname> <given-names>B.</given-names></name> <name><surname>Mehra</surname> <given-names>N.</given-names></name> <name><surname>Bist</surname> <given-names>A. S.</given-names></name> <name><surname>Yusup</surname> <given-names>M.</given-names></name> <name><surname>Sanjaya</surname> <given-names>Y. P. A.</given-names></name></person-group> (<year>2022</year>). <article-title>Quantum computing and AI: impacts and possibilities</article-title>. <source>ADI J. Recent Innov.</source> <volume>3</volume>, <fpage>202</fpage>&#x02013;<lpage>207</lpage>. <pub-id pub-id-type="doi">10.34306/ajri.v3i2.656</pub-id></citation>
</ref>
<ref id="B107">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Robinson</surname> <given-names>M. D.</given-names></name> <name><surname>Stork</surname> <given-names>D. G.</given-names></name></person-group> (<year>2006</year>). <article-title>&#x0201C;Joint design of lens systems and digital image processing,&#x0201D;</article-title> in <source>International Optical Design Conference</source> (<publisher-loc>Optica Publishing Group</publisher-loc>), WB4. <pub-id pub-id-type="doi">10.1364/IODC.2006.WB4</pub-id></citation>
</ref>
<ref id="B108">
<citation citation-type="book"><person-group person-group-type="author"><name><surname>Robinson</surname> <given-names>M. D.</given-names></name> <name><surname>Stork</surname> <given-names>D. G.</given-names></name></person-group> (<year>2007</year>). <article-title>&#x0201C;Joint digital-optical design of multi-frame imaging systems,&#x0201D;</article-title> in <source>Computational Optical Sensing and Imaging</source> (<publisher-loc>Optica Publishing Group</publisher-loc>), CMB2. <pub-id pub-id-type="doi">10.1364/COSI.2007.CMB2</pub-id></citation>
</ref>
<ref id="B109">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Robinson</surname> <given-names>M. D.</given-names></name> <name><surname>Stork</surname> <given-names>D. G.</given-names></name></person-group> (<year>2008</year>). <article-title>Joint digital-optical design of superresolution multiframe imaging systems</article-title>. <source>Appl. Opt.</source> <volume>47</volume>, <fpage>11</fpage>&#x02013;<lpage>20</lpage>. <pub-id pub-id-type="doi">10.1364/AO.47.000B11</pub-id><pub-id pub-id-type="pmid">18382546</pub-id></citation></ref>
<ref id="B110">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ruan</surname> <given-names>H.</given-names></name> <name><surname>Brake</surname> <given-names>J.</given-names></name> <name><surname>Robinson</surname> <given-names>J. E.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Jang</surname> <given-names>M.</given-names></name> <name><surname>Xiao</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Deep tissue optical focusing and optogenetic modulation with time-reversed ultrasonically encoded light</article-title>. <source>Sci. Adv.</source> <volume>3</volume>:<fpage>eaao5520</fpage>. <pub-id pub-id-type="doi">10.1126/sciadv.aao5520</pub-id><pub-id pub-id-type="pmid">29226248</pub-id></citation></ref>
<ref id="B111">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Rust</surname> <given-names>M. J.</given-names></name> <name><surname>Bates</surname> <given-names>M.</given-names></name> <name><surname>Zhuang</surname> <given-names>X.</given-names></name></person-group> (<year>2006</year>). <article-title>Sub-diffraction-limit imaging by stochastic optical reconstruction microscopy (STORM)</article-title>. <source>Nat. Methods</source> <volume>3</volume>, <fpage>793</fpage>&#x02013;<lpage>796</lpage>. <pub-id pub-id-type="doi">10.1038/nmeth929</pub-id><pub-id pub-id-type="pmid">16896339</pub-id></citation></ref>
<ref id="B112">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Sabelhaus</surname> <given-names>P. A.</given-names></name></person-group> (<year>2004</year>). <source>An Overview of the James Webb Space Telescope (JWST) Project.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bibandpaperid=19d39339a8725c71611fc5e00befc675">https://xueshu.baidu.com/u/citation?type=bibandpaperid=19d39339a8725c71611fc5e00befc675</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B113">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Saunders</surname> <given-names>C.</given-names></name> <name><surname>Murray-Bruce</surname> <given-names>J.</given-names></name> <name><surname>Goyal</surname> <given-names>V. K.</given-names></name></person-group> (<year>2019</year>). <article-title>Computational periscopy with an ordinary digital camera</article-title>. <source>Nature</source> <volume>565</volume>:<fpage>472</fpage>. <pub-id pub-id-type="doi">10.1038/s41586-018-0868-6</pub-id><pub-id pub-id-type="pmid">30675042</pub-id></citation></ref>
<ref id="B114">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schechner</surname> <given-names>Y. Y.</given-names></name> <name><surname>Karpel</surname> <given-names>N.</given-names></name></person-group> (<year>2006</year>). <article-title>Recovery of underwater visibility and structure by polarization analysis</article-title>. <source>IEEE J. Ocean Eng.</source> <volume>30</volume>, <fpage>570</fpage>&#x02013;<lpage>587</lpage>. <pub-id pub-id-type="doi">10.1109/JOE.2005.850871</pub-id></citation>
</ref>
<ref id="B115">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schechner</surname> <given-names>Y. Y.</given-names></name> <name><surname>Narasimhan</surname> <given-names>S. G.</given-names></name> <name><surname>Nayar</surname> <given-names>S. K.</given-names></name></person-group> (<year>2003</year>). <article-title>Polarization-based vision through haze</article-title>. <source>Appl. Opt.</source> <volume>42</volume>, <fpage>511</fpage>&#x02013;<lpage>525</lpage>. <pub-id pub-id-type="doi">10.1364/AO.42.000511</pub-id><pub-id pub-id-type="pmid">12570274</pub-id></citation></ref>
<ref id="B116">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schmidhuber</surname> <given-names>J.</given-names></name></person-group> (<year>2015</year>). <article-title>Deep learning in neural networks: an overview</article-title>. <source>Neural Netw.</source> <volume>61</volume>, <fpage>85</fpage>&#x02013;<lpage>117</lpage>. <pub-id pub-id-type="doi">10.1016/j.neunet.2014.09.003</pub-id><pub-id pub-id-type="pmid">25462637</pub-id></citation></ref>
<ref id="B117">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shao</surname> <given-names>X. P.</given-names></name> <name><surname>Liu</surname> <given-names>F.</given-names></name> <name><surname>Li</surname> <given-names>W.</given-names></name> <name><surname>Yang</surname> <given-names>L.</given-names></name> <name><surname>Yang</surname> <given-names>S.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Latest progress in computational imaging technology and application</article-title>. <source>Laser Optoelectr. Progr.</source> <volume>57</volume>:<fpage>020001</fpage>. <pub-id pub-id-type="doi">10.3788/LOP57.020001</pub-id></citation>
</ref>
<ref id="B118">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Shen</surname> <given-names>Y.</given-names></name> <name><surname>Liu</surname> <given-names>Y.</given-names></name> <name><surname>Ma</surname> <given-names>C.</given-names></name> <name><surname>Wang</surname> <given-names>L. V.</given-names></name></person-group> (<year>2016</year>). <article-title>Focusing light through biological tissue and tissue-mimicking phantoms up to 9.6cm in thickness with digital optical phase conjugation</article-title>. <source>J. Biomed. Opt.</source> <volume>21</volume>:<fpage>85001</fpage>. <pub-id pub-id-type="doi">10.1117/1.JBO.21.8.085001</pub-id><pub-id pub-id-type="pmid">27533439</pub-id></citation></ref>
<ref id="B119">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sinha</surname> <given-names>A.</given-names></name> <name><surname>Lee</surname> <given-names>J.</given-names></name> <name><surname>Li</surname> <given-names>S.</given-names></name> <name><surname>Barbastathis</surname> <given-names>G.</given-names></name></person-group> (<year>2017</year>). <article-title>Lensless computational imaging through deep learning</article-title>. <source>Optica</source> <volume>4</volume>, <fpage>1117</fpage>&#x02013;<lpage>1125</lpage>. <pub-id pub-id-type="doi">10.1364/OPTICA.4.001117</pub-id></citation>
</ref>
<ref id="B120">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sprague</surname> <given-names>R.</given-names></name> <name><surname>Zhang</surname> <given-names>A.</given-names></name> <name><surname>Hendricks</surname> <given-names>L.</given-names></name> <name><surname>O&#x00027;Brien</surname> <given-names>T.</given-names></name> <name><surname>Rutherford</surname> <given-names>T.</given-names></name></person-group> (<year>2012</year>). <article-title>Novel HMD concepts from the DARPA SCENICC program</article-title>. <source>Proc. SPIE</source> <volume>8383</volume>:<fpage>1</fpage>. <pub-id pub-id-type="doi">10.1117/12.922475</pub-id></citation>
</ref>
<ref id="B121">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stasio</surname> <given-names>N.</given-names></name> <name><surname>Conkey</surname> <given-names>D. B.</given-names></name> <name><surname>Moser</surname> <given-names>C.</given-names></name> <name><surname>Psaltis</surname> <given-names>D.</given-names></name></person-group> (<year>2015</year>). <article-title>Light control in a multicore fiber using the memory effect</article-title>. <source>Opt. Expr.</source> <volume>23</volume>:<fpage>30532</fpage>. <pub-id pub-id-type="doi">10.1364/OE.23.030532</pub-id><pub-id pub-id-type="pmid">26698531</pub-id></citation></ref>
<ref id="B122">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Stork</surname> <given-names>D. G.</given-names></name> <name><surname>Robinson</surname> <given-names>M. D.</given-names></name></person-group> (<year>2008</year>). <article-title>Theoretical foundations for joint digital-optical analysis of electro-optical imaging systems</article-title>. <source>Appl. Opt.</source> <volume>47</volume>:<fpage>B64</fpage>. <pub-id pub-id-type="doi">10.1364/AO.47.000B64</pub-id><pub-id pub-id-type="pmid">18382552</pub-id></citation></ref>
<ref id="B123">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Suo</surname> <given-names>J.</given-names></name> <name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Gong</surname> <given-names>J.</given-names></name> <name><surname>Yuan</surname> <given-names>X.</given-names></name> <name><surname>Brady</surname> <given-names>D. J.</given-names></name> <name><surname>Dai</surname> <given-names>Q.</given-names></name> <etal/></person-group>. (<year>2021</year>). <source>Computational Imaging and Artificial Intelligence: The Next Revolution of Mobile Vision.</source> Available online at: <ext-link ext-link-type="uri" xlink:href="http://arxiv.org/abs/2109.08880">http://arxiv.org/abs/2109.08880</ext-link> (accessed June 27, 2023).</citation>
</ref>
<ref id="B124">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tan</surname> <given-names>C. L.</given-names></name> <name><surname>Mohseni</surname> <given-names>H.</given-names></name></person-group> (<year>2018</year>). <article-title>Emerging technologies for high performance infrared detectors</article-title>. <source>Nanophotonics</source> <volume>7</volume>, <fpage>169</fpage>&#x02013;<lpage>197</lpage>. <pub-id pub-id-type="doi">10.1515/nanoph-2017-0061</pub-id></citation>
</ref>
<ref id="B125">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tian</surname> <given-names>Z.</given-names></name> <name><surname>Zhao</surname> <given-names>M.</given-names></name> <name><surname>Yang</surname> <given-names>D.</given-names></name> <name><surname>Wang</surname> <given-names>S.</given-names></name> <name><surname>Pan</surname> <given-names>A.</given-names></name></person-group> (<year>2023</year>). <article-title>Optical remote imaging via Fourier ptychography</article-title>. <source>Photon. Res.</source> <volume>11</volume>:<fpage>2072</fpage>. <pub-id pub-id-type="doi">10.1364/PRJ.493938</pub-id></citation>
</ref>
<ref id="B126">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tyo</surname> <given-names>J. S.</given-names></name></person-group> (<year>2000</year>). <article-title>Enhancement of the point-spread function for imaging in scattering media by use of polarization-difference imaging</article-title>. <source>JOSA A</source> <volume>17</volume>, <fpage>1</fpage>&#x02013;<lpage>10</lpage>. <pub-id pub-id-type="doi">10.1364/JOSAA.17.000001</pub-id><pub-id pub-id-type="pmid">10641832</pub-id></citation></ref>
<ref id="B127">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Tyo</surname> <given-names>J. S.</given-names></name> <name><surname>Rowe</surname> <given-names>M. P.</given-names></name> <name><surname>Pugh</surname> <given-names>E. N.</given-names></name> <name><surname>Engheta</surname> <given-names>N.</given-names></name></person-group> (<year>1996</year>). <article-title>Target detection in optically scattering media by polarization-difference imaging</article-title>. <source>Appl. Opt.</source> <volume>35</volume>, <fpage>1855</fpage>&#x02013;<lpage>1870</lpage>. <pub-id pub-id-type="doi">10.1364/AO.35.001855</pub-id><pub-id pub-id-type="pmid">21085310</pub-id></citation></ref>
<ref id="B128">
<citation citation-type="web"><person-group person-group-type="author"><name><surname>Vellekoop</surname> <given-names>I. M.</given-names></name></person-group> (<year>2010</year>). <article-title>&#x0201C;Focusing light through living tissue,&#x0201D;</article-title> in <source>Optical Coherence Tomography and Coherence Domain Optical Methods in Biomedicine XIV</source> (<publisher-loc>SPIE</publisher-loc>), <fpage>406</fpage>&#x02013;<lpage>415</lpage>. Available online at: <ext-link ext-link-type="uri" xlink:href="https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=5b655643a3c68d28adc47dbb8b905a3c">https://xueshu.baidu.com/u/citation?type=bib&amp;paperid=5b655643a3c68d28adc47dbb8b905a3c</ext-link> (accessed December 26, 2023).</citation>
</ref>
<ref id="B129">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vellekoop</surname> <given-names>I. M.</given-names></name> <name><surname>Lagendijk</surname> <given-names>A.</given-names></name> <name><surname>Mosk</surname> <given-names>A. P.</given-names></name></person-group> (<year>2010</year>). <article-title>Exploiting disorder for perfect focusing</article-title>. <source>Nat. Photon.</source> <volume>4</volume>, <fpage>320</fpage>&#x02013;<lpage>322</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2010.3</pub-id></citation>
</ref>
<ref id="B130">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vellekoop</surname> <given-names>I. M.</given-names></name> <name><surname>Mosk</surname> <given-names>A. P.</given-names></name></person-group> (<year>2007</year>). <article-title>Focusing coherent light through opaque strongly scattering media</article-title>. <source>Opt. Lett.</source> <volume>32</volume>:<fpage>2309</fpage>. <pub-id pub-id-type="doi">10.1364/OL.32.002309</pub-id><pub-id pub-id-type="pmid">17700768</pub-id></citation></ref>
<ref id="B131">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>B.</given-names></name> <name><surname>Zhang</surname> <given-names>Z. B.</given-names></name> <name><surname>Zhong</surname> <given-names>S. P.</given-names></name> <name><surname>Zheng</surname> <given-names>Z. Q.</given-names></name> <name><surname>Xu</surname> <given-names>P.</given-names></name> <name><surname>Zhang</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Recent progress in high-performance photo-detectors enabled by the pulsed laser deposition technology</article-title>. <source>J. Mater. Chem. C</source> <volume>8</volume>, <fpage>4988</fpage>&#x02013;<lpage>5014</lpage>. <pub-id pub-id-type="doi">10.1039/C9TC07098B</pub-id></citation>
</ref>
<ref id="B132">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wang</surname> <given-names>C.</given-names></name> <name><surname>Hu</surname> <given-names>M.</given-names></name> <name><surname>Takashima</surname> <given-names>Y.</given-names></name> <name><surname>Schulz</surname> <given-names>T. J.</given-names></name> <name><surname>Brady</surname> <given-names>D. J.</given-names></name></person-group> (<year>2022</year>). <article-title>Snapshot ptychography on array cameras</article-title>. <source>Opt. Expr.</source> <volume>30</volume>:<fpage>2585</fpage>. <pub-id pub-id-type="doi">10.1364/OE.447499</pub-id><pub-id pub-id-type="pmid">35209395</pub-id></citation></ref>
<ref id="B133">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wickens</surname> <given-names>C. D.</given-names></name> <name><surname>Carswell</surname> <given-names>C. M.</given-names></name></person-group> (<year>2021</year>). <article-title>Information processing</article-title>. <source>Handb. Hum. Factors Ergon.</source> <volume>5</volume>, <fpage>114</fpage>&#x02013;<lpage>158</lpage>. <pub-id pub-id-type="doi">10.1002/9781119636113.ch5</pub-id></citation>
</ref>
<ref id="B134">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>C.</given-names></name> <name><surname>Liu</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>X.</given-names></name> <name><surname>Li</surname> <given-names>Z. P.</given-names></name> <name><surname>Pan</surname> <given-names>J. W.</given-names></name></person-group> (<year>2021</year>). <article-title>Non-line-of-sight imaging over 1.43 km</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>118</volume>:<fpage>e2024468118</fpage>. <pub-id pub-id-type="doi">10.1073/pnas.2024468118</pub-id><pub-id pub-id-type="pmid">33658383</pub-id></citation></ref>
<ref id="B135">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wu</surname> <given-names>J.</given-names></name> <name><surname>Xiong</surname> <given-names>B.</given-names></name> <name><surname>Lin</surname> <given-names>X.</given-names></name> <name><surname>He</surname> <given-names>J.</given-names></name> <name><surname>Suo</surname> <given-names>J.</given-names></name> <name><surname>Dai</surname> <given-names>Q.</given-names></name></person-group> (<year>2016</year>). <article-title>Snapshot hyperspectral volumetric microscopy</article-title>. <source>Sci. Rep.</source> <volume>6</volume>:<fpage>24624</fpage>. <pub-id pub-id-type="doi">10.1038/srep24624</pub-id><pub-id pub-id-type="pmid">27103155</pub-id></citation></ref>
<ref id="B136">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xiang</surname> <given-names>M.</given-names></name> <name><surname>Pan</surname> <given-names>A.</given-names></name> <name><surname>Zhao</surname> <given-names>Y.</given-names></name> <name><surname>Fan</surname> <given-names>X.</given-names></name> <name><surname>Zhao</surname> <given-names>H.</given-names></name> <name><surname>Li</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Coherent synthetic aperture imaging for visible remote sensing via reflective Fourier ptychography</article-title>. <source>Opt. Lett.</source> <volume>46</volume>, <fpage>29</fpage>&#x02013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1364/OL.409258</pub-id><pub-id pub-id-type="pmid">33362005</pub-id></citation></ref>
<ref id="B137">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Xinquan</surname> <given-names>W.</given-names></name></person-group> (<year>2007</year>). <article-title>Image reconstruction for the computed-tomography imaging interferometer</article-title>. <source>Acta Opt. Sin.</source> <volume>27</volume>, <fpage>1600</fpage>&#x02013;<lpage>1604</lpage>.</citation>
</ref>
<ref id="B138">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ye</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>L.</given-names></name> <name><surname>Li</surname> <given-names>X.</given-names></name> <name><surname>Yuan</surname> <given-names>Q.</given-names></name> <name><surname>Gao</surname> <given-names>Z.</given-names></name></person-group> (<year>2017</year>). <article-title>Review of optical freeform surface representation technique and its application</article-title>. <source>Opt. Eng.</source> <volume>56</volume>, <fpage>110901</fpage>&#x02013;<lpage>110901</lpage>. <pub-id pub-id-type="doi">10.1117/1.OE.56.11.110901</pub-id></citation>
</ref>
<ref id="B139">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ying</surname> <given-names>Z.</given-names></name> <name><surname>Feng</surname> <given-names>C.</given-names></name> <name><surname>Zhao</surname> <given-names>Z.</given-names></name> <name><surname>Dhar</surname> <given-names>S.</given-names></name> <name><surname>Dalir</surname> <given-names>H.</given-names></name> <name><surname>Gu</surname> <given-names>J.</given-names></name> <etal/></person-group>. (<year>2020</year>). <article-title>Electronic-photonic arithmetic logic unit for high-speed computing</article-title>. <source>Nat. Commun.</source> <volume>11</volume>:<fpage>2154</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-020-16057-3</pub-id><pub-id pub-id-type="pmid">32358492</pub-id></citation></ref>
<ref id="B140">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Youn</surname> <given-names>S. H.</given-names></name> <name><surname>Son</surname> <given-names>H. S.</given-names></name> <name><surname>Marks</surname> <given-names>D. L.</given-names></name> <name><surname>Shaw</surname> <given-names>J. M.</given-names></name> <name><surname>McLaughlin</surname> <given-names>P. O.</given-names></name> <name><surname>Feller</surname> <given-names>S. D.</given-names></name> <etal/></person-group>. (<year>2014</year>). <article-title>Optical performance test and validation of microcameras in multiscale, gigapixel imagers</article-title>. <source>Opt. Expr.</source> <volume>22</volume>, <fpage>3712</fpage>&#x02013;<lpage>3723</lpage>. <pub-id pub-id-type="doi">10.1364/OE.22.003712</pub-id><pub-id pub-id-type="pmid">24663663</pub-id></citation></ref>
<ref id="B141">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Yu</surname> <given-names>F.</given-names></name> <name><surname>Chen</surname> <given-names>J.</given-names></name> <name><surname>Huang</surname> <given-names>L.</given-names></name> <name><surname>Zhao</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>J.</given-names></name> <name><surname>Jin</surname> <given-names>R.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Photonic slide rule with metasurfaces</article-title>. <source>Light Sci. Appl.</source> <volume>11</volume>:<fpage>77</fpage>. <pub-id pub-id-type="doi">10.1038/s41377-022-00765-0</pub-id><pub-id pub-id-type="pmid">35351851</pub-id></citation></ref>
<ref id="B142">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>J.</given-names></name> <name><surname>Chen</surname> <given-names>Q.</given-names></name> <name><surname>Li</surname> <given-names>J.</given-names></name> <name><surname>Sun</surname> <given-names>J.</given-names></name> <name><surname>Zuo</surname> <given-names>C.</given-names></name></person-group> (<year>2018</year>). <article-title>Lensfree dynamic super-resolved phase imaging based on active micro-scanning</article-title>. <source>Opt. Lett.</source> <volume>43</volume>:<fpage>3714</fpage>. <pub-id pub-id-type="doi">10.1364/OL.43.003714</pub-id><pub-id pub-id-type="pmid">30067662</pub-id></citation></ref>
<ref id="B143">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>K.</given-names></name> <name><surname>Jung</surname> <given-names>Y. H.</given-names></name> <name><surname>Mikael</surname> <given-names>S.</given-names></name> <name><surname>Seo</surname> <given-names>J. H.</given-names></name> <name><surname>Kim</surname> <given-names>M.</given-names></name> <name><surname>Mi</surname> <given-names>H.</given-names></name> <etal/></person-group>. (<year>2017</year>). <article-title>Origami silicon optoelectronics for hemispherical electronic eye systems</article-title>. <source>Nat. Commun.</source> <volume>8</volume>:<fpage>1782</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-017-01926-1</pub-id><pub-id pub-id-type="pmid">29176549</pub-id></citation></ref>
<ref id="B144">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhang</surname> <given-names>W.</given-names></name> <name><surname>Liang</surname> <given-names>J.</given-names></name> <name><surname>Ju</surname> <given-names>H.</given-names></name> <name><surname>Ren</surname> <given-names>L.</given-names></name> <name><surname>Qu</surname> <given-names>E.</given-names></name> <name><surname>Wu</surname> <given-names>Z.</given-names></name></person-group> (<year>2016</year>). <article-title>A robust haze-removal scheme in polarimetric dehazing imaging based on automatic identification of sky region</article-title>. <source>Opt. Laser Technol.</source> <volume>86</volume>, <fpage>145</fpage>&#x02013;<lpage>151</lpage>. <pub-id pub-id-type="doi">10.1016/j.optlastec.2016.07.015</pub-id></citation>
</ref>
<ref id="B145">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhao</surname> <given-names>F.</given-names></name> <name><surname>Shen</surname> <given-names>Z.</given-names></name> <name><surname>Wang</surname> <given-names>D.</given-names></name> <name><surname>Xu</surname> <given-names>B.</given-names></name> <name><surname>Chen</surname> <given-names>X.</given-names></name> <name><surname>Yang</surname> <given-names>Y.</given-names></name> <etal/></person-group>. (<year>2021</year>). <article-title>Synthetic aperture metalens</article-title>. <source>Photon. Res.</source> <volume>9</volume>:<fpage>2388</fpage>. <pub-id pub-id-type="doi">10.1364/PRJ.440185</pub-id></citation>
</ref>
<ref id="B146">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zheng</surname> <given-names>G.</given-names></name> <name><surname>Horstmeyer</surname> <given-names>R.</given-names></name> <name><surname>Yang</surname> <given-names>C.</given-names></name></person-group> (<year>2013</year>). <article-title>Wide-field, high-resolution Fourier ptychographic microscopy</article-title>. <source>Nat. Photon.</source> <volume>7</volume>, <fpage>739</fpage>&#x02013;<lpage>745</lpage>. <pub-id pub-id-type="doi">10.1038/nphoton.2013.187</pub-id><pub-id pub-id-type="pmid">25243016</pub-id></citation></ref>
<ref id="B147">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zhou</surname> <given-names>Z.</given-names></name> <name><surname>Wu</surname> <given-names>Z.</given-names></name> <name><surname>Tan</surname> <given-names>P.</given-names></name></person-group> (<year>2013</year>). <article-title>Multi-view photometric stereo with spatially varying isotropic materials</article-title>. <source>Digit Signal Proc.</source> <volume>21</volume>, <fpage>391</fpage>&#x02013;<lpage>403</lpage>. <pub-id pub-id-type="doi">10.1109/CVPR.2013.195</pub-id><pub-id pub-id-type="pmid">32011254</pub-id></citation></ref>
<ref id="B148">
<citation citation-type="journal"><person-group person-group-type="author"><name><surname>Zutao</surname> <given-names>Z.</given-names></name> <name><surname>Yanjun</surname> <given-names>L.</given-names></name> <name><surname>Fubing</surname> <given-names>W.</given-names></name> <name><surname>Guanjun</surname> <given-names>M.</given-names></name> <name><surname>Waleed</surname> <given-names>S.</given-names></name> <name><surname>Layth</surname> <given-names>S.</given-names></name> <etal/></person-group>. (<year>2016</year>). <article-title>Novel multi-sensor environmental perception method using low-rank representation and a particle filter for vehicle reversing safety</article-title>. <source>Sensors</source> <volume>16</volume>:<fpage>848</fpage>. <pub-id pub-id-type="doi">10.3390/s16060848</pub-id><pub-id pub-id-type="pmid">27294931</pub-id></citation></ref>
</ref-list>
</back>
</article>