<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="en" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Phys.</journal-id>
<journal-title>Frontiers in Physics</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Phys.</abbrev-journal-title>
<issn pub-type="epub">2296-424X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">1362690</article-id>
<article-id pub-id-type="doi">10.3389/fphy.2024.1362690</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Physics</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>A resource-efficient quantum convolutional neural network</article-title>
<alt-title alt-title-type="left-running-head">Song et al.</alt-title>
<alt-title alt-title-type="right-running-head">
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fphy.2024.1362690">10.3389/fphy.2024.1362690</ext-link>
</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author">
<name>
<surname>Song</surname>
<given-names>Yanqi</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/conceptualization/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/formal-analysis/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/methodology/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Li</surname>
<given-names>Jing</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2618960/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-original-draft/"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/investigation/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
<role content-type="https://credit.niso.org/contributor-roles/visualization/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Wu</surname>
<given-names>Yusen</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/2627857/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/software/"/>
<role content-type="https://credit.niso.org/contributor-roles/data-curation/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Qin</surname>
<given-names>Sujuan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1803193/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Wen</surname>
<given-names>Qiaoyan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Gao</surname>
<given-names>Fei</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/283545/overview"/>
<role content-type="https://credit.niso.org/contributor-roles/writing-review-editing/"/>
<role content-type="https://credit.niso.org/contributor-roles/funding-acquisition/"/>
<role content-type="https://credit.niso.org/contributor-roles/supervision/"/>
<role content-type="https://credit.niso.org/contributor-roles/validation/"/>
</contrib>
</contrib-group>
<aff id="aff1">
<sup>1</sup>
<institution>State Key Laboratory of Networking and Switching Technology</institution>, <institution>Beijing University of Posts and Telecommunications</institution>, <addr-line>Beijing</addr-line>, <country>China</country>
</aff>
<aff id="aff2">
<sup>2</sup>
<institution>Department of Physics</institution>, <institution>The University of Western Australia</institution>, <addr-line>Perth</addr-line>, <addr-line>WA</addr-line>, <country>Australia</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1753413/overview">Xiao Yuan</ext-link>, Peking University, China</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2060530/overview">Yiming Huang</ext-link>, Peking University, China</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/2112353/overview">Hongyi Zhou</ext-link>, Chinese Academy of Sciences (CAS), China</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Yusen Wu, <email>yusen.wu@research.uwa.edu.au</email>; Sujuan Qin, <email>qsujuan@bupt.edu.cn</email>
</corresp>
</author-notes>
<pub-date pub-type="epub">
<day>05</day>
<month>04</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>12</volume>
<elocation-id>1362690</elocation-id>
<history>
<date date-type="received">
<day>28</day>
<month>12</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>14</day>
<month>03</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2024 Song, Li, Wu, Qin, Wen and Gao.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Song, Li, Wu, Qin, Wen and Gao</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<p>Quantum Convolutional Neural Network (QCNN) has achieved significant success in solving various complex problems, such as quantum many-body physics and image recognition. In comparison to the classical Convolutional Neural Network (CNN) model, the QCNN model requires excellent numerical performance or efficient computational resources to showcase its potential quantum advantages, particularly in classical data processing tasks. In this paper, we propose a computationally resource-efficient QCNN model referred to as RE-QCNN. Specifically, through a comprehensive analysis of the complexity associated with the forward and backward propagation processes in the quantum convolutional layer, our results demonstrate a significant reduction in computational resources required for this layer compared to the classical CNN model. Furthermore, our model is numerically benchmarked on recognizing images from the MNIST and Fashion-MNIST datasets, achieving high accuracy in these multi-class classification tasks.</p>
</abstract>
<kwd-group>
<kwd>quantum machine learning</kwd>
<kwd>variational quantum algorithm</kwd>
<kwd>quantum convolutional neural network</kwd>
<kwd>data encoding</kwd>
<kwd>parameterized quantum circuit</kwd>
<kwd>computational resources</kwd>
</kwd-group>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Quantum Engineering and Technology</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec id="s1">
<title>1 Introduction</title>
<p>In the era of big data, as the scale of data continues to increase, the computational requirements for machine learning are expanding. Simultaneously, theoretical research indicates that quantum computing holds the potential to accelerate the solution for certain problems that pose challenges to classical computers [<xref ref-type="bibr" rid="B1">1</xref>&#x2013;<xref ref-type="bibr" rid="B3">3</xref>]. Consequently, the field of Quantum Machine Learning (QML) [<xref ref-type="bibr" rid="B4">4</xref>&#x2013;<xref ref-type="bibr" rid="B6">6</xref>] has gained widespread attention, with several promising breakthroughs. On one hand, quantum basic linear algebra subroutines, such as Fourier transforms, eigenvector and eigenvalue computations, and linear equation solving, exhibit exponential quantum speedups compared to their well-established classical counterparts [<xref ref-type="bibr" rid="B7">7</xref>&#x2013;<xref ref-type="bibr" rid="B9">9</xref>]. These subroutines bring quantum speedups in a range of machine learning algorithms, including least-squares fitting [<xref ref-type="bibr" rid="B10">10</xref>], semidefinite programming [<xref ref-type="bibr" rid="B11">11</xref>], gradient descent [<xref ref-type="bibr" rid="B12">12</xref>], principal component analysis [<xref ref-type="bibr" rid="B13">13</xref>], support vector machine [<xref ref-type="bibr" rid="B14">14</xref>], and neural network [<xref ref-type="bibr" rid="B15">15</xref>]. However, these quantum algorithms generally involve long-depth quantum circuits, which require a fault-tolerant quantum computer with error-correction capabilities. As a result, it is not straightforward to extend these theoretical quantum advantages to Noisy Intermediate-Scale Quantum (NISQ) devices [<xref ref-type="bibr" rid="B16">16</xref>].</p>
<p>Hybrid quantum-classical machine learning models [<xref ref-type="bibr" rid="B17">17</xref>&#x2013;<xref ref-type="bibr" rid="B21">21</xref>] based on the Variational Quantum Algorithm (VQA) [<xref ref-type="bibr" rid="B22">22</xref>&#x2013;<xref ref-type="bibr" rid="B26">26</xref>] emerge as a notable advancement for designing QML algorithms with shallow-depth quantum circuits. A typical example is the Quantum Convolutional Neural Network (QCNN) [<xref ref-type="bibr" rid="B27">27</xref>], which is a quantum analog of the Convolutional Neural Network (CNN) [<xref ref-type="bibr" rid="B28">28</xref>,<xref ref-type="bibr" rid="B29">29</xref>] composed of the convolutional layer, the pooling layer, and the fully connected layer. The QCNN model was first proposed by Cong et al. [<xref ref-type="bibr" rid="B30">30</xref>], demonstrating accurate quantum phase recognition by utilizing a small number of trainable parameters in comparison to the system size. Since translating a complex quantum state into the classical world may suffer from the challenge known as the &#x201c;exponential wall&#x201d; problem, the processing of quantum data inherently demonstrates the quantum advantages of the QCNN model over the classical CNN model.</p>
<p>In addition to quantum data processing, the QCNN model is also applied to classical data processing tasks [<xref ref-type="bibr" rid="B31">31</xref>&#x2013;<xref ref-type="bibr" rid="B36">36</xref>]. These QCNN models comprise a combination of quantum and classical components, including a quantum circuit, classical circuits, and a classical optimizer. Specifically, the quantum circuit consists of a data encoding circuit and a parameterized quantum circuit, which together form the quantum convolutional layer. The main idea of the quantum convolutional layer is to extract features from the classical image by transforming the data block, obtained from the image using a sliding window, using a parameterized quantum circuit. This is in contrast to the classical convolutional layer, where the transformation is performed using a weight matrix known as the classical convolutional kernel [<xref ref-type="bibr" rid="B29">29</xref>]. Furthermore, the quantum convolutional layer may extract complicated features whose processing may be classically stubborn [<xref ref-type="bibr" rid="B35">35</xref>,<xref ref-type="bibr" rid="B37">37</xref>]. The primary focus of studies mentioned above is to enhance the numerical performance of the model by improving the data encoding strategy and the structure of the parameterized quantum circuit. Rotation encoding [<xref ref-type="bibr" rid="B31">31</xref>,<xref ref-type="bibr" rid="B32">32</xref>], which offers ease of implementation, and amplitude encoding [<xref ref-type="bibr" rid="B33">33</xref>], which reduces the number of qubits, are commonly employed data encoding strategies in these QCNN models. Moreover, to encode more information of classical data onto quantum states, Matic et al. [<xref ref-type="bibr" rid="B34">34</xref>] proposed a data encoding strategy that combines two-qubit gates with single-qubit gates. Their model achieved comparable performance to the classical CNN model in radiological image classification tasks. 
Additionally, there are several related works focused on improving the structure of the parameterized quantum circuit. Henderson et al. [<xref ref-type="bibr" rid="B35">35</xref>] utilized multiple random quantum circuits to construct the quantum convolutional layer and achieved promising performance on the MNIST dataset. Furthermore, Chen et al. [<xref ref-type="bibr" rid="B36">36</xref>] constructed a trainable parameterized quantum circuit and applied their model to high-energy physics. To explore the potential quantum advantages of the QCNN model in classical data processing tasks, it is common to compare the prediction accuracy directly between the quantum and classical models. However, a specific QCNN or CNN model may not exhibit excellent numerical performance across all tasks. This perspective is highly dependent on the specific task and may be influenced by random factors. A more solid perspective for exploring the potential quantum advantages involves comparing computational resources used in the training and prediction processes of the model. Therefore, it is crucial to highlight either the excellent numerical performance or the efficient computational resources when showcasing the potential quantum advantages of the QCNN model over the classical CNN model.</p>
<p>In this paper, we propose a computationally resource-efficient QCNN model referred to as RE-QCNN. Specifically, by employing the amplitude encoding strategy [<xref ref-type="bibr" rid="B38">38</xref>,<xref ref-type="bibr" rid="B39">39</xref>] and the Quantum Alternating Operator Ansatz (QAOA) [<xref ref-type="bibr" rid="B40">40</xref>&#x2013;<xref ref-type="bibr" rid="B42">42</xref>] to construct the quantum convolutional layer, the complexity of the forward propagation process in this layer is <inline-formula id="inf1">
<mml:math id="m1">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">g</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>. Here, <italic>k</italic> and <italic>N</italic> respectively denote the sparsity and dimension of the classical data. Furthermore, we analyze the parameter updating process based on backpropagation [<xref ref-type="bibr" rid="B43">43</xref>] to obtain the complexity of the backward propagation process in the quantum convolutional layer. In detail, we extend the existing parameter shift rule [<xref ref-type="bibr" rid="B44">44</xref>&#x2013;<xref ref-type="bibr" rid="B46">46</xref>] to calculate analytical gradients for the QAOA circuit with parameter sharing. As a result, when the sparsity <italic>k</italic> of the classical data is <inline-formula id="inf2">
<mml:math id="m2">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">g</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, we conclude that the overall complexity of the quantum convolutional layer is <inline-formula id="inf3">
<mml:math id="m3">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">g</mml:mi>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>. Compared to the complexity of the classical convolutional layer, which is <inline-formula id="inf4">
<mml:math id="m4">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> [<xref ref-type="bibr" rid="B29">29</xref>,<xref ref-type="bibr" rid="B47">47</xref>], our model significantly reduces the computational resources required for the quantum convolutional layer. Moreover, we perform numerical experiments on our model using the MNIST and Fashion-MNIST datasets. Our model achieves high accuracy in these multi-class classification tasks. Our results are of great significance for exploring the potential quantum advantages of the QCNN model in classical data processing tasks within the NISQ era.</p>
<p>The rest of this paper is organized as follows. In <xref ref-type="sec" rid="s2">Section 2</xref>, we review the framework of VQA. <xref ref-type="sec" rid="s3">Section 3</xref> describes the structure of RE-QCNN in detail. <xref ref-type="sec" rid="s4">Section 4</xref> presents the results of numerical experiments. Conclusions are given in <xref ref-type="sec" rid="s5">Section 5</xref>.</p>
</sec>
<sec id="s2">
<title>2 Variational quantum algorithm</title>
<p>VQA utilizes parameterized quantum circuits on quantum devices, with the parameter optimization task delegated to a classical optimizer. This algorithm offers the benefit of maintaining a shallow quantum circuit depth, thereby reducing the impact of noise. This is in contrast to quantum algorithms developed for the fault-tolerant era.</p>
<p>In detail, as depicted in <xref ref-type="fig" rid="F1">Figure 1</xref>, VQA consists of three components: the data encoding circuit, the parameterized quantum circuit, and the classical optimizer. The data encoding circuit is employed to encode classical data onto quantum states. Two commonly used data encoding strategies are rotation encoding and amplitude encoding. The rotation encoding strategy encodes classical data using the Pauli rotation operators, offering the benefit of being easy to implement. On the other hand, the amplitude encoding strategy encodes classical data onto the amplitudes of quantum states. Subsequently, a parameterized quantum circuit is applied to the data-encoding quantum state. The parameterized quantum circuit is a unitary which relies on a set of trainable parameters. The expressibility of the parameterized quantum circuit is a crucial factor that significantly affects the performance of VQA [<xref ref-type="bibr" rid="B48">48</xref>]. Then, the output of the parameterized quantum circuit is measured to calculate a cost function. Finally, the classical optimizer, such as stochastic gradient descent [<xref ref-type="bibr" rid="B49">49</xref>], is used to iteratively update the parameters for optimizing the cost function.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>The framework of VQA.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g001.tif"/>
</fig>
<p>VQA has the notable benefit of offering a general framework for solving diverse problems. In particular, hybrid quantum-classical machine learning models can be seen as quantum counterparts to extremely successful classical neural networks. Next, we will provide a detailed description of our RE-QCNN model based on VQA.</p>
</sec>
<sec id="s3">
<title>3 The structure of RE-QCNN</title>
<p>In <xref ref-type="fig" rid="F2">Figure 2</xref>, the structure of the QCNN model is illustrated, which comprises three essential components: the quantum convolutional layer, the pooling layer, and the fully connected layer. This section provides a detailed description of our RE-QCNN model, starting from its fundamental components.</p>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>The structure of QCNN. The quantum convolutional layer contains several quantum convolutional kernels that transform the classical image into different feature maps. The detailed processing of a classical data block into and out of the quantum convolutional kernel is provided within the dashed box.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g002.tif"/>
</fig>
<sec id="s3-1">
<title>3.1 Quantum convolutional layer</title>
<p>The classical convolutional layer is the pivotal layer in the classical CNN model. This layer performs convolutional operations on the classical image to extract features. Specifically, in the convolutional operation, a data block is obtained from the image using a sliding window, and a dot product is calculated between this data block and a weight matrix referred to as the classical convolutional kernel [<xref ref-type="bibr" rid="B29">29</xref>]. Unlike the classical convolutional layer, the main idea of a quantum convolutional layer is to extract features from the image by transforming the data block using a quantum convolutional kernel, also known as a quantum filter. The quantum filter consists of both a data encoding circuit and a parameterized quantum circuit.</p>
<p>
<italic>Data encoding strategy</italic>. In our RE-QCNN model, amplitude encoding is adopted as the data encoding strategy. Let <inline-formula id="inf5">
<mml:math id="m5">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> represent the preprocessed <italic>D</italic> data blocks extracted from the given classical image using a <inline-formula id="inf6">
<mml:math id="m6">
<mml:msqrt>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> sliding window with a stride of 1, which contain all the information of the image. Through the amplitude encoding strategy, each <inline-formula id="inf7">
<mml:math id="m7">
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> is encoded onto its respective quantum state denoted as &#x7c;<italic>X</italic>
<sub>
<italic>d</italic>
</sub>&#x27e9;. Specifically,<disp-formula id="e1">
<mml:math id="m8">
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2192;</mml:mo>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">&#x232a;</mml:mo>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="&#x2016;" close="&#x2016;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mo stretchy="false">&#x232a;</mml:mo>
<mml:mo>,</mml:mo>
</mml:math>
<label>(1)</label>
</disp-formula>where <italic>X</italic>
<sub>
<italic>di</italic>
</sub> represents the <italic>i</italic>-th component of <italic>X</italic>
<sub>
<italic>d</italic>
</sub> and <inline-formula id="inf8">
<mml:math id="m9">
<mml:mfenced open="&#x2016;" close="&#x2016;">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mo movablelimits="false" form="prefix">&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mi>i</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> is the 2-norm of <italic>X</italic>
<sub>
<italic>d</italic>
</sub>. The <italic>N</italic>-dimensional vector <italic>X</italic>
<sub>
<italic>d</italic>
</sub> is encoded onto a superposition state &#x7c;<italic>X</italic>
<sub>
<italic>d</italic>
</sub>&#x27e9; with log&#x2009; <italic>N</italic> qubits. As a result, amplitude encoding efficiently reduces the number of qubits within our model. However, its implementation is challenging. Specifically, to prepare an arbitrary quantum state, it is necessary to use a quantum circuit whose size scales exponentially with the number of qubits [<xref ref-type="bibr" rid="B38">38</xref>,<xref ref-type="bibr" rid="B39">39</xref>]. Nevertheless, when dealing with sparse datasets such as the MNIST dataset employed in our numerical experiments, amplitude encoding can be regarded as an efficient strategy. As mentioned in Refs. [<xref ref-type="bibr" rid="B50">50</xref>,<xref ref-type="bibr" rid="B51">51</xref>], the preparation of an arbitrary sparse quantum state can be accomplished using a quantum circuit whose size scales polynomially with the sparsity and the number of qubits.</p>
<p>
<italic>Parameterized quantum circuit</italic>. We construct the parameterized quantum circuit based on the QAOA circuit that was originally designed to provide approximations for combinatorial optimization problems. The QAOA circuit starts by constructing two Hamiltonians <italic>H</italic>
<sub>
<italic>B</italic>
</sub> and <italic>H</italic>
<sub>
<italic>C</italic>
</sub>. Specifically, the Hamiltonian <italic>H</italic>
<sub>
<italic>B</italic>
</sub> is given by<disp-formula id="e2">
<mml:math id="m10">
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>,</mml:mo>
</mml:math>
<label>(2)</label>
</disp-formula>where <inline-formula id="inf9">
<mml:math id="m11">
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> is the Pauli <italic>X</italic> operator acting on the <italic>q</italic>-th qubit. The Hamiltonian <italic>H</italic>
<sub>
<italic>C</italic>
</sub> is defined as<disp-formula id="e3">
<mml:math id="m12">
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(3)</label>
</disp-formula>where <inline-formula id="inf10">
<mml:math id="m13">
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> is the Pauli <italic>Z</italic> operator acting on the <italic>q</italic>-th qubit. Subsequently, the <italic>p</italic>-layer QAOA circuit is constructed by iteratively applying <inline-formula id="inf11">
<mml:math id="m14">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> and <inline-formula id="inf12">
<mml:math id="m15">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> for <italic>p</italic> rounds, given by<disp-formula id="e4">
<mml:math id="m16">
<mml:mi>U</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x220f;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
</mml:math>
<label>(4)</label>
</disp-formula>where parameter vectors <bold>
<italic>&#x3b2;</italic>
</bold> &#x3d; [<italic>&#x3b2;</italic>
<sub>1</sub>, &#x2026;, <italic>&#x3b2;</italic>
<sub>
<italic>p</italic>
</sub>], and <bold>
<italic>&#x3b3;</italic>
</bold> &#x3d; [<italic>&#x3b3;</italic>
<sub>1</sub>, &#x2026;, <italic>&#x3b3;</italic>
<sub>
<italic>p</italic>
</sub>]. <xref ref-type="fig" rid="F3">Figure 3A</xref> displays the structure of the QAOA circuit. In more detail, <inline-formula id="inf13">
<mml:math id="m17">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> and <inline-formula id="inf14">
<mml:math id="m18">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> can be decomposed into single-qubit and two-qubit gates, as illustrated in <xref ref-type="fig" rid="F3">Figures 3B, C</xref>. Specifically,<disp-formula id="e5">
<mml:math id="m19">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x220f;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mn>2</mml:mn>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
</mml:math>
<label>(5)</label>
</disp-formula>
<disp-formula id="e6">
<mml:math id="m20">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x220f;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mi>I</mml:mi>
</mml:mrow>
</mml:msup>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="normal">C</mml:mi>
<mml:mi mathvariant="normal">N</mml:mi>
<mml:mi mathvariant="normal">O</mml:mi>
<mml:mi mathvariant="normal">T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="normal">C</mml:mi>
<mml:mi mathvariant="normal">N</mml:mi>
<mml:mi mathvariant="normal">O</mml:mi>
<mml:mi mathvariant="normal">T</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>q</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi>q</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
</mml:math>
<label>(6)</label>
</disp-formula>where <italic>R</italic>
<sub>
<italic>x</italic>
</sub> (&#x22c5;)<sub>
<italic>q</italic>
</sub> represents the Pauli <italic>X</italic> rotation operator acting on the <italic>q</italic>-th qubit, and <italic>R</italic>
<sub>
<italic>z</italic>
</sub> (&#x22c5;)<sub>
<italic>q</italic>
</sub> denotes the Pauli <italic>Z</italic> rotation operator acting on the <italic>q</italic>-th qubit. An important characteristic of the QAOA circuit is parameter sharing, which refers to the sharing of the same parameter among quantum gates within the same layer. This characteristic of the QAOA circuit reduces the number of parameters required for constructing the quantum convolutional layer. Moreover, the presence of two-qubit gates in the QAOA circuit provides it with a strong entanglement capability, resulting in a high expressibility of the quantum convolutional layer [<xref ref-type="bibr" rid="B48">48</xref>].</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>The structure of the QAOA circuit.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g003.tif"/>
</fig>
<p>Now, the output <italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) of the quantum convolutional layer is a <inline-formula id="inf15">
<mml:math id="m21">
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> matrix, given by<disp-formula id="e7">
<mml:math id="m22">
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:mtable class="matrix">
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x2026;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x2026;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x22ee;</mml:mo>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#x2212;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:mfenced>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#x2b;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:mo>&#x2026;</mml:mo>
</mml:mtd>
<mml:mtd columnalign="center">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:math>
<label>(7)</label>
</disp-formula>Here, for <italic>d</italic> &#x2208; {1, &#x2026;, <italic>D</italic>}, <italic>h</italic>
<sub>
<italic>d</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) denotes the expectation value of a specific observable <italic>H</italic>, given by<disp-formula id="e8">
<mml:math id="m23">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mrow>
<mml:mo stretchy="false">&#x27e8;</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2020;</mml:mo>
</mml:mrow>
</mml:msup>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
<mml:mi>U</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo stretchy="false">&#x7c;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">&#x27e9;</mml:mo>
</mml:mrow>
<mml:mo>.</mml:mo>
</mml:math>
<label>(8)</label>
</disp-formula>
</p>
<p>A common form of <italic>H</italic> is expressed as<disp-formula id="e9">
<mml:math id="m24">
<mml:mi>H</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>S</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:msub>
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>,</mml:mo>
</mml:math>
<label>(9)</label>
</disp-formula>where <inline-formula id="inf16">
<mml:math id="m25">
<mml:msub>
<mml:mrow>
<mml:mi>c</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:math>
</inline-formula>, and <inline-formula id="inf17">
<mml:math id="m26">
<mml:msub>
<mml:mrow>
<mml:mi>P</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:mi>I</mml:mi>
<mml:mo>,</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
</mml:msup>
<mml:mo>,</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>z</mml:mi>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2297;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula>. Specifically, we can use <inline-formula id="inf18">
<mml:math id="m27">
<mml:mi>H</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mo movablelimits="false" form="prefix">&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula>, where <inline-formula id="inf19">
<mml:math id="m28">
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> represents the Pauli <italic>X</italic> operator acting on the <italic>s</italic>-th qubit.</p>
</sec>
<sec id="s3-2">
<title>3.2 Pooling layer and fully connected layer</title>
<p>
<italic>Pooling layer</italic>. We employ a downsampling function denoted as down (&#x22c5;) to reduce the dimension of <italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>), and the resulting output <italic>X</italic>
<sub>
<italic>pl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) is represented as<disp-formula id="e10">
<mml:math id="m29">
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">d</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">w</mml:mi>
<mml:mi mathvariant="normal">n</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi>a</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(10)</label>
</disp-formula>where <italic>a</italic> (&#x22c5;) represents an activation function, such as the ReLU activation function. Here, down (&#x22c5;) is implemented by computing the maximum of different blocks of <italic>a</italic> (<italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>)). Specifically, these blocks can be obtained by applying a 2 &#xd7; 2 sliding window with a stride of 2 to <italic>a</italic> (<italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>)), leading to the dimension of <italic>a</italic> (<italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>)) being reduced from <inline-formula id="inf20">
<mml:math id="m30">
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> to <inline-formula id="inf21">
<mml:math id="m31">
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>/</mml:mo>
<mml:mn>2</mml:mn>
</mml:math>
</inline-formula>.</p>
<p>
<italic>Fully connected layer</italic>. The fully connected layer maps the features extracted by the quantum convolutional layer and the pooling layer to the <italic>m</italic>-dimensional label space. Specifically, the output <italic>y</italic>
<sub>
<italic>out</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>) of the fully connected layer can be represented as<disp-formula id="e11">
<mml:math id="m32">
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mi>g</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="bold-italic">W</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>T</mml:mi>
</mml:mrow>
</mml:msup>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(11)</label>
</disp-formula>where <bold>
<italic>W</italic>
</bold> denotes the <italic>D</italic>/4 &#xd7; <italic>m</italic> weight matrix, <italic>X</italic>
<sub>
<italic>pl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) is flattened into a <italic>D</italic>/4-dimensional vector, <bold>
<italic>b</italic>
</bold> represents the <italic>m</italic>-dimensional bias vector, and <italic>g</italic> (&#x22c5;) is an activation function, such as the softmax activation function used for the multi-class classification task.</p>
</sec>
<sec id="s3-3">
<title>3.3 Parameter updating</title>
<p>As described in Sections 3.1 and 3.2, the forward propagation flow of our RE-QCNN model involves several mappings. Let <inline-formula id="inf22">
<mml:math id="m33">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> be the preprocessed <italic>D</italic> data blocks. We denote the mapping of the quantum convolutional layer as <inline-formula id="inf23">
<mml:math id="m34">
<mml:msub>
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>:</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
<mml:mo>&#x2192;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, the mapping of the pooling layer as <italic>f</italic>
<sub>
<italic>pl</italic>
</sub>: <italic>X</italic>
<sub>
<italic>qcl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) &#x2192; <italic>X</italic>
<sub>
<italic>pl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>), the mapping of the fully connected layer as <italic>f</italic>
<sub>
<italic>fcl</italic>
</sub>: <italic>X</italic>
<sub>
<italic>pl</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) &#x2192; <italic>y</italic>
<sub>
<italic>out</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>), and the mapping of the cost function as <italic>l</italic>: <italic>y</italic>
<sub>
<italic>out</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>) &#x2192; <italic>C</italic> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>). Now, the forward propagation flow is given by<disp-formula id="e12">
<mml:math id="m35">
<mml:mi>l</mml:mi>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">fcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>f</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>.</mml:mo>
</mml:math>
<label>(12)</label>
</disp-formula>
</p>
<p>Here, we focus on the parameter updates of <bold>
<italic>&#x3b2;</italic>
</bold> &#x3d; [<italic>&#x3b2;</italic>
<sub>1</sub>, &#x2026;, <italic>&#x3b2;</italic>
<sub>
<italic>p</italic>
</sub>] and <bold>
<italic>&#x3b3;</italic>
</bold> &#x3d; [<italic>&#x3b3;</italic>
<sub>1</sub>, &#x2026;, <italic>&#x3b3;</italic>
<sub>
<italic>p</italic>
</sub>] in the quantum convolutional layer. Following the principles of backpropagation, for <italic>j</italic> &#x2208; {1, <italic>&#x2026;</italic>, <italic>p</italic>}, the derivative of <italic>C</italic> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>) with respect to <italic>&#x3b3;</italic>
<sub>
<italic>j</italic>
</sub> is given by<disp-formula id="e13">
<mml:math id="m36">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>C</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>C</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">qcl</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mstyle displaystyle="true">
<mml:munderover>
<mml:mrow>
<mml:mo>&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:munderover>
</mml:mstyle>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi>C</mml:mi>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>y</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi mathvariant="italic">out</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">W</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">b</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi>l</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x22c5;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(13)</label>
</disp-formula>
</p>
<p>The first term relies on the specific form of the cost function. Specifically, the cost function quantifies the difference between the predicted labels of our model and the true labels. Different tasks typically use different cost functions. For instance, in the case of a multi-class classification task, cross-entropy is commonly employed as the cost function. Additionally, the second and third terms are associated with the mappings of the fully connected layer and the pooling layer, respectively. These three terms retain classical structures and can be calculated using traditional classical methods. Now, we focus on the calculation of the fourth term. It is crucial to note that the QAOA circuit exhibits parameter sharing, meaning that the same parameter is shared among quantum gates within the same layer. Considering this characteristic, we present the analytical expression for the derivative <italic>&#x2202;h</italic>
<sub>
<italic>d</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>)/<italic>&#x2202;&#x3b3;</italic>
<sub>
<italic>j</italic>
</sub>.</p>
<p>Taking the 3-qubit QAOA circuit shown in <xref ref-type="fig" rid="F4">Figure 4</xref> as an example, we divide this QAOA circuit into five components: <italic>V</italic>
<sub>1</sub>, <italic>U</italic>
<sub>1</sub> (&#x2212;<italic>&#x3b3;</italic>
<sub>
<italic>j</italic>
</sub>), <italic>V</italic>
<sub>2</sub>, <italic>U</italic>
<sub>2</sub> (&#x2212;<italic>&#x3b3;</italic>
<sub>
<italic>j</italic>
</sub>), and <italic>V</italic>
<sub>3</sub>. Let <inline-formula id="inf24">
<mml:math id="m37">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula>, <inline-formula id="inf25">
<mml:math id="m38">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, <inline-formula id="inf26">
<mml:math id="m39">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula>, <inline-formula id="inf27">
<mml:math id="m40">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, and <inline-formula id="inf28">
<mml:math id="m41">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
</mml:math>
</inline-formula> denote the unitary channels corresponding to each component, respectively. Then, the expectation value <italic>h</italic>
<sub>
<italic>d</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) described by Eq. <xref ref-type="disp-formula" rid="e8">8</xref> can be reformulated as<disp-formula id="e14">
<mml:math id="m42">
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>,</mml:mo>
</mml:math>
<label>(14)</label>
</disp-formula>where <italic>&#x3c1;</italic>
<sub>
<italic>d</italic>
</sub> &#x3d; &#x7c;<italic>X</italic>
<sub>
<italic>d</italic>
</sub>&#x27e9;&#x27e8;<italic>X</italic>
<sub>
<italic>d</italic>
</sub>&#x7c;. Then, we have<disp-formula id="e15">
<mml:math id="m43">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(15)</label>
</disp-formula>
</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>The illustration of QAOA circuit division.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g004.tif"/>
</fig>
<p>Based on the parameter shift rule (see <xref ref-type="sec" rid="s11">Supplementary Material</xref>), for <italic>i</italic> &#x2208; {1, 2}, each Pauli <italic>Z</italic> rotation channel <inline-formula id="inf29">
<mml:math id="m44">
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> satisfies<disp-formula id="e16">
<mml:math id="m45">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>i</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>.</mml:mo>
</mml:math>
<label>(16)</label>
</disp-formula>
</p>
<p>Then, we have <disp-formula id="e17">
<mml:math id="m46">
<mml:mtable class="aligned">
<mml:mtr>
<mml:mtd columnalign="right">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mtd>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x2b;</mml:mo>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x3d;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2b;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
</mml:mtd>
</mml:mtr>
<mml:mtr>
<mml:mtd columnalign="right"/>
<mml:mtd columnalign="left">
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
<mml:mi mathvariant="normal">t</mml:mi>
<mml:mi mathvariant="normal">r</mml:mi>
<mml:mfenced open="[" close="]">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>1</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">U</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2212;</mml:mo>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x3c0;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
</mml:mfenced>
<mml:mo>&#x25e6;</mml:mo>
<mml:msub>
<mml:mrow>
<mml:mi mathvariant="script">V</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>3</mml:mn>
</mml:mrow>
</mml:msub>
<mml:mfenced open="(" close=")">
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3c1;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfenced>
<mml:mi>H</mml:mi>
</mml:mrow>
</mml:mfenced>
<mml:mo>.</mml:mo>
</mml:mtd>
</mml:mtr>
</mml:mtable>
</mml:math>
<label>(17)</label>
</disp-formula>Indeed, by estimating the aforementioned four expectation values, we can obtain this derivative. The described process can naturally be extended to the QAOA circuit with log&#x2009;<italic>N</italic> qubits. Similarly, we can obtain <italic>&#x2202;C</italic> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>, <bold>
<italic>W</italic>
</bold>, <bold>
<italic>b</italic>
</bold>)/<italic>&#x2202;&#x3b2;</italic>
<sub>
<italic>j</italic>
</sub>. Subsequently, by utilizing a gradient-based optimization method, we can update the parameters <bold>
<italic>&#x3b2;</italic>
</bold> and <bold>
<italic>&#x3b3;</italic>
</bold> in the quantum convolutional layer.</p>
</sec>
<sec id="s3-4">
<title>3.4 Complexity analysis</title>
<p>In this section, we provide a detailed analysis of the complexity associated with the forward and backward propagation processes in the quantum convolutional layer.</p>
<p>For the forward propagation process, the <italic>D</italic> data blocks <inline-formula id="inf30">
<mml:math id="m47">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> are extracted from the given classical image using a <inline-formula id="inf31">
<mml:math id="m48">
<mml:msqrt>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> sliding window with a stride of 1, which contain all the information of the image. Subsequently, <inline-formula id="inf32">
<mml:math id="m49">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> are sequentially processed by the quantum convolutional kernel, and the resulting <italic>D</italic> expectation values <inline-formula id="inf33">
<mml:math id="m50">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> as described in Eq. <xref ref-type="disp-formula" rid="e8">8</xref> form the output of the quantum convolutional layer, represented as a <inline-formula id="inf34">
<mml:math id="m51">
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
<mml:mo>&#xd7;</mml:mo>
<mml:msqrt>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msqrt>
</mml:math>
</inline-formula> matrix. In detail, the quantum convolutional kernel consists of the data encoding circuit and the parameterized quantum circuit. On one hand, the data encoding circuit adopts the amplitude encoding strategy as described in Eq. <xref ref-type="disp-formula" rid="e1">1</xref>, and each <inline-formula id="inf35">
<mml:math id="m52">
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mo>&#x2208;</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi mathvariant="double-struck">R</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> is encoded onto its corresponding quantum state &#x7c;<italic>X</italic>
<sub>
<italic>d</italic>
</sub>&#x27e9; using log&#x2009; <italic>N</italic> qubits. Notably, as stated in Theorem 1 of Ref. [<xref ref-type="bibr" rid="B50">50</xref>], when <italic>X</italic>
<sub>
<italic>d</italic>
</sub> is <italic>k</italic>-sparse (with <italic>k</italic> nonzero entries), the amplitude encoding strategy is achieved by a data encoding circuit of size <inline-formula id="inf36">
<mml:math id="m53">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, which includes <inline-formula id="inf37">
<mml:math id="m54">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>k</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> single-qubit gates and <inline-formula id="inf38">
<mml:math id="m55">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> CNOT gates. Additionally, a quantum superposition query is required to obtain the <italic>k</italic> nonzero entries of <italic>X</italic>
<sub>
<italic>d</italic>
</sub> [<xref ref-type="bibr" rid="B52">52</xref>]. On the other hand, the parameterized quantum circuit is constructed based on the QAOA circuit <italic>U</italic> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) with log&#x2009; <italic>N</italic> qubits, as described in Eq. <xref ref-type="disp-formula" rid="e4">4</xref>. According to Eqs. <xref ref-type="disp-formula" rid="e5">5</xref> and <xref ref-type="disp-formula" rid="e6">6</xref>, <inline-formula id="inf39">
<mml:math id="m56">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>B</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> can be decomposed into log&#x2009; <italic>N</italic> single-qubit gates, while <inline-formula id="inf40">
<mml:math id="m57">
<mml:msup>
<mml:mrow>
<mml:mi>e</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mo>&#x2212;</mml:mo>
<mml:mi>i</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
<mml:msub>
<mml:mrow>
<mml:mi>H</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>C</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:msup>
</mml:math>
</inline-formula> can be decomposed into log&#x2009; <italic>N</italic> single-qubit gates and 2&#x2009;log&#x2009; <italic>N</italic> CNOT gates in each layer of the QAOA circuit. Therefore, the total number of quantum gates required to implement <italic>U</italic> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) is <inline-formula id="inf41">
<mml:math id="m58">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>p</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, where <italic>p</italic> is the number of layers. Finally, by performing measurements, we estimate each <italic>h</italic>
<sub>
<italic>d</italic>
</sub> (<bold>
<italic>&#x3b2;</italic>
</bold>, <bold>
<italic>&#x3b3;</italic>
</bold>) with a complexity of <inline-formula id="inf42">
<mml:math id="m59">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3f5;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, where <italic>&#x3f5;</italic> represents the additive error in estimating this expectation value. Therefore, the overall complexity of the forward propagation process in the quantum convolutional layer is <inline-formula id="inf43">
<mml:math id="m60">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3f5;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>For the backward propagation process, the complexity of the quantum convolutional layer primarily arises from the estimation of <inline-formula id="inf44">
<mml:math id="m61">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> and <inline-formula id="inf45">
<mml:math id="m62">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>p</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula>. As each <inline-formula id="inf46">
<mml:math id="m63">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:math>
</inline-formula> and <inline-formula id="inf47">
<mml:math id="m64">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">&#x3b2;</mml:mi>
<mml:mo>,</mml:mo>
<mml:mi mathvariant="bold-italic">&#x3b3;</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>&#x3b2;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:math>
</inline-formula> can be obtained according to Eq. <xref ref-type="disp-formula" rid="e17">17</xref>, the overall complexity of the backward propagation process in the quantum convolutional layer is <inline-formula id="inf48">
<mml:math id="m65">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>p</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
<mml:mo>&#x2b;</mml:mo>
<mml:mi>p</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
<mml:mo>/</mml:mo>
<mml:msup>
<mml:mrow>
<mml:mi>&#x3f5;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mn>2</mml:mn>
</mml:mrow>
</mml:msup>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>Increasing the number of layers <italic>p</italic> in the parameterized quantum circuit enhances its expressibility, thereby improving the numerical performance of our model. However, when the parameterized quantum circuit reaches a specific number of layers, it may exhibit the barren plateau phenomenon [<xref ref-type="bibr" rid="B53">53</xref>,<xref ref-type="bibr" rid="B54">54</xref>]. This phenomenon poses challenges to the model training and impacts its numerical performance. Taking these factors into account, choosing <italic>p</italic> to be <inline-formula id="inf49">
<mml:math id="m66">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> is reasonable. In addition, the additive error <italic>&#x3f5;</italic> is considered to be a constant, which is a general choice in the context of quantum machine learning [<xref ref-type="bibr" rid="B55">55</xref>]. In this context, the complexity of both the forward and backward propagation processes is <inline-formula id="inf50">
<mml:math id="m67">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>. Now, we can conclude that the complexity of the quantum convolutional layer is <inline-formula id="inf51">
<mml:math id="m68">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>Meanwhile, for the forward propagation process of the classical convolutional layer, the aforementioned <italic>D</italic> data blocks <inline-formula id="inf52">
<mml:math id="m69">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>X</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula>, which are all <italic>k</italic>-sparse <italic>N</italic>-dimensional vectors, are sequentially processed by the classical convolutional kernel. This results in <italic>D</italic> feature values <inline-formula id="inf53">
<mml:math id="m70">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">w</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula>. Specifically, each <italic>h</italic>
<sub>
<italic>d</italic>
</sub>(<bold>
<italic>w</italic>
</bold>) is given by <italic>h</italic>
<sub>
<italic>d</italic>
</sub>(<bold>
<italic>w</italic>
</bold>) &#x3d; <bold>
<italic>w</italic>
</bold>
<sup>
<italic>T</italic>
</sup>
<italic>X</italic>
<sub>
<italic>d</italic>
</sub>, where <bold>
<italic>w</italic>
</bold> is the <italic>N</italic>-dimensional weight vector [<xref ref-type="bibr" rid="B29">29</xref>]. Considering that each <italic>X</italic>
<sub>
<italic>d</italic>
</sub> is <italic>k</italic>-sparse, the calculation of each <italic>h</italic>
<sub>
<italic>d</italic>
</sub>(<bold>
<italic>w</italic>
</bold>) has a complexity of <inline-formula id="inf54">
<mml:math id="m71">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>. Additionally, <italic>N</italic> classical queries are required to obtain the <italic>k</italic> nonzero entries of <italic>X</italic>
<sub>
<italic>d</italic>
</sub>. Therefore, the overall complexity of the forward propagation process in the classical convolutional layer is <inline-formula id="inf55">
<mml:math id="m72">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula> [<xref ref-type="bibr" rid="B47">47</xref>]. For the backward propagation process, the complexity of the classical convolutional layer primarily arises from the calculation of <inline-formula id="inf56">
<mml:math id="m73">
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:msubsup>
<mml:mrow>
<mml:mrow>
<mml:mo stretchy="false">{</mml:mo>
<mml:mrow>
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">w</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>N</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:mrow>
<mml:mo stretchy="false">}</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mi>D</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula>. Since each <inline-formula id="inf57">
<mml:math id="m74">
<mml:mfrac>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>h</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>d</mml:mi>
</mml:mrow>
</mml:msub>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi mathvariant="bold-italic">w</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mrow>
<mml:mi>&#x2202;</mml:mi>
<mml:msub>
<mml:mrow>
<mml:mi>w</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>j</mml:mi>
</mml:mrow>
</mml:msub>
</mml:mrow>
</mml:mfrac>
</mml:math>
</inline-formula> is calculated with a complexity of <inline-formula id="inf58">
<mml:math id="m75">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, the overall complexity of the backward propagation process is <inline-formula id="inf59">
<mml:math id="m76">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>. Now, we can conclude that the complexity of the classical convolutional layer is <inline-formula id="inf60">
<mml:math id="m77">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>.</p>
<p>The complexity analysis of the quantum and classical convolutional layers, respectively handling <italic>D k</italic>-sparse <italic>N</italic>-dimensional data blocks, is shown in <xref ref-type="table" rid="T1">Table 1</xref>. There is a trade-off between the sizes of <italic>D</italic> and <italic>N</italic>; that is, <italic>D</italic> and <italic>N</italic> are inversely proportional. As a result, when <italic>N</italic> is relatively large and the sparsity <italic>k</italic> of the data block is <inline-formula id="inf61">
<mml:math id="m78">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>, the quantum convolutional layer demonstrates a significant reduction in computational resources compared to the classical CNN model.</p>
<table-wrap id="T1" position="float">
<label>TABLE 1</label>
<caption>
<p>Complexity analysis of the quantum (gate complexity) and classical (computational complexity) convolutional layers. Here, the quantum and classical convolutional layers handle <italic>D k</italic>-sparse <italic>N</italic>-dimensional data blocks respectively.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Process</th>
<th align="center">Quantum convolutional layer</th>
<th align="center">Classical convolutional layer</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Forward propagation</td>
<td align="center">
<inline-formula id="inf62">
<mml:math id="m79">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf63">
<mml:math id="m80">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
<tr>
<td align="center">Backward propagation</td>
<td align="center">
<inline-formula id="inf64">
<mml:math id="m81">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>k</mml:mi>
<mml:mi mathvariant="normal">p</mml:mi>
<mml:mi mathvariant="normal">o</mml:mi>
<mml:mi mathvariant="normal">l</mml:mi>
<mml:mi mathvariant="normal">y</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>log</mml:mi>
<mml:mo>&#x2061;</mml:mo>
<mml:mi>N</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
<td align="center">
<inline-formula id="inf65">
<mml:math id="m82">
<mml:mi mathvariant="script">O</mml:mi>
<mml:mrow>
<mml:mo stretchy="false">(</mml:mo>
<mml:mrow>
<mml:mi>D</mml:mi>
<mml:mi>N</mml:mi>
<mml:mi>k</mml:mi>
</mml:mrow>
<mml:mo stretchy="false">)</mml:mo>
</mml:mrow>
</mml:math>
</inline-formula>
</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="s4">
<title>4 Numerical experiments</title>
<p>To evaluate the performance of our RE-QCNN model, we conduct numerical experiments on the MNIST and Fashion-MNIST datasets.</p>
<sec id="s4-1">
<title>4.1 Performance of RE-QCNN on MNIST dataset</title>
<p>The MNIST dataset consists of a training set with 60,000 images and a test set with 10,000 images. Each image in this dataset is composed of 784 (28 &#xd7; 28) pixels. The visualization of the MNIST dataset is depicted in <xref ref-type="fig" rid="F5">Figure 5</xref>.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>The visualization of MNIST dataset.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g005.tif"/>
</fig>
<p>In detail, we conduct numerical experiments using the 2-layer RE-QCNN model, which includes two quantum convolutional layers and two pooling layers. The objective of these numerical experiments is to recognize handwritten digit images across all categories. The model configuration details can be found in <xref ref-type="table" rid="T2">Table 2</xref>. In this configuration, the first quantum convolutional layer consists of a single quantum filter implemented using a 5-qubit QAOA circuit with two layers. Specifically, the quantum convolutional layer applies a 5 &#xd7; 5 sliding window with a stride of 1 to the normalized 28 &#xd7; 28 image matrix, resulting in 576 data blocks. Each data block, consisting of 25 pixels, is encoded onto its respective quantum state using 5 qubits through the amplitude encoding strategy. Subsequently, the 576 data-encoding quantum states are sequentially processed by the 2-layer QAOA circuit. Finally, the measurement outcomes of the observable <inline-formula id="inf66">
<mml:math id="m83">
<mml:mi>H</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:msubsup>
<mml:mrow>
<mml:mo movablelimits="false" form="prefix">&#x2211;</mml:mo>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
<mml:mo>&#x3d;</mml:mo>
<mml:mn>1</mml:mn>
</mml:mrow>
<mml:mrow>
<mml:mn>5</mml:mn>
</mml:mrow>
</mml:msubsup>
<mml:msubsup>
<mml:mrow>
<mml:mi>&#x3c3;</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>s</mml:mi>
</mml:mrow>
<mml:mrow>
<mml:mi>x</mml:mi>
</mml:mrow>
</mml:msubsup>
</mml:math>
</inline-formula> are obtained, resulting in 576 expectation values. These expectation values form the output of the first quantum convolutional layer, represented as a 24 &#xd7; 24 matrix. The second quantum convolutional layer comprises 6 quantum filters, and each quantum filter is also implemented using a 5-qubit QAOA circuit with two layers. Before the output of each quantum convolutional layer enters the pooling layer, it is subjected to a ReLU activation function, followed by a downsampling function. Additionally, for this multi-class classification task, the fully connected layer with the softmax activation function generates the predicted label, and cross-entropy is employed as the cost function. We conduct cross-validation experiments using 2-fold, 3-fold, 5-fold, and 7-fold validation on a total of 70,000 images.</p>
<table-wrap id="T2" position="float">
<label>TABLE 2</label>
<caption>
<p>The configuration of the 2-layer RE-QCNN on MNIST dataset.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Parameter</th>
<th align="center">Quantum convolutional layer 1</th>
<th align="center">Quantum convolutional layer 2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Num of qubits</td>
<td align="center">5</td>
<td align="center">5</td>
</tr>
<tr>
<td align="center">Num of QAOA layers</td>
<td align="center">2</td>
<td align="center">2</td>
</tr>
<tr>
<td align="center">Num of quantum filters</td>
<td align="center">1</td>
<td align="center">6</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>For each cross-validation experiment, the Receiver Operating Characteristic (ROC) curve is depicted in <xref ref-type="fig" rid="F6">Figure 6A</xref>. The ROC curves reveal that our model trained using 2-fold cross-validation lacks superior generalization capability. Specifically, at a False Positive Rate (FPR) of 2%, our model achieves a True Positive Rate (TPR) of 89%, indicating that the curve is still in its convergence phase. Complete convergence is achieved at an FPR of 10%. In contrast, our model trained using 7-fold cross-validation demonstrates superior generalization capability. The ROC curve exhibits noticeable improvement at an FPR of 2%, and complete convergence is achieved at an FPR of 10%. Furthermore, the stability of our model is evaluated using the confusion matrix, as depicted in <xref ref-type="fig" rid="F6">Figure 6B</xref>. Overall, our model attains high accuracy in recognizing handwritten digit images across all categories.</p>
<fig id="F6" position="float">
<label>FIGURE 6</label>
<caption>
<p>The performance of RE-QCNN on MNIST dataset.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g006.tif"/>
</fig>
</sec>
<sec id="s4-2">
<title>4.2 Performance of RE-QCNN on Fashion-MNIST dataset</title>
<p>The Fashion-MNIST dataset consists of grayscale images of fashion products, where each image is composed of 28 &#xd7; 28 pixels. This dataset comprises 70,000 images from 10 categories, with 7,000 images per category. The training set contains 60,000 images, while the test set contains 10,000 images. The Fashion-MNIST dataset is considered to be more complex compared to the conventional MNIST dataset. The visualization of the Fashion-MNIST dataset is depicted in <xref ref-type="fig" rid="F7">Figure 7</xref>. In detail, we conduct numerical experiments to comprehensively assess our model&#x2019;s performance on the Fashion-MNIST dataset. The objective of these numerical experiments is to recognize images across all categories using the 2-layer RE-QCNN model, which includes two quantum convolutional layers and two pooling layers. Next, we present the performance of our model as obtained from numerical experiments involving the QAOA circuit with different random initial parameters, various numbers of layers, and different levels of measurement errors.</p>
<fig id="F7" position="float">
<label>FIGURE 7</label>
<caption>
<p>The visualization of Fashion-MNIST dataset.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g007.tif"/>
</fig>
<p>Firstly, for the numerical experiments involving the QAOA circuit with five different sets of random initial parameters, the model configuration is consistent with the configuration used for the MNIST dataset, with some details being different. Specifically, the first quantum convolutional layer consists of 40 quantum filters. Each of these quantum filters is implemented using a 5-qubit QAOA circuit with four layers. The second quantum convolutional layer comprises 80 quantum filters, and each quantum filter is also implemented using a 5-qubit QAOA circuit with four layers. These differences can be found in <xref ref-type="table" rid="T3">Table 3</xref>. Additionally, since we are also dealing with a multi-class classification task, cross-entropy is employed as the cost function. The accuracy of our model, using the QAOA circuit with five different sets of random initial parameters, is depicted as a function of epoch in <xref ref-type="fig" rid="F8">Figure 8A</xref>. This figure illustrates that the highest achieved accuracy is 92.94%, the lowest accuracy is 92.20%, and the average accuracy is 92.59%.</p>
<table-wrap id="T3" position="float">
<label>TABLE 3</label>
<caption>
<p>The configuration of the 2-layer RE-QCNN on Fashion-MNIST dataset.</p>
</caption>
<table>
<thead valign="top">
<tr>
<th align="center">Parameter</th>
<th align="center">Quantum convolutional layer 1</th>
<th align="center">Quantum convolutional layer 2</th>
</tr>
</thead>
<tbody valign="top">
<tr>
<td align="center">Num of qubits</td>
<td align="center">5</td>
<td align="center">5</td>
</tr>
<tr>
<td align="center">Num of QAOA layers</td>
<td align="center">4</td>
<td align="center">4</td>
</tr>
<tr>
<td align="center">Num of quantum filters</td>
<td align="center">40</td>
<td align="center">80</td>
</tr>
</tbody>
</table>
</table-wrap>
<fig id="F8" position="float">
<label>FIGURE 8</label>
<caption>
<p>The performance of RE-QCNN on Fashion-MNIST dataset.</p>
</caption>
<graphic xlink:href="fphy-12-1362690-g008.tif"/>
</fig>
<p>Subsequently, using the same model configuration and the optimal initial parameters of the QAOA circuit mentioned above, we assess the performance of our model under measurement errors of 0, 0.03, and 0.05. <xref ref-type="fig" rid="F8">Figure 8B</xref> reveals that the performance of our model is quite resilient to such errors. Considering that the number of layers in the parameterized quantum circuit of our model is relatively shallow, gate errors may be effectively mitigated by error mitigation techniques [<xref ref-type="bibr" rid="B56">56</xref>&#x2013;<xref ref-type="bibr" rid="B59">59</xref>], which suggests small gate errors may not significantly affect the performance of our model. Therefore, we only assess the performance of our model under different levels of measurement errors.</p>
<p>Finally, using the same model configuration and the optimal initial parameters of the QAOA circuit mentioned above, we vary the number of layers in the QAOA circuit to 2, 3, and 4. The corresponding accuracy of our model is depicted as a function of epoch in <xref ref-type="fig" rid="F8">Figure 8C</xref>. This figure demonstrates that our model achieves superior performance when a larger number of layers are employed in the QAOA circuit. The result aligns with the viewpoint that increasing the number of layers enhances the entanglement capability of the QAOA circuit.</p>
<p>Overall, our model exhibits excellent numerical performance on the more complex Fashion-MNIST dataset compared to the MNIST dataset.</p>
</sec>
</sec>
<sec sec-type="conclusion" id="s5">
<title>5 Conclusion</title>
<p>To explore the potential quantum advantages of the QCNN model, it is common to compare the prediction accuracy directly between the quantum and classical models. However, according to the &#x201c;no-free-lunch&#x201d; conjecture, a specific QCNN or CNN model may not exhibit excellent numerical performance across all tasks. This perspective is highly dependent on the specific task and may be influenced by random factors. A more solid perspective for exploring the potential quantum advantages involves comparing computational resources by quantifying the number of fundamental computational elements used in the training and prediction processes. By considering both perspectives, a more comprehensive showcase of the potential quantum advantages can be achieved. In this paper, we propose a computationally resource-efficient QCNN model. Our model significantly reduces the computational resources required for the quantum convolutional layer compared to the classical CNN model. Additionally, our model achieves high accuracy in the multi-class classification tasks of recognizing images from the MNIST and Fashion-MNIST datasets. Our results hold significant importance in exploring the potential quantum advantages of the QCNN model in the NISQ era.</p>
<p>Several important aspects merit further investigation in the field of QCNN. Specifically, a crucial research topic is to explore the impact of parameterized quantum circuits on the numerical performance of the QCNN model. Additionally, it is worth considering the exploration of novel data encoding strategies, particularly those capable of handling high-dimensional data such as video streams and 3D medical images. This research will greatly contribute to diversifying the application scenarios of the QCNN model.</p>
</sec>
</body>
<back>
<sec sec-type="data-availability" id="s6">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="s11">Supplementary Material</xref>, further inquiries can be directed to the corresponding authors.</p>
</sec>
<sec id="s7">
<title>Author contributions</title>
<p>YS: Writing&#x2013;original draft, Software, Visualization, Writing&#x2013;review and editing, Conceptualization, Data curation, Formal Analysis, Investigation, Methodology, Validation. JL: Writing&#x2013;original draft, Writing&#x2013;review and editing, Investigation, Validation, Visualization. YW: Writing&#x2013;review and editing, Software, Data curation, Supervision, Validation. SQ: Writing&#x2013;review and editing, Funding acquisition, Supervision, Validation. QW: Writing&#x2013;review and editing, Funding acquisition, Supervision, Validation. FG: Writing&#x2013;review and editing, Funding acquisition, Supervision, Validation.</p>
</sec>
<sec sec-type="funding-information" id="s8">
<title>Funding</title>
<p>The author(s) declare that financial support was received for the research, authorship, and/or publication of this article. This work is supported by National Natural Science Foundation of China (Grant Nos. 62372048, 62371069, and 62272056), Beijing Natural Science Foundation (Grant No. 4222031), and China Scholarship Council (Grant No. 202006470011).</p>
</sec>
<ack>
<p>We would like to thank Yueran Hou, Jiashao Shen, Ximing Wang, and Shengyao Wu for providing helpful suggestions for the manuscript.</p>
</ack>
<sec sec-type="COI-statement" id="s9">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s10">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s11">
<title>Supplementary material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fphy.2024.1362690/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fphy.2024.1362690/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet1.PDF" id="SM1" mimetype="application/PDF" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<label>1.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Harrow</surname>
<given-names>AW</given-names>
</name>
<name>
<surname>Montanaro</surname>
<given-names>A</given-names>
</name>
</person-group>. <article-title>Quantum computational supremacy</article-title>. <source>Nature</source> (<year>2017</year>) <volume>549</volume>:<fpage>203</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1038/nature23458</pub-id>
</citation>
</ref>
<ref id="B2">
<label>2.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shor</surname>
<given-names>PW</given-names>
</name>
</person-group>. <article-title>Polynomial-time algorithms for prime factorization and discrete logarithms on a quantum computer</article-title>. <source>SIAM Rev</source> (<year>1999</year>) <volume>41</volume>:<fpage>303</fpage>&#x2013;<lpage>32</lpage>. <pub-id pub-id-type="doi">10.1137/S0036144598347011</pub-id>
</citation>
</ref>
<ref id="B3">
<label>3.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Grover</surname>
<given-names>LK</given-names>
</name>
</person-group>. <article-title>Quantum mechanics helps in searching for a needle in a haystack</article-title>. <source>Phys Rev Lett</source> (<year>1997</year>) <volume>79</volume>:<fpage>325</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.79.325</pub-id>
</citation>
</ref>
<ref id="B4">
<label>4.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Biamonte</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Wittek</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Pancotti</surname>
<given-names>N</given-names>
</name>
<name>
<surname>Rebentrost</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Wiebe</surname>
<given-names>N</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum machine learning</article-title>. <source>Nature</source> (<year>2017</year>) <volume>549</volume>:<fpage>195</fpage>&#x2013;<lpage>202</lpage>. <pub-id pub-id-type="doi">10.1038/nature23474</pub-id>
</citation>
</ref>
<ref id="B5">
<label>5.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Dunjko</surname>
<given-names>V</given-names>
</name>
<name>
<surname>Briegel</surname>
<given-names>HJ</given-names>
</name>
</person-group>. <article-title>Machine learning and artificial intelligence in the quantum domain: a review of recent progress</article-title>. <source>Rep Prog Phys</source> (<year>2018</year>) <volume>81</volume>:<fpage>074001</fpage>. <pub-id pub-id-type="doi">10.1088/1361-6633/aab406</pub-id>
</citation>
</ref>
<ref id="B6">
<label>6.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sch&#xfc;tt</surname>
<given-names>KT</given-names>
</name>
<name>
<surname>Chmiela</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Von Lilienfeld</surname>
<given-names>OA</given-names>
</name>
<name>
<surname>Tkatchenko</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Tsuda</surname>
<given-names>K</given-names>
</name>
<name>
<surname>M&#xfc;ller</surname>
<given-names>K-R</given-names>
</name>
</person-group>. <article-title>Machine learning meets quantum physics</article-title>. <source>Lecture Notes Phys</source> (<year>2020</year>). <pub-id pub-id-type="doi">10.1007/978-3-030-40245-7</pub-id>
</citation>
</ref>
<ref id="B7">
<label>7.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Harrow</surname>
<given-names>AW</given-names>
</name>
<name>
<surname>Hassidim</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum algorithm for linear systems of equations</article-title>. <source>Phys Rev Lett</source> (<year>2009</year>) <volume>103</volume>:<fpage>150502</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.103.150502</pub-id>
</citation>
</ref>
<ref id="B8">
<label>8.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rebentrost</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Steffens</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Marvian</surname>
<given-names>I</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum singular-value decomposition of nonsparse low-rank matrices</article-title>. <source>Phys Rev A</source> (<year>2018</year>) <volume>97</volume>:<fpage>012327</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevA.97.012327</pub-id>
</citation>
</ref>
<ref id="B9">
<label>9.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Somma</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Childs</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Kothari</surname>
<given-names>R</given-names>
</name>
</person-group>. <article-title>Quantum linear systems algorithm with exponentially improved dependence on precision</article-title>. <source>APS March Meet Abstr</source> (<year>2016</year>) <volume>2016</volume>. <pub-id pub-id-type="doi">10.1137/16M1087072</pub-id>
</citation>
</ref>
<ref id="B10">
<label>10.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wiebe</surname>
<given-names>N</given-names>
</name>
<name>
<surname>Braun</surname>
<given-names>D</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum algorithm for data fitting</article-title>. <source>Phys Rev Lett</source> (<year>2012</year>) <volume>109</volume>:<fpage>050505</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.109.050505</pub-id>
</citation>
</ref>
<ref id="B11">
<label>11.</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Brandao</surname>
<given-names>FG</given-names>
</name>
<name>
<surname>Svore</surname>
<given-names>KM</given-names>
</name>
</person-group>. <article-title>Quantum speed-ups for solving semidefinite programs</article-title>. In: <conf-name>2017 IEEE 58th Annual Symposium on Foundations of Computer Science (FOCS) (IEEE)</conf-name>; <conf-date>15-17 October 2017</conf-date>; <conf-loc>Berkeley, California, USA</conf-loc> (<year>2017</year>). p. <fpage>415</fpage>&#x2013;<lpage>26</lpage>. <pub-id pub-id-type="doi">10.1109/FOCS.2017.45</pub-id>
</citation>
</ref>
<ref id="B12">
<label>12.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rebentrost</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Schuld</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Wossnig</surname>
<given-names>L</given-names>
</name>
<name>
<surname>Petruccione</surname>
<given-names>F</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum gradient descent and Newton&#x2019;s method for constrained polynomial optimization</article-title>. <source>New J Phys</source> (<year>2019</year>) <volume>21</volume>:<fpage>073023</fpage>. <pub-id pub-id-type="doi">10.1088/1367-2630/ab2a9e</pub-id>
</citation>
</ref>
<ref id="B13">
<label>13.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Mohseni</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Rebentrost</surname>
<given-names>P</given-names>
</name>
</person-group>. <article-title>Quantum principal component analysis</article-title>. <source>Nat Phys</source> (<year>2014</year>) <volume>10</volume>:<fpage>631</fpage>&#x2013;<lpage>3</lpage>. <pub-id pub-id-type="doi">10.1038/nphys3029</pub-id>
</citation>
</ref>
<ref id="B14">
<label>14.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rebentrost</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Mohseni</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Lloyd</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum support vector machine for big data classification</article-title>. <source>Phys Rev Lett</source> (<year>2014</year>) <volume>113</volume>:<fpage>130503</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevLett.113.130503</pub-id>
</citation>
</ref>
<ref id="B15">
<label>15.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Long</surname>
<given-names>G</given-names>
</name>
</person-group>. <article-title>A quantum convolutional neural network on nisq devices</article-title>. <source>AAPPS Bull</source> (<year>2022</year>) <volume>32</volume>:<fpage>2</fpage>&#x2013;<lpage>11</lpage>. <pub-id pub-id-type="doi">10.1007/s43673-021-00030-3</pub-id>
</citation>
</ref>
<ref id="B16">
<label>16.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Preskill</surname>
<given-names>J</given-names>
</name>
</person-group>. <article-title>Quantum computing in the nisq era and beyond</article-title>. <source>Quantum</source> (<year>2018</year>) <volume>2</volume>:<fpage>79</fpage>. <pub-id pub-id-type="doi">10.22331/q-2018-08-06-79</pub-id>
</citation>
</ref>
<ref id="B17">
<label>17.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Farhi</surname>
<given-names>E</given-names>
</name>
<name>
<surname>Neven</surname>
<given-names>H</given-names>
</name>
</person-group>. <source>Classification with quantum neural networks on near term processors</source> (<year>2018</year>). <comment>
<italic>arXiv preprint arXiv:1802.06002</italic>
</comment>.</citation>
</ref>
<ref id="B18">
<label>18.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schuld</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Bocharov</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Svore</surname>
<given-names>KM</given-names>
</name>
<name>
<surname>Wiebe</surname>
<given-names>N</given-names>
</name>
</person-group>. <article-title>Circuit-centric quantum classifiers</article-title>. <source>Phys Rev A</source> (<year>2020</year>) <volume>101</volume>:<fpage>032308</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevA.101.032308</pub-id>
</citation>
</ref>
<ref id="B19">
<label>19.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>W</given-names>
</name>
<name>
<surname>Lu</surname>
<given-names>Z-d.</given-names>
</name>
<name>
<surname>Deng</surname>
<given-names>D-L</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Shang</surname>
<given-names>S</given-names>
</name>
<etal/>
</person-group> (<year>2022</year>). <article-title>Quantum neural network classifiers: a tutorial</article-title>. <source>SciPost Phys Lecture Notes</source>. <volume>19</volume>, <fpage>61</fpage>, <pub-id pub-id-type="doi">10.21468/SciPostPhysLectNotes.61</pub-id>
</citation>
</ref>
<ref id="B20">
<label>20.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Massoli</surname>
<given-names>FV</given-names>
</name>
<name>
<surname>Vadicamo</surname>
<given-names>L</given-names>
</name>
<name>
<surname>Amato</surname>
<given-names>G</given-names>
</name>
<name>
<surname>Falchi</surname>
<given-names>F</given-names>
</name>
</person-group>. <article-title>A leap among quantum computing and quantum neural networks: a survey</article-title>. <source>ACM Comput Surv</source> (<year>2022</year>) <volume>55</volume>:<fpage>1</fpage>&#x2013;<lpage>37</lpage>. <pub-id pub-id-type="doi">10.1145/3529756</pub-id>
</citation>
</ref>
<ref id="B21">
<label>21.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Song</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>D</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>Q</given-names>
</name>
<name>
<surname>Qin</surname>
<given-names>S</given-names>
</name>
<etal/>
</person-group> <source>A quantum federated learning framework for classical clients</source> (<year>2023</year>). <comment>
<italic>arXiv preprint arXiv:2312.11672</italic>
</comment>.</citation>
</ref>
<ref id="B22">
<label>22.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>McClean</surname>
<given-names>JR</given-names>
</name>
<name>
<surname>Romero</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Babbush</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Aspuru-Guzik</surname>
<given-names>A</given-names>
</name>
</person-group>. <article-title>The theory of variational hybrid quantum-classical algorithms</article-title>. <source>New J Phys</source> (<year>2016</year>) <volume>18</volume>:<fpage>023023</fpage>. <pub-id pub-id-type="doi">10.1088/1367-2630/18/2/023023</pub-id>
</citation>
</ref>
<ref id="B23">
<label>23.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cerezo</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Arrasmith</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Babbush</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Benjamin</surname>
<given-names>SC</given-names>
</name>
<name>
<surname>Endo</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Fujii</surname>
<given-names>K</given-names>
</name>
<etal/>
</person-group> <article-title>Variational quantum algorithms</article-title>. <source>Nat Rev Phys</source> (<year>2021</year>) <volume>3</volume>:<fpage>625</fpage>&#x2013;<lpage>44</lpage>. <pub-id pub-id-type="doi">10.1038/s42254-021-00348-9</pub-id>
</citation>
</ref>
<ref id="B24">
<label>24.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bharti</surname>
<given-names>K</given-names>
</name>
<name>
<surname>Cervera-Lierta</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Kyaw</surname>
<given-names>TH</given-names>
</name>
<name>
<surname>Haug</surname>
<given-names>T</given-names>
</name>
<name>
<surname>Alperin-Lea</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Anand</surname>
<given-names>A</given-names>
</name>
<etal/>
</person-group> <article-title>Noisy intermediate-scale quantum algorithms</article-title>. <source>Rev Mod Phys</source> (<year>2022</year>) <volume>94</volume>:<fpage>015004</fpage>. <pub-id pub-id-type="doi">10.1103/RevModPhys.94.015004</pub-id>
</citation>
</ref>
<ref id="B25">
<label>25.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Sun</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>X</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>JB</given-names>
</name>
<name>
<surname>Lv</surname>
<given-names>D</given-names>
</name>
</person-group>. <article-title>Orbital expansion variational quantum eigensolver</article-title>. <source>Quan Sci Tech</source> (<year>2023</year>) <volume>8</volume>:<fpage>045030</fpage>. <pub-id pub-id-type="doi">10.1088/2058-9565/acf9c7</pub-id>
</citation>
</ref>
<ref id="B26">
<label>26.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wu</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>B</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>X</given-names>
</name>
</person-group>. <article-title>Quantum phase recognition via quantum kernel methods</article-title>. <source>Quantum</source> (<year>2023</year>) <volume>7</volume>:<fpage>981</fpage>. <pub-id pub-id-type="doi">10.22331/q-2023-04-17-981</pub-id>
</citation>
</ref>
<ref id="B27">
<label>27.</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Oh</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Choi</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>J</given-names>
</name>
</person-group>. <article-title>A tutorial on quantum convolutional neural networks (qcnn)</article-title>. In: <conf-name>2020 International Conference on Information and Communication Technology Convergence (ICTC)</conf-name>; <conf-date>October 21-23, 2020</conf-date>; <conf-loc>Jeju Island, Korea (South)</conf-loc> (<year>2020</year>). p. <fpage>236</fpage>&#x2013;<lpage>9</lpage>. <pub-id pub-id-type="doi">10.1109/ICTC49870.2020.9289439</pub-id>
</citation>
</ref>
<ref id="B28">
<label>28.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>F</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>W</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>J</given-names>
</name>
</person-group>. <article-title>A survey of convolutional neural networks: analysis, applications, and prospects</article-title>. <source>IEEE Trans Neural Networks Learn Syst</source> (<year>2022</year>) <volume>33</volume>:<fpage>6999</fpage>&#x2013;<lpage>7019</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2021.3084827</pub-id>
</citation>
</ref>
<ref id="B29">
<label>29.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gu</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Kuen</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Ma</surname>
<given-names>L</given-names>
</name>
<name>
<surname>Shahroudy</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Shuai</surname>
<given-names>B</given-names>
</name>
<etal/>
</person-group> <article-title>Recent advances in convolutional neural networks</article-title>. <source>Pattern recognition</source> (<year>2018</year>) <volume>77</volume>:<fpage>354</fpage>&#x2013;<lpage>77</lpage>. <pub-id pub-id-type="doi">10.1016/j.patcog.2017.10.013</pub-id>
</citation>
</ref>
<ref id="B30">
<label>30.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cong</surname>
<given-names>I</given-names>
</name>
<name>
<surname>Choi</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Lukin</surname>
<given-names>MD</given-names>
</name>
</person-group>. <article-title>Quantum convolutional neural networks</article-title>. <source>Nat Phys</source> (<year>2019</year>) <volume>15</volume>:<fpage>1273</fpage>&#x2013;<lpage>8</lpage>. <pub-id pub-id-type="doi">10.1038/s41567-019-0648-8</pub-id>
</citation>
</ref>
<ref id="B31">
<label>31.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Lim</surname>
<given-names>KH</given-names>
</name>
<name>
<surname>Wood</surname>
<given-names>KL</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>W</given-names>
</name>
<name>
<surname>Guo</surname>
<given-names>C</given-names>
</name>
<name>
<surname>Huang</surname>
<given-names>H-L</given-names>
</name>
</person-group>. <article-title>Hybrid quantum-classical convolutional neural networks</article-title>. <source>Sci China Phys Mech Astron</source> (<year>2021</year>) <volume>64</volume>:<fpage>290311</fpage>. <pub-id pub-id-type="doi">10.1007/s11433-021-1734-3</pub-id>
</citation>
</ref>
<ref id="B32">
<label>32.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Houssein</surname>
<given-names>EH</given-names>
</name>
<name>
<surname>Abohashima</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Elhoseny</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Mohamed</surname>
<given-names>WM</given-names>
</name>
</person-group>. <article-title>Hybrid quantum-classical convolutional neural network model for covid-19 prediction using chest x-ray images</article-title>. <source>J Comput Des Eng</source> (<year>2022</year>) <volume>9</volume>:<fpage>343</fpage>&#x2013;<lpage>63</lpage>. <pub-id pub-id-type="doi">10.1093/jcde/qwac003</pub-id>
</citation>
</ref>
<ref id="B33">
<label>33.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bokhan</surname>
<given-names>D</given-names>
</name>
<name>
<surname>Mastiukova</surname>
<given-names>AS</given-names>
</name>
<name>
<surname>Boev</surname>
<given-names>AS</given-names>
</name>
<name>
<surname>Trubnikov</surname>
<given-names>DN</given-names>
</name>
<name>
<surname>Fedorov</surname>
<given-names>AK</given-names>
</name>
</person-group>. <article-title>Multiclass classification using quantum convolutional neural networks with hybrid quantum-classical learning</article-title>. <source>Front Phys</source> (<year>2022</year>) <volume>10</volume>. <pub-id pub-id-type="doi">10.3389/fphy.2022.1069985</pub-id>
</citation>
</ref>
<ref id="B34">
<label>34.</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Matic</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Monnet</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Lorenz</surname>
<given-names>JM</given-names>
</name>
<name>
<surname>Schachtner</surname>
<given-names>B</given-names>
</name>
<name>
<surname>Messerer</surname>
<given-names>T</given-names>
</name>
</person-group>. <article-title>Quantum-classical convolutional neural networks in radiological image classification</article-title>. In: <conf-name>2022 IEEE International Conference on Quantum Computing and Engineering (QCE) (IEEE)</conf-name>; <conf-date>18-23 September 2022</conf-date>; <conf-loc>Broomfield, Colorado, USA</conf-loc> (<year>2022</year>). p. <fpage>56</fpage>&#x2013;<lpage>66</lpage>. <pub-id pub-id-type="doi">10.1109/QCE53715.2022.00024</pub-id>
</citation>
</ref>
<ref id="B35">
<label>35.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Henderson</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Shakya</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Pradhan</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Cook</surname>
<given-names>T</given-names>
</name>
</person-group>. <article-title>Quanvolutional neural networks: powering image recognition with quantum circuits</article-title>. <source>Quan Machine Intelligence</source> (<year>2020</year>) <volume>2</volume>:<fpage>2</fpage>. <pub-id pub-id-type="doi">10.1007/s42484-020-00012-y</pub-id>
</citation>
</ref>
<ref id="B36">
<label>36.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chen</surname>
<given-names>SY-C</given-names>
</name>
<name>
<surname>Wei</surname>
<given-names>T-C</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>C</given-names>
</name>
<name>
<surname>Yu</surname>
<given-names>H</given-names>
</name>
<name>
<surname>Yoo</surname>
<given-names>S</given-names>
</name>
</person-group>. <article-title>Quantum convolutional neural networks for high energy physics data analysis</article-title>. <source>Phys Rev Res</source> (<year>2022</year>) <volume>4</volume>:<fpage>013231</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevResearch.4.013231</pub-id>
</citation>
</ref>
<ref id="B37">
<label>37.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Amin</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Sharif</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Gul</surname>
<given-names>N</given-names>
</name>
<name>
<surname>Kadry</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Chakraborty</surname>
<given-names>C</given-names>
</name>
</person-group>. <article-title>Quantum machine learning architecture for covid-19 classification based on synthetic data generation using conditional adversarial neural network</article-title>. <source>Cogn Comput</source> (<year>2022</year>) <volume>14</volume>:<fpage>1677</fpage>&#x2013;<lpage>88</lpage>. <pub-id pub-id-type="doi">10.1007/s12559-021-09926-6</pub-id>
</citation>
</ref>
<ref id="B38">
<label>38.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>M&#xF6;tt&#xF6;nen</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Vartiainen</surname>
<given-names>JJ</given-names>
</name>
<name>
<surname>Bergholm</surname>
<given-names>V</given-names>
</name>
<name>
<surname>Salomaa</surname>
<given-names>MM</given-names>
</name>
</person-group>. <source>Transformation of quantum states using uniformly controlled rotations</source> (<year>2004</year>). <comment>
<italic>arXiv preprint quant-ph/0407010</italic>
</comment>.</citation>
</ref>
<ref id="B39">
<label>39.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Iten</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Colbeck</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Kukuljan</surname>
<given-names>I</given-names>
</name>
<name>
<surname>Home</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Christandl</surname>
<given-names>M</given-names>
</name>
</person-group>. <article-title>Quantum circuits for isometries</article-title>. <source>Phys Rev A</source> (<year>2016</year>) <volume>93</volume>:<fpage>032318</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevA.93.032318</pub-id>
</citation>
</ref>
<ref id="B40">
<label>40.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Farhi</surname>
<given-names>E</given-names>
</name>
<name>
<surname>Goldstone</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Gutmann</surname>
<given-names>S</given-names>
</name>
</person-group>. <source>A quantum approximate optimization algorithm</source> (<year>2014</year>). <comment>
<italic>arXiv preprint arXiv:1411.4028</italic>
</comment>.</citation>
</ref>
<ref id="B41">
<label>41.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hadfield</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>O&#x2019;Gorman</surname>
<given-names>B</given-names>
</name>
<name>
<surname>Rieffel</surname>
<given-names>EG</given-names>
</name>
<name>
<surname>Venturelli</surname>
<given-names>D</given-names>
</name>
<name>
<surname>Biswas</surname>
<given-names>R</given-names>
</name>
</person-group>. <article-title>From the quantum approximate optimization algorithm to a quantum alternating operator ansatz</article-title>. <source>Algorithms</source> (<year>2019</year>) <volume>12</volume>:<fpage>34</fpage>. <pub-id pub-id-type="doi">10.3390/a12020034</pub-id>
</citation>
</ref>
<ref id="B42">
<label>42.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Song</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Wu</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Qin</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Wen</surname>
<given-names>Q</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>JB</given-names>
</name>
<name>
<surname>Gao</surname>
<given-names>F</given-names>
</name>
</person-group>. <source>Trainability analysis of quantum optimization algorithms from a bayesian lens</source> (<year>2023</year>). <comment>
<italic>arXiv preprint arXiv:2310.06270</italic>
</comment>.</citation>
</ref>
<ref id="B43">
<label>43.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Liu</surname>
<given-names>T</given-names>
</name>
<name>
<surname>Fang</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Zhao</surname>
<given-names>Y</given-names>
</name>
<name>
<surname>Wang</surname>
<given-names>P</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>J</given-names>
</name>
</person-group>. <source>Implementation of training convolutional neural networks</source> (<year>2015</year>). <comment>
<italic>arXiv preprint arXiv:1506.01195</italic>
</comment>.</citation>
</ref>
<ref id="B44">
<label>44.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Romero</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Babbush</surname>
<given-names>R</given-names>
</name>
<name>
<surname>McClean</surname>
<given-names>JR</given-names>
</name>
<name>
<surname>Hempel</surname>
<given-names>C</given-names>
</name>
<name>
<surname>Love</surname>
<given-names>PJ</given-names>
</name>
<name>
<surname>Aspuru-Guzik</surname>
<given-names>A</given-names>
</name>
</person-group>. <article-title>Strategies for quantum computing molecular energies using the unitary coupled cluster ansatz</article-title>. <source>Quantum Sci Technol</source> (<year>2018</year>) <volume>4</volume>:<fpage>014008</fpage>. <pub-id pub-id-type="doi">10.1088/2058-9565/aad3e4</pub-id>
</citation>
</ref>
<ref id="B45">
<label>45.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mitarai</surname>
<given-names>K</given-names>
</name>
<name>
<surname>Negoro</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Kitagawa</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Fujii</surname>
<given-names>K</given-names>
</name>
</person-group>. <article-title>Quantum circuit learning</article-title>. <source>Phys Rev A</source> (<year>2018</year>) <volume>98</volume>:<fpage>032309</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevA.98.032309</pub-id>
</citation>
</ref>
<ref id="B46">
<label>46.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Schuld</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Bergholm</surname>
<given-names>V</given-names>
</name>
<name>
<surname>Gogolin</surname>
<given-names>C</given-names>
</name>
<name>
<surname>Izaac</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Killoran</surname>
<given-names>N</given-names>
</name>
</person-group>. <article-title>Evaluating analytic gradients on quantum hardware</article-title>. <source>Phys Rev A</source> (<year>2019</year>) <volume>99</volume>:<fpage>032331</fpage>. <pub-id pub-id-type="doi">10.1103/PhysRevA.99.032331</pub-id>
</citation>
</ref>
<ref id="B47">
<label>47.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Kiranyaz</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Avci</surname>
<given-names>O</given-names>
</name>
<name>
<surname>Abdeljaber</surname>
<given-names>O</given-names>
</name>
<name>
<surname>Ince</surname>
<given-names>T</given-names>
</name>
<name>
<surname>Gabbouj</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Inman</surname>
<given-names>DJ</given-names>
</name>
</person-group>. <article-title>1d convolutional neural networks and applications: a survey</article-title>. <source>Mech Syst Signal Process</source> (<year>2021</year>) <volume>151</volume>:<fpage>107398</fpage>. <pub-id pub-id-type="doi">10.1016/j.ymssp.2020.107398</pub-id>
</citation>
</ref>
<ref id="B48">
<label>48.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sim</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Johnson</surname>
<given-names>PD</given-names>
</name>
<name>
<surname>Aspuru-Guzik</surname>
<given-names>A</given-names>
</name>
</person-group>. <article-title>Expressibility and entangling capability of parameterized quantum circuits for hybrid quantum-classical algorithms</article-title>. <source>Adv Quantum Technol</source> (<year>2019</year>) <volume>2</volume>:<fpage>1900070</fpage>. <pub-id pub-id-type="doi">10.1002/qute.201900070</pub-id>
</citation>
</ref>
<ref id="B49">
<label>49.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Kingma</surname>
<given-names>DP</given-names>
</name>
<name>
<surname>Ba</surname>
<given-names>J</given-names>
</name>
</person-group>. <source>Adam: a method for stochastic optimization</source> (<year>2014</year>). <comment>
<italic>arXiv preprint arXiv:1412.6980</italic>
</comment>.</citation>
</ref>
<ref id="B50">
<label>50.</label>
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Gleinig</surname>
<given-names>N</given-names>
</name>
<name>
<surname>Hoefler</surname>
<given-names>T</given-names>
</name>
</person-group>. <article-title>An efficient algorithm for sparse quantum state preparation</article-title>. In: <conf-name>2021 58th ACM/IEEE Design Automation Conference (DAC) (IEEE)</conf-name>; <conf-date>December 5 - 9, 2021</conf-date>; <conf-loc>San Francisco, CA, USA</conf-loc> (<year>2021</year>). p. <fpage>433</fpage>&#x2013;<lpage>8</lpage>.</citation>
</ref>
<ref id="B51">
<label>51.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Malvetti</surname>
<given-names>E</given-names>
</name>
<name>
<surname>Iten</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Colbeck</surname>
<given-names>R</given-names>
</name>
</person-group>. <article-title>Quantum circuits for sparse isometries</article-title>. <source>Quantum</source> (<year>2021</year>) <volume>5</volume>:<fpage>412</fpage>. <pub-id pub-id-type="doi">10.22331/q-2021-03-15-412</pub-id>
</citation>
</ref>
<ref id="B52">
<label>52.</label>
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Camps</surname>
<given-names>D</given-names>
</name>
<name>
<surname>Lin</surname>
<given-names>L</given-names>
</name>
<name>
<surname>Van Beeumen</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>C</given-names>
</name>
</person-group>. <source>Explicit quantum circuits for block encodings of certain sparse matrices</source> (<year>2022</year>). <comment>
<italic>arXiv preprint arXiv:2203.10236</italic>
</comment>.</citation>
</ref>
<ref id="B53">
<label>53.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>McClean</surname>
<given-names>JR</given-names>
</name>
<name>
<surname>Boixo</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Smelyanskiy</surname>
<given-names>VN</given-names>
</name>
<name>
<surname>Babbush</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Neven</surname>
<given-names>H</given-names>
</name>
</person-group>. <article-title>Barren plateaus in quantum neural network training landscapes</article-title>. <source>Nat Commun</source> (<year>2018</year>) <volume>9</volume>:<fpage>4812</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1038/s41467-018-07090-4</pub-id>
</citation>
</ref>
<ref id="B54">
<label>54.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cerezo</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Sone</surname>
<given-names>A</given-names>
</name>
<name>
<surname>Volkoff</surname>
<given-names>T</given-names>
</name>
<name>
<surname>Cincio</surname>
<given-names>L</given-names>
</name>
<name>
<surname>Coles</surname>
<given-names>PJ</given-names>
</name>
</person-group>. <article-title>Cost function dependent barren plateaus in shallow parametrized quantum circuits</article-title>. <source>Nat Commun</source> (<year>2021</year>) <volume>12</volume>:<fpage>1791</fpage>. <pub-id pub-id-type="doi">10.1038/s41467-021-21728-w</pub-id>
</citation>
</ref>
<ref id="B55">
<label>55.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Huang</surname>
<given-names>H-Y</given-names>
</name>
<name>
<surname>Broughton</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Cotler</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Chen</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>J</given-names>
</name>
<name>
<surname>Mohseni</surname>
<given-names>M</given-names>
</name>
<etal/>
</person-group>. <article-title>Quantum advantage in learning from experiments</article-title>. <source>Science</source> (<year>2022</year>) <volume>376</volume>:<fpage>1182</fpage>&#x2013;<lpage>6</lpage>. <pub-id pub-id-type="doi">10.1126/science.abn7293</pub-id>
</citation>
</ref>
<ref id="B56">
<label>56.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Endo</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Cai</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Benjamin</surname>
<given-names>SC</given-names>
</name>
<name>
<surname>Yuan</surname>
<given-names>X</given-names>
</name>
</person-group>. <article-title>Hybrid quantum-classical algorithms and quantum error mitigation</article-title>. <source>J Phys Soc Jpn</source> (<year>2021</year>) <volume>90</volume>:<fpage>032001</fpage>. <pub-id pub-id-type="doi">10.7566/jpsj.90.032001</pub-id>
</citation>
</ref>
<ref id="B57">
<label>57.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cai</surname>
<given-names>Z</given-names>
</name>
<name>
<surname>Babbush</surname>
<given-names>R</given-names>
</name>
<name>
<surname>Benjamin</surname>
<given-names>SC</given-names>
</name>
<name>
<surname>Endo</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Huggins</surname>
<given-names>WJ</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y</given-names>
</name>
<etal/>
</person-group>. <article-title>Quantum error mitigation</article-title>. <source>Rev Mod Phys</source> (<year>2023</year>) <volume>95</volume>:<fpage>045005</fpage>. <pub-id pub-id-type="doi">10.1103/revmodphys.95.045005</pub-id>
</citation>
</ref>
<ref id="B58">
<label>58.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Endo</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Benjamin</surname>
<given-names>SC</given-names>
</name>
<name>
<surname>Li</surname>
<given-names>Y</given-names>
</name>
</person-group>. <article-title>Practical quantum error mitigation for near-future applications</article-title>. <source>Phys Rev X</source> (<year>2018</year>) <volume>8</volume>:<fpage>031027</fpage>. <pub-id pub-id-type="doi">10.1103/physrevx.8.031027</pub-id>
</citation>
</ref>
<ref id="B59">
<label>59.</label>
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Temme</surname>
<given-names>K</given-names>
</name>
<name>
<surname>Bravyi</surname>
<given-names>S</given-names>
</name>
<name>
<surname>Gambetta</surname>
<given-names>JM</given-names>
</name>
</person-group>. <article-title>Error mitigation for short-depth quantum circuits</article-title>. <source>Phys Rev Lett</source> (<year>2017</year>) <volume>119</volume>:<fpage>180509</fpage>. <pub-id pub-id-type="doi">10.1103/physrevlett.119.180509</pub-id>
</citation>
</ref>
</ref-list>
</back>
</article>