<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
	<article article-type="review-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
		<front>
			<journal-meta>
				<journal-id journal-id-type="publisher-id">Front. Big Data</journal-id>
				<journal-title>Frontiers in Big Data</journal-title>
				<abbrev-journal-title abbrev-type="pubmed">Front. Big Data</abbrev-journal-title>
				<issn pub-type="epub">2624-909X</issn>
				<publisher>
					<publisher-name>Frontiers Media S.A.</publisher-name>
				</publisher>
			</journal-meta>
			<article-meta>
				<article-id pub-id-type="publisher-id">587139</article-id>
				<article-id pub-id-type="doi">10.3389/fdata.2020.587139</article-id>
				<article-categories>
					<subj-group subj-group-type="heading">
						<subject>Big Data</subject>
						<subj-group>
							<subject>Systematic Review</subject>
						</subj-group>
					</subj-group>
				</article-categories>
				<title-group>
					<article-title>Securing Machine Learning in the Cloud: A Systematic Review of Cloud Machine Learning Security</article-title>
					<alt-title alt-title-type="left-running-head">Qayyum et al.</alt-title>
					<alt-title alt-title-type="right-running-head">Systematic Review of Cloud ML Security</alt-title>
				</title-group>
				<contrib-group>
					<contrib contrib-type="author" corresp="yes">
						<name>
							<surname>Qayyum</surname>
							<given-names>Adnan</given-names>
						</name>
						<xref ref-type="aff" rid="aff1">
							<sup>1</sup>
						</xref>
						<xref ref-type="corresp" rid="c001">&#x2a;</xref>
						<uri xlink:href="http://loop.frontiersin.org/people/1040547/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Ijaz</surname>
							<given-names>Aneeqa</given-names>
						</name>
						<xref ref-type="aff" rid="aff2">
							<sup>2</sup>
						</xref>
						<uri xlink:href="https://loop.frontiersin.org/people/1132599/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Usama</surname>
							<given-names>Muhammad</given-names>
						</name>
						<xref ref-type="aff" rid="aff1">
							<sup>1</sup>
						</xref>
						<uri xlink:href="http://loop.frontiersin.org/people/1074624/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Iqbal</surname>
							<given-names>Waleed</given-names>
						</name>
						<xref ref-type="aff" rid="aff3">
							<sup>3</sup>
						</xref>
						<uri xlink:href="http://loop.frontiersin.org/people/1041711/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Qadir</surname>
							<given-names>Junaid</given-names>
						</name>
						<xref ref-type="aff" rid="aff1">
							<sup>1</sup>
						</xref>
						<uri xlink:href="https://loop.frontiersin.org/people/791252/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Elkhatib</surname>
							<given-names>Yehia</given-names>
						</name>
						<xref ref-type="aff" rid="aff4">
							<sup>4</sup>
						</xref>
						<uri xlink:href="https://loop.frontiersin.org/people/1122100/overview"/>
					</contrib>
					<contrib contrib-type="author">
						<name>
							<surname>Al-Fuqaha</surname>
							<given-names>Ala</given-names>
						</name>
						<xref ref-type="aff" rid="aff5">
							<sup>5</sup>
						</xref>
						<uri xlink:href="https://loop.frontiersin.org/people/1098985/overview"/>
					</contrib>
				</contrib-group>
				<aff id="aff1">
					<label>
						<sup>1</sup>
					</label>Information Technology University (ITU), <addr-line>Lahore</addr-line>, <country>Pakistan</country>
				</aff>
				<aff id="aff2">
					<label>
						<sup>2</sup>
					</label>AI4Networks Research Center, University of Oklahoma, <addr-line>Norman</addr-line>, <addr-line>OK</addr-line>, <country>United States</country>
				</aff>
				<aff id="aff3">
					<label>
						<sup>3</sup>
					</label>Social Data Science (SDS) Lab, Queen Mary University of London, <addr-line>London</addr-line>, <country>United Kingdom</country>
				</aff>
				<aff id="aff4">
					<label>
						<sup>4</sup>
					</label>School of Computing and Communications, Lancaster University, <addr-line>Lancaster</addr-line>, <country>United Kingdom</country>
				</aff>
				<aff id="aff5">
					<label>
						<sup>5</sup>
					</label>Hamad Bin Khalifa University (HBKU), <addr-line>Doha</addr-line>, <country>Qatar</country>
				</aff>
				<author-notes>
					<fn fn-type="edited-by">
						<p>
							<bold>Edited by:</bold>
							<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/845367/overview">Bhavya Kailkhura</ext-link>, United States Department of Energy (DOE), United States</p>
					</fn>
					<fn fn-type="edited-by">
						<p>
							<bold>Reviewed by:</bold>
							<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1048357/overview">Giovanni Apruzzese</ext-link>, University of Liechtenstein, Liechtenstein</p>
						<p>
							<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1058640/overview">Cheng Chen</ext-link>, The University of Utah, United States</p>
					</fn>
					<corresp id="c001">&#x2a;Correspondence: Adnan Qayyum, <email>adnan.qayyum@itu.edu.pk</email>
					</corresp>
					<fn fn-type="other" id="fn001">
						<p>Specialty section: This article was submitted to Machine Learning and Artificial Intelligence, a section of the journal Frontiers in Big Data</p>
					</fn>
				</author-notes>
				<pub-date pub-type="epub">
					<day>12</day>
					<month>11</month>
					<year>2020</year>
				</pub-date>
				<pub-date pub-type="collection">
					<year>2020</year>
				</pub-date>
				<volume>3</volume>
				<elocation-id>587139</elocation-id>
				<history>
					<date date-type="received">
						<day>24</day>
						<month>07</month>
						<year>2020</year>
					</date>
					<date date-type="accepted">
						<day>08</day>
						<month>10</month>
						<year>2020</year>
					</date>
				</history>
				<permissions>
					<copyright-statement>Copyright &#x00A9; 2020 Qayyum, Ijaz, Usama, Iqbal, Qadir, Elkhatib and Al-Fuqaha</copyright-statement>
					<copyright-holder>Qayyum, Ijaz, Usama, Iqbal, Qadir, Elkhatib and Al-Fuqaha</copyright-holder>
					<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
						<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
					</license>
				</permissions>
				<abstract>
					<p>With the advances in machine learning (ML) and deep learning (DL) techniques, and the potency of cloud computing in offering services efficiently and cost-effectively, Machine Learning as a Service (MLaaS) cloud platforms have become popular. In addition, there is increasing adoption of third-party cloud services for outsourcing training of DL models, which requires substantial costly computational resources (e.g., high-performance graphics processing units (GPUs)). Such widespread usage of cloud-hosted ML/DL services opens a wide range of attack surfaces for adversaries to exploit the ML/DL system to achieve malicious goals. In this article, we conduct a systematic evaluation of literature of cloud-hosted ML/DL models along two important dimensions&#x2014;<italic>attacks</italic> and <italic>defenses</italic>&#x2014;related to their security. Our systematic review identified a total of 31 related articles out of which 19 focused on attack, six focused on defense, and six focused on both attack and defense. Our evaluation reveals that there is an increasing interest from the research community in attacking and defending against different attacks on Machine Learning as a Service platforms. In addition, we identify the limitations and pitfalls of the analyzed articles and highlight open research issues that require further investigation.</p>
				</abstract>
				<kwd-group>
					<kwd>Machine Learning as a Service</kwd>
					<kwd>cloud-hosted machine learning models</kwd>
					<kwd>machine learning security</kwd>
					<kwd>cloud machine learning security</kwd>
					<kwd>systematic review</kwd>
					<kwd>attacks</kwd>
					<kwd>defenses</kwd>
				</kwd-group>
				<counts>
					<page-count count="0"/>
				</counts>
			</article-meta>
		</front>
		<body>
			<sec id="s1">
				<label>1</label>
				<title>Introduction</title>
				<p>In recent years, machine learning (ML) techniques have been successfully applied to a wide range of applications, significantly outperforming previous state-of-the-art methods in various domains: for example, image classification, face recognition, and object detection. These ML techniques&#x2014;in particular deep learning (DL)&#x2013;based ML techniques&#x2014;are resource intensive and require a large amount of training data to accomplish a specific task with good performance. Training DL models on large-scale datasets is usually performed using high-performance graphics processing units (GPUs) and tensor processing units. However, keeping in mind the cost of GPUs/Tensor Processing Units and the fact that small businesses and individuals cannot afford such computational resources, the training of deep models is typically outsourced to clouds, which is referred to in the literature as <italic>&#x201c;Machine Learning as a Service&#x201d;</italic> (MLaaS).</p>
				<p>MLaaS refers to different ML services that are offered as a component of cloud computing services, for example, predictive analytics, face recognition, natural language services, and data modeling APIs. MLaaS allows users to upload their data and model for training at the cloud. In addition to training, cloud-hosted ML services can also be used for inference purposes, that is, models can be deployed on the cloud environments; the system architecture of a typical MLaaS is shown in <xref ref-type="fig" rid="F1">Figure 1</xref>.</p>
				<fig id="F1" position="float">
					<label>FIGURE 1</label>
					<caption>
						<p>An illustration of a typical cloud-based ML or machine learning as a service (MLaaS) architecture.</p>
					</caption>
					<graphic xlink:href="fdata-03-587139-g001.tif"/>
				</fig>
				<p>MLaaS<xref ref-type="fn" rid="FN1">
						<sup>1</sup>
					</xref> can help reduce the entry barrier to the use of ML and DL through access to managed services of wide hardware heterogeneity and incredible horizontal scale. MLaaS is currently provided by several major organizations such as Google, Microsoft, and Amazon. For example, Google offers Cloud ML Engine<xref ref-type="fn" rid="FN2">
						<sup>2</sup>
					</xref> that allows developers and data scientists to upload training data and model which is trained on the cloud in the <italic>Tensorflow</italic>
					<xref ref-type="fn" rid="FN3">
						<sup>3</sup>
					</xref> environment. Similarly, Microsoft offers Azure Batch AI<xref ref-type="fn" rid="FN4">
						<sup>4</sup>
					</xref>&#x2014;a cloud-based service for training DL models using different frameworks supported by both Linux and Windows operating systems and Amazon offers a cloud service named Deep Learning AMI (DLAMI)<xref ref-type="fn" rid="FN5">
						<sup>5</sup>
					</xref> that provides several pre-built DL frameworks (e.g., MXNet, Caffe, Theano, and Tensorflow) that are available in Amazon&#x2019;s EC2 cloud computing infrastructure. Such cloud services are popular among researchers as evidenced by the price lifting of Amazon&#x2019;s p2.16xlarge instance to the maximum possible&#x2014;two days before the deadline of NeurIPS 2017 (the largest research venue on ML)&#x2014;indicating that a large number of users request to reserve instances.</p>
				<p>In addition to MLaaS services that allow users to upload their model and data for training on the cloud, <italic>transfer learning</italic> is another strategy to reduce computational cost in which a pretrained model is fine-tuned for a new task (using a new dataset). Transfer learning is widely applied for image recognition tasks using a convolutional neural network (CNN). A CNN model learns and encodes features like edges and other patterns. The learned weights and convolutional filters are useful for image recognition tasks in other domains and state-of-the-art results can be obtained with a minimal amount of training even on a single GPU. Moreover, various popular pretrained models such as AlexNet (<xref ref-type="bibr" rid="B23">Krizhevsky et al., 2012</xref>), VGG (<xref ref-type="bibr" rid="B42">Simonyan and Zisserman, 2015</xref>), and Inception (<xref ref-type="bibr" rid="B45">Szegedy et al., 2016</xref>) are available for download and fine-tuning online. Both of the aforementioned outsourcing strategies come with new security concerns. In addition, the literature suggests that different types of attacks can be realized on different components of the communication network as well (<xref ref-type="bibr" rid="B48">Usama et al., 2020a</xref>), for example, intrusion detection (<xref ref-type="bibr" rid="B13">Han et al., 2020</xref>; <xref ref-type="bibr" rid="B49">Usama et al., 2020b</xref>), network traffic classification (<xref ref-type="bibr" rid="B50">Usama et al., 2019</xref>), and malware detection systems (<xref ref-type="bibr" rid="B4">Chen et al., 2018</xref>). Moreover, adversarial ML attacks have also been devised for client-side ML classifiers, that is, Google&#x2019;s phishing pages filter (<xref ref-type="bibr" rid="B26">Liang et al., 2016</xref>).</p>
				<p>
					<italic>Contributions of the article:</italic> In this article, we analyze the security of MLaaS and other cloud-hosted ML/DL models and provide a systematic review of associated security challenges and solutions. To the best of our knowledge, this article is the first effort on providing a systematic review of the security of cloud-hosted ML models and services. The following are the major contributions of this article:<list list-type="order">
						<list-item>
							<p>We conducted a systematic evaluation of 31 articles related to MLaaS attacks and defenses.</p>
						</list-item>
						<list-item>
							<p>We investigated five themes of approaches aiming to attack MLaaS and cloud-hosted ML services.</p>
						</list-item>
						<list-item>
							<p>We examined five themes of defense methods for securing MLaaS and cloud-hosted ML services.</p>
						</list-item>
						<list-item>
							<p>We identified the pitfalls and limitations of the examined articles. Finally, we have highlighted open research issues that require further investigation.</p>
						</list-item>
					</list>
				</p>
				<p>
					<italic>Organization of the article:</italic> The rest of the article is organized as follows. The methodology adopted for the systematic review is presented in <xref ref-type="sec" rid="s2">Section 2</xref>. The results of the systematic review are presented in <xref ref-type="sec" rid="s3">Section 3</xref>. <xref ref-type="sec" rid="s4">Section 4</xref> presents various security challenges associated with cloud-hosted ML models and potential solutions for securing cloud-hosted ML models are presented in <xref ref-type="sec" rid="s5">Section 5</xref>. The pitfalls and limitations of the reviewed approaches are discussed in <xref ref-type="sec" rid="s6">Section 6</xref>. We briefly reflect on our methodology to identify any threats to the validity in <xref ref-type="sec" rid="s8">Section 8</xref> and various open research issues that require further investigation are highlighted in <xref ref-type="sec" rid="s7">Section 7</xref>. Finally, we conclude the article in <xref ref-type="sec" rid="s9">Section 9</xref>.</p>
			</sec>
			<sec sec-type="materials|methods" id="s2">
				<label>2</label>
				<title>Review Methodology</title>
				<p>In this section, we present the research objectives and the adopted methodology for the systematic review. The purpose of this article is to identify and systematically review the state-of-the art research related to the security of the cloud-based ML/DL techniques. The methodology followed for this study is depicted in <xref ref-type="fig" rid="F2">Figure 2</xref>.</p>
				<fig id="F2" position="float">
					<label>FIGURE 2</label>
					<caption>
						<p>The methodology for systematic review.</p>
					</caption>
					<graphic xlink:href="fdata-03-587139-g002.tif"/>
				</fig>
				<sec id="s2-1">
					<label>2.1</label>
					<title>Research Objectives</title>
					<p>The following are the key objectives of this article.</p>
					<p>O1: To build upon the existing work around the security of cloud-based ML/DL methods and present a broad overview of the existing state-of-the-art literature related to MLaaS and cloud-hosted ML services.</p>
					<p>O2: To identify and present a taxonomy of different attack and defense strategies for cloud-hosted ML/DL models.</p>
					<p>O3: To identify the pitfalls and limitations of the existing approaches in terms of research challenges and opportunities.</p>
				</sec>
				<sec id="s2-2">
					<label>2.2</label>
					<title>Research Questions</title>
					<p>To achieve our objectives, we considered two important questions, which are described below, and conducted a systematic analysis of 31 articles.</p>
					<p>Q1: What are the well-known attacks on cloud-hosted/third-party ML/DL models?</p>
					<p>Q2: What are the countermeasures and defenses against such attacks?</p>
				</sec>
				<sec id="s2-3">
					<label>2.3</label>
					<title>Review Protocol</title>
					<p>We developed a review protocol to conduct the systematic review; the details are described below.</p>
					<sec id="s2-3-1">
						<label>2.3.1</label>
						<title>Search Strategy and Searching Phase</title>
						<p>To build a knowledge base and extract the relevant articles, eight major publishers and online repositories were queried that include ACM Digital Library, IEEE Xplore, ScienceDirect, international conference on machine learning, international conference on learning representations, journal of machine learning research, neural information processing systems, USENIX, and arXiv. As we added non-peer&#x2013;reviewed articles from the electronic preprint archive (arXiv), we (AQ and AI) performed the critical appraisal using the AACODS checklist, which is designed to enable the evaluation and critical appraisal of gray literature (<xref ref-type="bibr" rid="B47">Tyndall, 2010</xref>).</p>
						<p>In the initial phase, we queried main libraries using a set of different search terms that evolved using an iterative process to maximize the number of relevant articles. To achieve optimal sensitivity, we used a combination of words: attack, poisoning, Trojan attack, contamination, model inversion, evasion, backdoor, model stealing, black box, ML, neural networks, MLaaS, cloud computing, outsource, third party, secure, robust, and defense. The combinations of search keywords used are depicted in <xref ref-type="fig" rid="F3">Figure 3</xref>. We then created search strategies with controlled or index terms given in <xref ref-type="fig" rid="F3">Figure 3</xref>. Please note that no lower limit for the publication date was applied; the last search date was June 2020. The researchers (WI and AI) searched additional articles through citations and by snowballing on Google Scholar. Any disagreement was adjudicated by the third reviewer (AQ). Finally, articles focusing on the attack/defense for cloud-based ML models were retrieved.</p>
						<fig id="F3" position="float">
							<label>FIGURE 3</label>
							<caption>
								<p>Search queries used to identify publications to include in the systematic review.</p>
							</caption>
							<graphic xlink:href="fdata-03-587139-g003.tif"/>
						</fig>
					</sec>
					<sec id="s2-3-2">
						<label>2.3.2</label>
						<title>Inclusion and Exclusion Criteria</title>
						<p>The inclusion and exclusion criteria followed for this systematic review are defined below.</p>
						<sec id="s2-3-2-1">
							<label>2.3.2.1</label>
							<title>Inclusion Criteria</title>
							<p>The following are the key points that we considered for screening retrieved articles as relevant for conducting a systematic review.<list list-type="bullet">
									<list-item>
										<p>We included all articles relevant to the research questions and published in the English language that discusses the attacks on cloud-based ML services, for example, offered by cloud computing service providers.</p>
									</list-item>
									<list-item>
										<p>We then assessed the eligibility of the relevant articles by identifying whether they discussed either attack or defense for cloud-based ML/DL models.</p>
									</list-item>
									<list-item>
										<p>Comparative studies that compare the attacks and robustness against different well-known attacks on cloud-hosted ML services (poisoning attacks, black box attacks, Trojan attacks, backdoor attacks, contamination attacks, inversion, stealing, and invasion attacks).</p>
									</list-item>
									<list-item>
										<p>Finally, we categorized the selected articles into three categories, that is, articles on attacks, articles on defenses, and articles on attacks and defenses.</p>
									</list-item>
								</list>
							</p>
						</sec>
						<sec id="s2-3-2-2">
							<label>2.3.2.2</label>
							<title>Exclusion Criteria</title>
							<p>The exclusion criteria are outlined below.<list list-type="bullet">
									<list-item>
										<p>Articles that are written in a language other than English.</p>
									</list-item>
									<list-item>
										<p>Articles not available in full text.</p>
									</list-item>
									<list-item>
										<p>Secondary studies (e.g., systematic literature reviews, surveys, editorials, and abstracts or short papers) are not included.</p>
									</list-item>
									<list-item>
										<p>Articles that do not discuss attacks and defenses for cloud-based/third-party ML services, that is, we only consider those articles which have proposed an attack or defense for a cloud-hosted ML or MLaaS service.</p>
									</list-item>
								</list>
							</p>
						</sec>
					</sec>
					<sec id="s2-3-3">
						<label>2.3.3</label>
						<title>Screening Phase</title>
						<p>For the screening of articles, we employ two phases based on the content of the retrieved articles: 1) title and abstract screening and 2) full text of the publication. Please note that to avoid bias and to ensure that the judgment about the relevancy of articles is entirely based on the content of the publications, we intentionally do not consider authors, publication type (e.g., conference and journal), and publisher (e.g., IEEE and ACM). Titles and abstracts might not be true reflectors of the articles&#x2019; contents; however, we concluded that our review protocol is sufficient to avoid provenance-based bias.</p>
						<p>It is very common that the same work got published in multiple venues, for example, conference papers are usually extended to journals. In such cases, we only consider the original article. In the screening phase, every article was screened by at least two authors of this article that were tasked to annotate the articles as either relevant, not relevant, or need further investigation, which was finalized by the discussion between the authors until any such article is either marked relevant or not relevant. Only original technical articles are selected, while survey and review articles are ignored. Finally, all selected publications were thoroughly read by the authors for categorization and thematic analysis.</p>
					</sec>
				</sec>
			</sec>
			<sec id="s3">
				<label>3</label>
				<title>Review Results</title>
				<sec id="s3-1">
					<label>3.1</label>
					<title>Overview of the Search and Selection Process Outcome</title>
					<p>The search using the aforementioned strategy identified a total of 4,384 articles. After removing duplicate articles and screening titles and abstracts, the overall number of articles was reduced to 384. A total of 230 articles did not meet the inclusion criteria and were therefore excluded. From the remaining 154 articles, 123 articles did not discuss attack/defense for third-party cloud-hosted ML models and were excluded as well. Of the remaining articles, a total of 31 articles are identified as relevant. Reasons for excluding articles were documented and reported in a PRISMA flow diagram, depicted in <xref ref-type="fig" rid="F4">Figure 4</xref>. These articles were categorized into three classes, that is, articles that are specifically focused on attacks, articles that are specifically focused on defenses, and articles that considered both attacks and defenses, containing 19, six, and six articles, respectively.</p>
					<fig id="F4" position="float">
						<label>FIGURE 4</label>
						<caption>
							<p>Flowchart of systematic review and categorization.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g004.tif"/>
					</fig>
				</sec>
				<sec id="s3-2">
					<label>3.2</label>
					<title>Overview of the Selected Studies</title>
					<p>The systematic review eventually identified a set of 31 articles related to cloud-based ML/DL models and MLaaS, which we categorized into three classes as mentioned above and shown in <xref ref-type="fig" rid="F4">Figure 4</xref>. As shown in <xref ref-type="fig" rid="F5">Figure 5</xref>, a significant portion of the selected articles were published in conferences (41.94%); comparatively, a much smaller proportion of these articles were published in journals or transactions (19.35%). The percentage of gray literature (i.e., non-peer&#x2013;reviewed articles) is 25.81%. Yet, a very small proportion of publications are published in symposia (6.45%), and this percentage is the same for workshop papers. The distribution of selected publications by their types over the years is shown in <xref ref-type="fig" rid="F6">Figure 6</xref>. The figure depicts that the interest in the security of cloud-hosted ML/DL models increased in the year 2017 and was at a peak in the year 2018 and was slightly lower in the year 2019 as compared to 2018. Also, the majority of the articles during these years were published in conferences. The distribution of selected publications by their publishers over the years is depicted in <xref ref-type="fig" rid="F7">Figure 7</xref>; the figure shows that the majority of the publications have been published at IEEE, ACM, and arXiv. There is a similar trend in the number of articles in the years 2017, 2018, and 2019 as discussed previously.</p>
					<fig id="F5" position="float">
						<label>FIGURE 5</label>
						<caption>
							<p>Distribution of selected publications according to their types.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g005.tif"/>
					</fig>
					<fig id="F6" position="float">
						<label>FIGURE 6</label>
						<caption>
							<p>Distribution of selected publications by their types over the years.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g006.tif"/>
					</fig>
					<fig id="F7" position="float">
						<label>FIGURE 7</label>
						<caption>
							<p>Distribution of selected publications by their publishers over the years.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g007.tif"/>
					</fig>
				</sec>
				<sec id="s3-3">
					<label>3.3</label>
					<title>Some Partially Related Non-Selected Studies: A Discussion</title>
					<p>We have described our inclusion and exclusion criteria that help us to identify relevant articles. We note, however, that some seemingly relevant articles failed to meet the inclusion criteria. Here, we briefly describe a few such articles to give a rationale for why they were not included.<list list-type="bullet">
							<list-item>
								<p>
									<xref ref-type="bibr" rid="B26">Liang et al. (2016)</xref> investigated the security challenges for the client-side classifiers via a case study on the Google&#x2019;s phishing pages filter, a very widely used classifier for automatically detecting unknown phishing pages. They devised an attack that is not relevant to the cloud-based service.</p>
							</list-item>
							<list-item>
								<p>
									<xref ref-type="bibr" rid="B9">Demetrio et al. (2020)</xref> presented WAF-A-MoLE, a tool that models the presence of an adversary. This tool leverages a set of mutation operators that alter the syntax of a payload without affecting the original semantics. Using the results, the authors demonstrated that ML-based WAFs are exposed to a concrete risk of being bypassed. However, this attack is not associated with any cloud-based services.</p>
							</list-item>
							<list-item>
								<p>Authors in <xref ref-type="bibr" rid="B2">Apruzzese et al. (2019)</xref> discussed adversarial attacks where the machine learning model is compromised to induce an output favorable to the attacker. These attacks are realized in a different setting as compared to the scope of this systematic review, as we only included the articles which discuss the attack or defense when the cloud is outsourcing its services as MLaaS.</p>
							</list-item>
							<list-item>
								<p>
									<xref ref-type="bibr" rid="B13">Han et al. (2020)</xref> conducted the first systematic study of the practical traffic space evasion attack on learning-based network intrusion detection systems; again it is out of the inclusion criteria of our work.</p>
							</list-item>
							<list-item>
								<p>
									<xref ref-type="bibr" rid="B4">Chen et al. (2018)</xref> designed and evaluated three types of attackers targeting the training phases to poison their detection system. To address this threat, the authors proposed the detection system, KuafuDet, and showed that it significantly reduces false negatives and boosts the detection accuracy.</p>
							</list-item>
							<list-item>
								<p>
									<xref ref-type="bibr" rid="B43">Song et al. (2020)</xref> presented a federated defense approach for mitigating the effect of adversarial perturbations in a federated learning environment. This article can be potentially relevant for our study as they address the problem of defending cloud-hosted ML models; however, instead of using a third-party service, the authors conducted the experiments on a single computer system in a simulated environment; therefore, this study is not included in the analysis of this article.</p>
							</list-item>
							<list-item>
								<p>In a similar study, <xref ref-type="bibr" rid="B55">Zhang et al. (2019)</xref> presented a defense mechanism for defending adversarial attacks on cloud-aided automatic speech recognition (ASR); however, it is not explicitly stated that the cloud is outsourcing ML services and also which ML/DL model or MLaaS was used in experiments.</p>
							</list-item>
						</list>
					</p>
				</sec>
			</sec>
			<sec id="s4">
				<label>4</label>
				<title>Attacks on Cloud-Hosted Machine Learning Models (Q1)</title>
				<p>In this section, we present the findings from the systematically selected articles that aim at attacking cloud-hosted/third-party ML/DL models.</p>
				<sec id="s4-1">
					<label>4.1</label>
					<title>Attacks on Cloud-Hosted Machine Learning Models: Thematic Analysis</title>
					<p>In ML practice, it is very common to outsource the training of ML/DL models to third-party services that provide high computational resources on the cloud. Such services enable ML practitioners to upload their models along with the training data, and the models are then trained on the cloud. Although such services have clear benefits for reducing the training and inference time, these services can easily be compromised; to this end, different types of attacks against these services have been proposed in the literature. In this section, we present the thematic analysis of 19 articles that are focused on attacking cloud-hosted ML/DL models. These articles are classified into five major themes: 1) attack type, 2) threat model, 3) attack method, 4) target model(s), and 5) dataset.</p>
					<p>
						<italic>Attack type:</italic> A wide variety of attacks have been proposed in the literature. These are listed below with their descriptions provided in the next section.<list list-type="bullet">
							<list-item>
								<p>Adversarial attacks (<xref ref-type="bibr" rid="B3">Brendel et al., 2017</xref>);</p>
							</list-item>
							<list-item>
								<p>Backdoor attacks<xref ref-type="fn" rid="FN6">
										<sup>6</sup>
									</xref> (<xref ref-type="bibr" rid="B5">Chen et al., 2017</xref>; <xref ref-type="bibr" rid="B12">Gu et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>Cyber kill chain&#x2013;based attack (<xref ref-type="bibr" rid="B30">Nguyen, 2017</xref>);</p>
							</list-item>
							<list-item>
								<p>Data manipulation attacks (<xref ref-type="bibr" rid="B27">Liao et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Evasion attacks (<xref ref-type="bibr" rid="B16">Hitaj et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>Exploration attacks (<xref ref-type="bibr" rid="B39">Sethi and Kantardzic, 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Model extraction attacks (<xref ref-type="bibr" rid="B8">Correia-Silva et al., 2018</xref>; <xref ref-type="bibr" rid="B22">Kesarwani et al., 2018</xref>; <xref ref-type="bibr" rid="B21">Joshi and Tammana, 2019</xref>; <xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>Model inversion attacks (<xref ref-type="bibr" rid="B53">Yang et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>Model-reuse attacks (<xref ref-type="bibr" rid="B19">Ji et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Trojan attacks (<xref ref-type="bibr" rid="B29">Liu et al., 2018</xref>).</p>
							</list-item>
						</list>
					</p>
					<p>
						<italic>Threat model:</italic> Cloud ML attacks are based on different threat models, with the salient types and examples listed below.<list list-type="bullet">
							<list-item>
								<p>black box attacks (no knowledge) (<xref ref-type="bibr" rid="B3">Brendel et al., 2017</xref>; <xref ref-type="bibr" rid="B5">Chen et al., 2017</xref>; <xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>; <xref ref-type="bibr" rid="B8">Correia-Silva et al., 2018</xref>; <xref ref-type="bibr" rid="B39">Sethi and Kantardzic, 2018</xref>; <xref ref-type="bibr" rid="B16">Hitaj et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>white box attacks (full knowledge) (<xref ref-type="bibr" rid="B27">Liao et al., 2018</xref>; <xref ref-type="bibr" rid="B29">Liu et al., 2018</xref>; <xref ref-type="bibr" rid="B12">Gu et al., 2019</xref>; <xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>);</p>
							</list-item>
							<list-item>
								<p>gray box attacks (partial knowledge) (<xref ref-type="bibr" rid="B19">Ji et al., 2018</xref>; <xref ref-type="bibr" rid="B22">Kesarwani et al., 2018</xref>).</p>
							</list-item>
						</list>
					</p>
					<p>
						<italic>Attack method:</italic> In each article, a different type of method is proposed for attacking cloud-hosted ML/DL models; a brief description of these methods is presented in <xref ref-type="table" rid="T1">Table 1</xref> and is discussed in detail in the next section.</p>
					<table-wrap id="T1" position="float">
						<label>TABLE 1</label>
						<caption>
							<p>Summary of the state-of-the art attack types for cloud-based/third-party ML/DL models.</p>
						</caption>
						<table frame="hsides" rules="groups">
							<thead>
								<tr>
									<th>Author(s)</th>
									<th align="center">Attack type</th>
									<th align="center">Method</th>
									<th align="center">Target model (s)</th>
									<th align="center">Threat model</th>
									<th align="center">Data</th>
								</tr>
							</thead>
							<tbody>
								<tr>
									<td>(<xref ref-type="bibr" rid="B3">Brendel et al., 2017</xref>)</td>
									<td align="center">Adversarial attack</td>
									<td align="left">Presented a decision-based attack, i.e., the boundary attack</td>
									<td align="left">Two ML classifiers from <ext-link ext-link-type="uri" xlink:href="http://Clarifai.com">Clarifai.com</ext-link>, i.e., brand and celebrity recognition</td>
									<td align="left">Black box</td>
									<td align="left">Two datasets: Natural images and celebrities</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B36">Saadatpanah et al., 2019</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Crafted adversarial examples for copyright detection system</td>
									<td align="left">YouTube content ID and AudioTag copyright</td>
									<td align="left">White box and black box</td>
									<td align="center">N/A</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Proposed two targeted attacks for video labeling and shot detection</td>
									<td align="left">Google cloud video intelligence API</td>
									<td align="left">Black box</td>
									<td align="center">&#x2014;</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B22">Kesarwani et al., 2018</xref>)</td>
									<td align="center">Extraction attack</td>
									<td align="left">Used information gain to measure model learning rate</td>
									<td align="left">Decision tree deployed on BigML platform</td>
									<td align="left">Gray box</td>
									<td align="left">Four BigML datasets, IRS tax pattern, GSS survey, email importance, steak survey</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B8">Correia-Silva et al., 2018</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Knowledge extraction by querying the model with unlabeled data samples and then used responses to create fake dataset and model</td>
									<td align="left">Three local CNN models for visual recognition for facial expression, object, and crosswalk classification and Microsoft Azure Emotion API</td>
									<td align="left">Black box</td>
									<td align="left">Used three datasets for facial expression recognition, object, and satellite crosswalk classification</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Performed model extraction attacks on the homomorphic encryption-based protocol for preserving SVR-based indoor localization</td>
									<td align="left">Support vector regressor (SVR) and SVM</td>
									<td align="left">White box</td>
									<td align="left">California housing, Boston house prices, UJIIndoorLoc, and IPIN 2016 tutorial</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B21">Joshi and Tammana, 2019</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Proposed a variant of gradient driven adaptive learning rate (GDALR) for stealing MLaaS models</td>
									<td align="left">Used three different models</td>
									<td align="left">Black box</td>
									<td align="left">Iris, liver disease, and land satellite datasets</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B39">Sethi and Kantardzic, 2018</xref>)</td>
									<td align="center">Exploration attack</td>
									<td align="left">Presented a seed-explore-exploit framework for generating adversarial samples</td>
									<td align="left">Google cloud prediction platform</td>
									<td align="left">Black box</td>
									<td align="left">10 real-world datasets</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B12">Gu et al., 2019</xref>)</td>
									<td align="center">Backdoor attack</td>
									<td align="left">Realized attack by poisoning training samples and labels</td>
									<td align="left">MNIST and a U.S. street sign classifier, i.e., Faster-RCNN with outsourced training and transfer learning</td>
									<td align="left">White box</td>
									<td align="left">MNIST and U.S. traffic signs dataset</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B5">Chen et al., 2017</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Used poisoning strategies to realize a targeted attack and proposed two types of backdoor poisoning attacks</td>
									<td align="left">Two face recognition models, i.e., DeepID and VGG-Face</td>
									<td align="left">Black box</td>
									<td align="left">YouTube aligned face dataset</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B29">Liu et al., 2018</xref>)</td>
									<td align="center">Trojan attack</td>
									<td align="left">Proposed stealth infection on neural network-based Trojan attack</td>
									<td align="left">Cloud-based intelligent supply chain, i.e., MLaaS</td>
									<td align="left">White box</td>
									<td align="left">Fashion-MNIST</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B10">Gong et al., 2019</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Proposed real-time adversarial example crafting procedure</td>
									<td align="left">Voice/speech enabled devices and Google Speech</td>
									<td align="left">Gray box</td>
									<td align="left">Voice-command dataset</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B19">Ji et al., 2018</xref>)</td>
									<td align="center">Model reuse attack</td>
									<td align="left">Presented empirical evaluation of model-reuse attacks on primitive models and realizing attack by generating semantically similar neighbors and identifying salient features</td>
									<td align="left">Pretrained primitive models for speech recognition, autonomous steering, face verification, and skin cancer screening</td>
									<td align="left">Gray box</td>
									<td align="left">Speech commands, udacity self-driving car challenge, VGG Face2, and International Skin Imaging Collaboration (ISIC) datasets</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B27">Liao et al., 2018</xref>)</td>
									<td align="center">Data manipulation attack</td>
									<td align="left">Studied data manipulation attacks for stealthily manipulating ML and DL models using transfer learning and gradient descent</td>
									<td align="left">Cloud-hosted ML and DL models</td>
									<td align="left">White box</td>
									<td align="left">Enron spam and MNIST</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B38">Sehwag et al., 2019</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Crafted out-of-distribution exploratory adversarial examples to compromise ML/DL models of Clarifai&#x2019;s content moderation system in the cloud</td>
									<td align="left">Cloud-hosted ML and DL models</td>
									<td align="left">White box and black box</td>
									<td align="left">MNIST, CIFAR, and ImageNet</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B30">Nguyen, 2017</xref>)</td>
									<td align="center">Cyber kill chain attack</td>
									<td align="left">Proposed a high-level threat model for ML cyber kill chain and provided proof of concept</td>
									<td align="left">IBM visual recognition MLaaS (i.e., cognitive classifier for classifying cats and female lions)</td>
									<td align="left">N/A</td>
									<td align="left">Project Wolf Eye</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B15">Hilprecht et al., 2019</xref>)</td>
									<td align="center">Membership inference attack</td>
									<td align="left">Monte Carlo based attack and membership inference attack on GAN.</td>
									<td align="left">Amazon web services p2</td>
									<td align="left">Black box</td>
									<td align="left">MNIST, fashion-MNIST, and CIFAR</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B16">Hitaj et al., 2019</xref>)</td>
									<td align="center">Evasion attacks</td>
									<td align="left">Realized evasion attacks using two ensemble neural networks</td>
									<td align="left">Watermarking detection models</td>
									<td align="left">Black box</td>
									<td align="left">MNIST</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B53">Yang et al., 2019</xref>)</td>
									<td align="center">Inversion attacks</td>
									<td align="left">Constructed an auxiliary set for training the inversion model</td>
									<td align="left">CNN</td>
									<td align="left">Gray box</td>
									<td align="left">FaceScrub, CelebA, and CIFAR-10</td>
								</tr>
								<tr>
									<td/>
									<td align="center">&#x2014;</td>
									<td align="center">&#x2014;</td>
									<td align="center">&#x2014;</td>
									<td align="center">&#x2014;</td>
									<td align="center">&#x2014;</td>
								</tr>
							</tbody>
						</table>
					</table-wrap>
					<p>
						<italic>Target model(s):</italic> Considered studies have used different MLaaS services (e.g., Google Cloud ML Services (<xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>; <xref ref-type="bibr" rid="B37">Salem et al., 2018</xref>; <xref ref-type="bibr" rid="B39">Sethi and Kantardzic, 2018</xref>), ML models of BigML Platform (<xref ref-type="bibr" rid="B22">Kesarwani et al., 2018</xref>), IBM&#x2019;s visual recognition (<xref ref-type="bibr" rid="B30">Nguyen, 2017</xref>), and Amazon Prediction APIs (<xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>; <xref ref-type="bibr" rid="B53">Yang et al., 2019</xref>)).</p>
					<p>
						<italic>Dataset:</italic> These attacks have been realized using different datasets ranging from small size datasets (e.g., MNIST (<xref ref-type="bibr" rid="B12">Gu et al., 2019</xref>) and Fashion-MNIST (<xref ref-type="bibr" rid="B29">Liu et al., 2018</xref>)) to large size datasets (e.g., YouTube Aligned Face Dataset (<xref ref-type="bibr" rid="B5">Chen et al., 2017</xref>), Project Wolf Eye (<xref ref-type="bibr" rid="B30">Nguyen, 2017</xref>), and Iris dataset (<xref ref-type="bibr" rid="B21">Joshi and Tammana, 2019</xref>)). Other datasets include California Housing, Boston House Prices, UJIIndoorLoc, and IPIN 2016 Tutorial (<xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>), FaceScrub, CelebA, and CIFAR-10 (<xref ref-type="bibr" rid="B53">Yang et al., 2019</xref>). A summary of thematic analyses of these attacks is presented in <xref ref-type="table" rid="T1">Table 1</xref> and briefly described in the next section.</p>
				</sec>
				<sec id="s4-2">
					<label>4.2</label>
					<title>Taxonomy of Attacks on Cloud-Hosted Machine Learning Models</title>
					<p>In this section, we present a taxonomy and description of different attacks described above in thematic analysis. A taxonomy of attacks on cloud-hosted ML/DL models is depicted in <xref ref-type="fig" rid="F8">Figure 8</xref> and is described next.</p>
					<fig id="F8" position="float">
						<label>FIGURE 8</label>
						<caption>
							<p>A taxonomy of attacks on cloud-hosted ML/DL models.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g008.tif"/>
					</fig>
					<sec id="s4-2-1">
						<label>4.2.1</label>
						<title>Adversarial Attacks</title>
						<p>In recent years, DL models have been found vulnerable to carefully crafted imperceptible adversarial examples (<xref ref-type="bibr" rid="B11">Goodfellow et al., 2014</xref>). For instance, a decision-based adversarial attack namely <italic>the boundary attack</italic> against two black box ML models trained for brand and celebrity recognition hosted at <ext-link ext-link-type="uri" xlink:href="http://Clarifai.com">Clarifai.com</ext-link> is proposed in (<xref ref-type="bibr" rid="B3">Brendel et al., 2017</xref>). The first model identifies brand names from natural images for 500 distinct brands and the second model recognizes over 10,000 celebrities. To date, a variety of adversarial example generation methods have been proposed in the literature; interested readers are referred to recent survey articles for a detailed taxonomy of different types of adversarial attacks (i.e., <xref ref-type="bibr" rid="B1">Akhtar and Mian, 2018</xref>; <xref ref-type="bibr" rid="B54">Yuan et al., 2019</xref>; <xref ref-type="bibr" rid="B33">Qayyum et al., 2020b</xref>; <xref ref-type="bibr" rid="B9">Demetrio et al., 2020</xref>).</p>
					</sec>
					<sec id="s4-2-2">
						<label>4.2.2</label>
						<title>Exploratory Attacks</title>
						<p>These attacks are inference time attacks in which an adversary attempts to evade the underlying ML/DL model, for example, by forcing the classifier (i.e., ML/DL model) to misclassify a positive sample as a negative one. Exploratory attacks do not harm the training data and only affect the model at test time. A data-driven exploratory attack using the <italic>Seed</italic>&#x2013;<italic>Explore</italic>&#x2013;<italic>Exploit</italic> strategy for evading Google&#x2019;s cloud prediction API considering black box settings is presented in (<xref ref-type="bibr" rid="B39">Sethi and Kantardzic, 2018</xref>). The performance evaluation of the proposed framework was performed using 10 real-world datasets.</p>
					</sec>
					<sec id="s4-2-3">
						<label>4.2.3</label>
						<title>Model Extraction Attacks</title>
						<p>In model extraction attacks, adversaries can query the deployed ML model and can use query&#x2013;response pairs for compromising future predictions and also, they can potentially realize privacy breaches of the training data and can steal the model by learning extraction queries. In <xref ref-type="bibr" rid="B22">Kesarwani et al. (2018)</xref>, the authors presented a novel method for quantifying the extraction status of models for users with an increasing number of queries, which aims to measure model learning rate using information gain observed by query and response streams of users. The key objective of the authors was to design a cloud-based system for monitoring model extraction status and warnings. The performance evaluation of the proposed method was performed using a decision tree model deployed on the BigML MLaaS platform for different adversarial attack scenarios. Similarly, a model extraction/stealing strategy is presented by <xref ref-type="bibr" rid="B8">Correia-Silva et al. (2018)</xref>. The authors queried the cloud-hosted DL model with random unlabeled samples and used their predictions for creating a fake dataset. Then they used the fake dataset for building a fake model by training an oracle (copycat) model in an attempt to achieve similar performance as that of the target model.</p>
					</sec>
					<sec id="s4-2-4">
						<label>4.2.4</label>
						<title>Backdooring Attacks</title>
						<p>In backdooring attacks, an adversary maliciously creates the trained model which performs as good as expected on the users&#x2019; training and validation data, but it performs badly on attacker input samples. The backdooring attacks on deep neural networks (DNNs) are explored and evaluated in (<xref ref-type="bibr" rid="B12">Gu et al., 2019</xref>). The authors first explored the properties of backdooring for a toy example and created a backdoor model for a handwritten digit classifier and then demonstrated that backdoors are powerful for DNNs by creating a backdoor model for a United States street sign classifier. Two scenarios were considered, that is, outsourced training of the model and transfer learning where an attacker can acquire a backdoor pretrained model online. In another similar study (<xref ref-type="bibr" rid="B5">Chen et al., 2017</xref>), a targeted backdoor attack for two state-of-the-art face recognition models, that is, DeepID (<xref ref-type="bibr" rid="B44">Sun et al., 2014</xref>) and VGG-Face (<xref ref-type="bibr" rid="B31">Parkhi et al., 2015</xref>) is presented. The authors proposed two categories of backdooring poisoning attacks, that is, input&#x2013;instance&#x2013;key attacks and pattern&#x2013;key attacks using two different data poisoning strategies, that is, input&#x2013;instance&#x2013;key strategies and pattern&#x2013;key strategies, respectively.</p>
					</sec>
					<sec id="s4-2-5">
						<label>4.2.5</label>
						<title>Trojan Attacks</title>
						<p>In Trojan attacks, the attacker inserts malicious content into the system that looks legitimate but can take over the control of the system. However, the purpose of Trojan insertion can be varied, for example, stealing, disruption, misbehaving, or getting intended behavior. In <xref ref-type="bibr" rid="B29">Liu et al. (2018)</xref>, the authors proposed a stealth infection on neural networks, namely, SIN2 to realize practical supply chain&#x2013;triggered neural Trojan attacks. Also, they proposed a variety of Trojan insertion strategies for agile and practical Trojan attacks. The proof of concept is demonstrated by developing a prototype of the proposed neural Trojan attack (i.e., SIN2) in Linux sandbox and used Torch (<xref ref-type="bibr" rid="B7">Collobert et al., 2011</xref>) ML/DL framework for building visual recognition models using the Fashion-MNIST dataset.</p>
					</sec>
					<sec id="s4-2-6">
						<label>4.2.6</label>
						<title>Model-Reuse Attacks</title>
						<p>In model-reuse attacks, an adversary creates a malicious model (i.e., adversarial model) that influences the host model to misbehave on targeted inputs (i.e., triggers) in an extremely predictable fashion, that is, getting a sample classified into a specific (intended) class. For instance, experimental evaluation of model-reuse attacks for four pretrained primitive DL models (i.e., speech recognition, autonomous steering, face verification, and skin cancer screening) is presented by <xref ref-type="bibr" rid="B19">Ji et al. (2018)</xref>.</p>
					</sec>
					<sec id="s4-2-7">
						<label>4.2.7</label>
						<title>Data Manipulation Attacks</title>
						<p>Those attacks in which training data are manipulated to get intended behavior by the ML/DL model are known as data manipulation attacks. Data manipulation attacks for stealthily manipulating traditional supervised ML techniques and logistic regression (LR) and CNN models are studied by <xref ref-type="bibr" rid="B27">Liao et al. (2018)</xref>. In the attack strategy, the authors added a new constraint on fully connected layers of the models and used gradient descent for retraining them, and other layers were frozen (i.e., were made non-trainable).</p>
					</sec>
					<sec id="s4-2-8">
						<label>4.2.8</label>
						<title>Cyber Kill Chain&#x2013;Based Attacks</title>
						<p>Kill chain is a term, originally used in the military, that defines the steps for attacking a target. In cyber kill chain&#x2013;based attacks, the cloud-hosted ML/DL models are attacked, for example, a high-level threat model targeting ML cyber kill chain is presented by <xref ref-type="bibr" rid="B30">Nguyen (2017)</xref>. Also, the authors provided proof of concept by providing a case study using IBM visual recognition MLaaS (i.e., cognitive classifier for classifying cats and female lions) and provided recommendations for ensuring secure and robust ML.</p>
					</sec>
					<sec id="s4-2-9">
						<label>4.2.9</label>
						<title>Membership Inference Attacks</title>
						<p>In a typical membership inference attack, for given input data and black box access to the ML model, an attacker attempts to figure out if the given input sample was part of the training set or not. To realize a membership inference attack against a target model, a classification model is trained for distinguishing between the predictions of the target model against the inputs on which it was trained and those on which it was not trained (<xref ref-type="bibr" rid="B41">Shokri et al., 2017</xref>).</p>
					</sec>
					<sec id="s4-2-10">
						<label>4.2.10</label>
						<title>Evasion Attacks</title>
						<p>Evasion attacks are inference time attacks in which an adversary attempts to modify the test data for getting the intended outcome from the ML/DL model. Two evasion attacks against watermarking techniques for DL models hosted as MLaaS have been presented by <xref ref-type="bibr" rid="B16">Hitaj et al. (2019)</xref>. The authors used five publicly available models and trained them for distinguishing between watermarked and clean (non-watermarked) images, that is, binary image classification tasks.</p>
					</sec>
					<sec id="s4-2-11">
						<label>4.2.11</label>
						<title>Model Inversion Attacks</title>
						<p>In model inversion attacks, an attacker tries to learn about training data using the model&#x2019;s outcomes. Two model inversion techniques have been proposed by <xref ref-type="bibr" rid="B53">Yang et al. (2019)</xref>, that is, training an inversion model using auxiliary set composed by utilizing adversary&#x2019;s background knowledge and truncation-based method for aligning the inversion model. The authors evaluated their proposed methods on a commercial prediction MLaaS named Amazon Rekognition.</p>
					</sec>
				</sec>
			</sec>
			<sec id="s5">
				<label>5</label>
				<title>Toward Securing Cloud-Hosted Machine Learning Models (Q2)</title>
				<p>In this section, we present the insights from the systematically selected articles that provide tailored defense against specific attacks and report the articles that, along with creating attacks, propose countermeasures against the attacks on cloud-hosted/third-party ML/DL models.</p>
				<sec id="s5-1">
					<label>5.1</label>
					<title>Defenses for Attacks on Cloud-Hosted Machine Learning Models: Thematic Analysis</title>
					<p>Leveraging cloud-based ML services for computational offloading and minimizing the communication overhead is accepted as a promising trend. While cloud-based prediction services have significant benefits, sharing the model and the training data raises many privacy and security challenges. Several attacks can compromise the model and data integrity, as described in the previous section. To avoid such issues, users can download the model and make inferences locally. However, this approach has certain drawbacks, including confidentiality issues, service providers cannot update the models, adversaries can use the model to develop evading strategies, and privacy of the user data is compromised. To outline the countermeasures against these attacks, we present the thematic analysis of six articles that are focused on defense against the tailored attacks for cloud-hosted ML/DL models or data. In addition, we also provide the thematic analysis of those six articles that propose defense against specific attacks. These articles are classified into five major themes: 1) attack type, 2) defense, 3) target model(s), 4) dataset, and 5) measured outcomes. The thematic analysis of these systematically reviewed articles that are focused on developing defense strategies against attacks is given below.</p>
					<p>
						<italic>Considered attacks for developing defenses:</italic> The defenses proposed in the reviewed articles are developed against the following specific attacks.<list list-type="bullet">
							<list-item>
								<p>Extraction attacks (<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al., 2016</xref>; <xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>);</p>
							</list-item>
							<list-item>
								<p>Inversion attacks (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>; <xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Adversarial attacks (<xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>; <xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>; <xref ref-type="bibr" rid="B35">Rouhani et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Evasion attacks (<xref ref-type="bibr" rid="B25">Lei et al., 2020</xref>);</p>
							</list-item>
							<list-item>
								<p>GAN attacks (<xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Privacy threat attacks (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>);</p>
							</list-item>
							<list-item>
								<p>Side channel and cache-timing attacks (<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Membership inference attacks (<xref ref-type="bibr" rid="B41">Shokri et al., 2017</xref>; <xref ref-type="bibr" rid="B37">Salem et al., 2018</xref>).</p>
							</list-item>
						</list>
					</p>
					<p>Most of the aforementioned attacks are elaborated in previous sections. However, in the selected articles that are identified as either defense or attack and defense articles, some attacks are specifically created, for instance, GAN attacks, side channel, cache-timing attack, privacy threats, etc. Therefore, the attacks are worth mentioning in this section to explain the specific countermeasures proposed against them in the defense articles.</p>
					<p>
						<italic>Defenses against different attacks:</italic> To provide resilience against these attacks, the authors of selected articles proposed different defense algorithms, which are listed below against each type of attack.<list list-type="bullet">
							<list-item>
								<p>Extraction attacks: MiniONN (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>), rounding confidence, differential, and ensemble methods (<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al., 2016</xref>);</p>
							</list-item>
							<list-item>
								<p>Adversarial attacks: ReDCrypt (<xref ref-type="bibr" rid="B35">Rouhani et al., 2018</xref>) and Arden (<xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>);</p>
							</list-item>
							<list-item>
								<p>Inversion attacks: MiniONN (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>) and image disguising techniques (<xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Privacy attacks: encryption-based defense (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>; <xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Side channel and cache-timing attacks: encryption-based defense (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>; <xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>);</p>
							</list-item>
							<list-item>
								<p>Membership inference attack: dropout and model stacking (<xref ref-type="bibr" rid="B37">Salem et al., 2018</xref>).</p>
							</list-item>
						</list>
					</p>
					<p>
						<italic>Target model(s):</italic> Different cloud-hosted ML/DL models have been used for the evaluation of the proposed defenses, as shown in <xref ref-type="table" rid="T2">Table 2</xref>.</p>
					<table-wrap id="T2" position="float">
						<label>TABLE 2</label>
						<caption>
							<p>Summary of attack types and corresponding defenses for cloud-based/third-party ML/DL models.</p>
						</caption>
						<table frame="hsides" rules="groups">
							<thead>
								<tr>
									<th>Author</th>
									<th align="center">Attack</th>
									<th align="center">Defense</th>
									<th align="center">Target model</th>
									<th align="center">Data</th>
									<th align="center">Measured outcomes</th>
								</tr>
							</thead>
							<tbody>
								<tr>
									<td>(<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>)</td>
									<td align="left">Extraction attack and inversion attack</td>
									<td align="left">MiniONN: a defense against information leakage in DNN to transform into an oblivious NN</td>
									<td align="left">Cloud-hosted DL models, neural network for cloud-based prediction services</td>
									<td align="left">MNIST and CIFAR-10</td>
									<td align="left">Response latency and message sizes</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B35">Rouhani et al., 2018</xref>)</td>
									<td align="left">Adversarial attacks</td>
									<td align="left">ReDCrypt: reconfigurable hardware-accelerated framework for the privacy-preserving</td>
									<td align="left">Cloud-hosted DL models</td>
									<td align="left">MNIST and MovieLens</td>
									<td align="left">Throughput</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Arden: To distribute DNN model computation among edge device and cloud data centers</td>
									<td align="left">Partial cloud-hosted DNN models</td>
									<td align="left">MNIST, SVHN, and CIFAR-10</td>
									<td align="left">Latency, accuracy, and privacy budget</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Incorporating randomness to video analysis algorithms</td>
									<td align="left">Google cloud video intelligence API</td>
									<td align="left">Videos comprising of adversarial examples</td>
									<td align="left">Histogram peaks to detect shot change</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>)</td>
									<td align="left">Inversion attack and GAN attack</td>
									<td align="left">Image disguising techniques to ensure the protection against model-based adversarial attacks</td>
									<td align="left">Cloud-hosted DL models</td>
									<td align="left">MNIST and CIFAR-10</td>
									<td align="left">Accuracy, average visual privacy, and Fano factor</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>)</td>
									<td align="left">Privacy threats due to raw cloud data</td>
									<td align="left">Homomorphic encryption to preserve the privacy and integrity of data in DNN</td>
									<td align="left">Cloud-based DNN</td>
									<td align="left">Crab dataset, fertility dataset, climate dataset</td>
									<td align="left">Accuracy and training time</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>)</td>
									<td align="left">Side channel and cache-timing attack</td>
									<td align="left">Secure logistic encryption along with hardware-based security enhancement by exploiting software guard extensions</td>
									<td align="left">Cloud-hosted LR models</td>
									<td align="left">Edinburgh MI, WI-Breast cancer, and MONK&#x2019;s prob</td>
									<td align="left">Area under the curve, complexity, and model training time</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B25">Lei et al., 2020</xref>)</td>
									<td align="left">Evasion attack</td>
									<td align="left">Pelican: similarity-based analysis of unknown website with the known phishing Web site</td>
									<td align="left">BitDefender&#x2019;s partial processing hosted on cloud</td>
									<td align="left">PhishTank, PhishNet</td>
									<td align="left">Similarity index</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al., 2016</xref>)</td>
									<td align="left">Extraction attack</td>
									<td align="left">Rounding confidences to some precision, differential privacy to protect training data elements, ensemble methods</td>
									<td align="left">ML models hosted on BigML and Amazon</td>
									<td align="left">102 categories flower dataset, face dataset, iris dataset, and traffic signs dataset</td>
									<td align="left">Success rate given the perturbation budget</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B41">Shokri et al., 2017</xref>)</td>
									<td align="left">Membership inference attack</td>
									<td align="left">Top <italic>k</italic> class model predictions, increase entropy, regularization and reducing precision of prediction vector</td>
									<td align="left">MLaaS classification models of Google and Amazon APIs</td>
									<td align="left">CIFAR-10, purchases, locations, Texas hospital stays, MNIST, UCI adults</td>
									<td align="left">Accuracy and precision</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B37">Salem et al., 2018</xref>)</td>
									<td align="center">&#x2014;</td>
									<td align="left">Dropout and model stacking to prevent overfitting</td>
									<td align="left">Google cloud prediction API</td>
									<td align="left">Used eight different datasets</td>
									<td align="left">Precision and recall</td>
								</tr>
								<tr>
									<td>(<xref ref-type="bibr" rid="B51">Wang et al., 2018a</xref>)</td>
									<td align="left">Misclassification attacks</td>
									<td align="left">Neuron distance model, ensemble method, dropout randomization</td>
									<td align="left">Google Cloud ML, Microsoft Cognitive Toolkit (CNTK), and PyTorch</td>
									<td align="left">102-Class VGG flower, face dataset, iris dataset, and traffic signs dataset, Google&#x2019;s InceptionV3</td>
									<td align="left">Accuracy and success rate</td>
								</tr>
							</tbody>
						</table>
					</table-wrap>
					<p>
						<italic>Dataset(s) used:</italic> The robustness of these defenses has been evaluated using various datasets ranging from small size datasets (e.g., MNIST (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>; <xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>; <xref ref-type="bibr" rid="B35">Rouhani et al., 2018</xref>; <xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>) and CIFAR-10 (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>; <xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>; <xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>)) to large size datasets (e.g., Iris dataset (<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al., 2016</xref>), fertility and climate dataset (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>), and breast cancer (<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>)). Other datasets include Crab dataset (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>), Face dataset and Traffic signs dataset (<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al., 2016</xref>), SVHN (<xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>), and Edinburgh MI, WI-Breast Cancer, and MONK&#x2019;s Prob (<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>). Each of the defense techniques discussed above is mapped in <xref ref-type="table" rid="T2">Table 2</xref> to the specific attack for which it was developed.</p>
					<p>
						<italic>Measured outcomes:</italic> The measured outcomes based on which the defenses are evaluated are response latency and message sizes (<xref ref-type="bibr" rid="B28">Liu et al., 2017</xref>; <xref ref-type="bibr" rid="B52">Wang et al., 2018b</xref>), throughput comparison (<xref ref-type="bibr" rid="B35">Rouhani et al., 2018</xref>), average on the cache miss rates per second (<xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>), AUC, space complexity to demonstrate approximated storage costs (<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>), classification accuracy of the model as well as running time (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>; <xref ref-type="bibr" rid="B40">Sharma and Chen, 2018</xref>), similarity index (<xref ref-type="bibr" rid="B25">Lei et al., 2020</xref>), and training time (<xref ref-type="bibr" rid="B14">Hesamifard et al., 2017</xref>; <xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>).</p>
				</sec>
				<sec id="s5-2">
					<label>5.2</label>
					<title>Taxonomy of Defenses on Cloud-Hosted Machine Learning Model Attacks</title>
					<p>In this section, we present a taxonomy and summary of different defensive strategies against attacks on cloud-hosted ML/DL models as described above in the thematic analysis. A taxonomy of these defense strategies is presented in <xref ref-type="fig" rid="F9">Figure 9</xref> and is described next.</p>
					<fig id="F9" position="float">
						<label>FIGURE 9</label>
						<caption>
							<p>Taxonomy of different defenses proposed against attacks on the third-party cloud-hosted machine learning (ML) or deep learning (DL) models.</p>
						</caption>
						<graphic xlink:href="fdata-03-587139-g009.tif"/>
					</fig>
					<sec id="s5-2-1">
						<label>5.2.1</label>
						<title>MiniONN</title>
						<p>DNNs are vulnerable to model inversion and extraction attacks. <xref ref-type="bibr" rid="B28">Liu et al. (2017)</xref> proposed that without making any changes to the training phase of the model it is possible to change the model into an oblivious neural network. They make the nonlinear function such as <italic>tanh</italic> and <italic>sigmoid</italic> function more flexible, and by training the models on several datasets, the authors demonstrated significant results with minimal loss in the accuracy. In addition, they also implemented the offline precomputation phase to perform encryption incremental operations along with the SIMD batch processing technique.</p>
					</sec>
					<sec id="s5-2-2">
						<label>5.2.2</label>
						<title>ReDCrypt</title>
						<p>A reconfigurable hardware-accelerated framework is proposed by <xref ref-type="bibr" rid="B35">Rouhani et al. (2018)</xref>, for protecting the privacy of deep neural models in cloud networks. The authors perform an innovative and power-efficient implementation of Yao&#x2019;s Garbled Circuit (GC) protocol on FPGAs for preserving privacy. The proposed framework is evaluated for different DL applications, and it has achieved up to 57-fold throughput gain per core.</p>
					</sec>
					<sec id="s5-2-3">
						<label>5.2.3</label>
						<title>Arden</title>
						<p>To offload the large portion of DNNs from the mobile devices to the clouds and to make the framework secure, a privacy-preserving mechanism Arden is proposed by <xref ref-type="bibr" rid="B52">Wang et al. (2018b)</xref>. While uploading the data to the mobile-cloud perturbation, noisy samples are included to make the data secure. To verify the robustness, the authors perform rigorous analysis based on three image datasets and demonstrated that this defense is capable to preserve the user privacy along with inference performance.</p>
					</sec>
					<sec id="s5-2-4">
						<label>5.2.4</label>
						<title>Image Disguising Techniques</title>
						<p>While leveraging services from the cloud GPU server, the adversary can realize an attack by introducing malicious created training data, perform model inversion, and use the model for getting desirable incentives and outcomes. To protect from such attacks and to preserve the data as well as the model, <xref ref-type="bibr" rid="B40">Sharma and Chen (2018)</xref> proposed an image disguising mechanism. They developed a toolkit that can be leveraged to calibrate certain parameter settings. They claim that the disguised images with block-wise permutation and transformations are resilient to GAN-based attack and model inversion attacks.</p>
					</sec>
					<sec id="s5-2-5">
						<label>5.2.5</label>
						<title>Homomorphic Encryption</title>
						<p>For making the cloud services of outsourced MLaaS secure, <xref ref-type="bibr" rid="B14">Hesamifard et al. (2017)</xref> proposed a privacy-preserving framework using homomorphic encryption. They trained the neural network using the encrypted data and then performed the encrypted predictions. The authors demonstrated that by carefully choosing the polynomials of the activation functions to adopt neural networks, it is possible to achieve the desired accuracy along with privacy-preserving training and classification.</p>
						<p>In a similar study, to preserve the privacy of outsourced biomedical data and computation on public cloud servers, <xref ref-type="bibr" rid="B20">Jiang et al. (2018)</xref> built a homomorphically encrypted model that reinforces the hardware security through Software Guard Extensions. They combined homomorphic encryption and Software Guard Extensions to devise a hybrid model for the security of the most commonly used model for biomedical applications, that is, LR. The robustness of the Secure LR framework is evaluated on various datasets, and the authors also compared its performance with state-of-the-art secure LR solutions and demonstrated its superior efficiency.</p>
					</sec>
					<sec id="s5-2-6">
						<label>5.2.6</label>
						<title>Pelican</title>
						<p>
							<xref ref-type="bibr" rid="B25">Lei et al. (2020)</xref> proposed three mutation-based evasion attacks and a sample-based collision attack in white-, gray-, and black box scenarios. They evaluated the attacks and demonstrated a 100% success rate of attack on Google&#x2019;s phishing page filter classifier, while a success rate of up to 81% for the transferability on Bitdefender TrafficLight. To deal with such attacks and to increase the robustness of classifiers, they proposed a defense method known as Pelican.</p>
					</sec>
					<sec id="s5-2-7">
						<label>5.2.7</label>
						<title>Rounding Confidences and Differential Privacy</title>
						<p>
							<xref ref-type="bibr" rid="B46">Tram&#xe8;r et al. (2016)</xref> presented the model extraction attacks against the online services of BigML and Amazon ML. The attacks are capable of model evasion, monetization, and can compromise the privacy of training data. The authors also proposed and evaluated countermeasures such as rounding confidences against equation-solving and decision tree pathfinding attacks; however, this defense has no impact on the regression tree model attack. For the preservation of training data, differential privacy is proposed; this defense reduces the ability of an attacker to learn insights about the training dataset. The impact of both defenses is evaluated on the attacks for different models, while the authors also proposed ensemble models to mitigate the impact of attacks; however, their resilience is not evaluated.</p>
					</sec>
					<sec id="s5-2-8">
						<label>5.2.8</label>
						<title>Increasing Entropy and Reducing Precision</title>
						<p>The training of attack using shadow training techniques against black box models in the cloud-based Google Prediction API and Amazon ML models are studied by <xref ref-type="bibr" rid="B41">Shokri et al. (2017)</xref>. The attack does not require prior knowledge of training data distribution. The authors emphasize that in order to protect the privacy of medical-related datasets or other public-related data, countermeasures should be designed. For instance, restriction of prediction vector to top <italic>k</italic> classes, which will prevent the leakage of important information or rounding down or up the classification probabilities in the prediction. They show that regularization can be effective to cope with overfitting and increasing the randomness of the prediction vector.</p>
					</sec>
					<sec id="s5-2-9">
						<label>5.2.9</label>
						<title>Dropout and Model Stacking</title>
						<p>In the study by <xref ref-type="bibr" rid="B37">Salem et al. (2018)</xref>, the authors created three diverse attacks and tested the applicability of these attacks on eight datasets from which six are similar to those used by <xref ref-type="bibr" rid="B41">Shokri et al. (2017)</xref>, whereas in this work, a news dataset and a face dataset are included. In the threat model, the authors considered black box access to the target model which is a supervised ML classifier with binary classes that was trained for binary classification. To mitigate the privacy threats, the authors proposed a dropout-based method which reduces the impact of an attack by randomly deleting a proportion of edges in each training iteration in a fully connected neural network. The second defense strategy is model stacking, which hierarchically organizes multiple ML models to avoid overfitting. After extensive evaluation, these defense techniques showed the potential to mitigate the performance of the membership inference attack.</p>
					</sec>
					<sec id="s5-2-10">
						<label>5.2.10</label>
						<title>Randomness to Video Analysis Algorithms</title>
						<p>Hosseini et al. designed two attacks specifically to analyze the robustness of video classification and shot detection (<xref ref-type="bibr" rid="B17">Hosseini et al., 2017</xref>). The attack can subtly manipulate the content of the video in such a way that it is undetected by humans, while the output from the automatic video analysis method is altered. Depending on the fact that the video and shot labels are generated by API by processing only the first video frame of every second, the attack can successfully deceive API. To deal with the shot removal and generation attacks, the authors proposed the inclusion of randomness for enhancing the robustness of algorithms. However, in this article, the authors thoroughly evaluated the applicability of these attacks in different video settings, but the proposed defense is not rigorously evaluated.</p>
					</sec>
					<sec id="s5-2-11">
						<label>5.2.11</label>
						<title>Neuron Distance Threshold and Obfuscation</title>
						<p>Transfer learning is an effective technique for quickly building DL student models in which knowledge from a Teacher model is transferred to a Student model. However, <xref ref-type="bibr" rid="B51">Wang et al. (2018a)</xref> discussed that due to the centralization of model training, the vulnerability against misclassification attacks for image recognition on black box Student models increases. The authors proposed several defenses to mitigate the impact of such an attack, such as changing the internal representation of the Student model from the Teacher model. Other defense methods include increasing dropout randomization which alters the student model training process, modification in input data before classification, adding redundancy, and using orthogonal model against transfer learning attack. The authors analyzed the robustness of these attacks and demonstrated that the neuron distance threshold is the most effective in obfuscating the identity of the Teacher model.</p>
					</sec>
				</sec>
			</sec>
			<sec id="s6">
				<label>6</label>
				<title>Pitfalls and Limitations</title>
				<sec id="s6-1">
					<label>6.1</label>
					<title>Lack of Attack Diversity</title>
					<p>The attacks presented in the selected articles have limited scope and lack diversity, that is, they are limited to a specific setting, and the variability of attacks is limited as well. However, the diversity of attacks is an important consideration for developing robust attacks from the perspective of adversaries, and it ensures the detection and prevention of the attacks to be difficult. The diversity of attacks ultimately helps in the development of robust defense strategies. Moreover, the empirical evaluation of attack variabilities can identify the potential vulnerabilities of cybersecurity systems. Therefore, to make a more robust defense solution, it is important to test the model robustness under a diverse set of attacks.</p>
				</sec>
				<sec id="s6-2">
					<label>6.2</label>
					<title>Lack of Consideration for Adaptable Adversaries</title>
					<p>Most of the defenses in the systematically reviewed articles are proposed for a specific attack and did not consider the adaptable adversaries. On the other hand, in practice, the adversarial attacks are an arms race between attackers and defenders. That is, the attackers continuously evolve and enhance their knowledge and attacking strategies to evade the underlying defensive system. Therefore, the consideration of adaptable adversaries is crucial for developing a robust and long-lasting defense mechanism. If we do not consider this, the adversary will adapt to our defensive system over time and will bypass it to get the intended behavior or outcomes.</p>
				</sec>
				<sec id="s6-3">
					<label>6.3</label>
					<title>Limited Progress in Developing Defenses</title>
					<p>From the systematically selected articles that are collected from different databases, only 12 articles have presented defense methods for the proposed attack as compared to the articles that are focused on attacks, that is, 19. In these 12 articles, six have only discussed/presented a defense strategy and six have developed a defense against a particular attack. This indicates that there is limited activity from the research community in developing defense strategies for already proposed attacks in the literature. In addition, the proposed defenses only mitigate or detect those attacks for which they have been developed, and therefore, they are not generalizable. On the contrary, the increasing interest in developing different attacks and the popularity of cloud-hosted/third-party services demand a proportionate amount of interest in developing defense systems as well.</p>
				</sec>
			</sec>
			<sec id="s7">
				<label>7</label>
				<title>Open Research Issues</title>
				<sec id="s7-1">
					<label>7.1</label>
					<title>Adversarially Robust Machine Learning Models</title>
					<p>In recent years, adversarial ML attacks have emerged as a major threat for ML/DL models and the systematically selected articles have highlighted the threat of these attacks for cloud-hosted ML/DL models as well. Moreover, the diversity of these attacks is drastically increasing as compared with the defensive strategies that can pose serious challenges and consequences for the security of cloud-hosted ML/DL models. Each defense method presented in the literature so far has been shown resilient to a particular attack which is realized in specific settings, and it fails to withstand yet stronger and unseen attacks. Therefore, the development of adversarially robust ML/DL models remains an open research problem, while the literature suggests that worst-case robustness analysis should be performed while considering adversarial ML settings (<xref ref-type="bibr" rid="B32">Qayyum et al., 2020a</xref>; <xref ref-type="bibr" rid="B33">Qayyum et al., 2020b</xref>; <xref ref-type="bibr" rid="B18">Ilahi et al., 2020</xref>). In addition, it has been argued in the literature that most ML developers and security incident responders are unequipped with the required tools for securing industry-grade ML systems against adversarial ML attacks (<xref ref-type="bibr" rid="B24">Kumar et al., 2020</xref>). This indicates the increasing need for the development of defense strategies for securing ML/DL models against adversarial ML attacks.</p>
				</sec>
				<sec id="s7-2">
					<label>7.2</label>
					<title>Privacy-Preserving Machine Learning Models</title>
					<p>In cloud-hosted ML services, preserving user privacy is fundamentally important and is a matter of high concern. Also, it is desirable that ML models built using users&#x2019; data should not learn information that can compromise the privacy of the individuals. However, the literature on developing privacy-preserving ML/DL models or MLaaS is limited. On the other hand, one of the privacy-preserving techniques that have been used for privacy protection for building a defense system for cloud-hosted ML/DL models, that is, the homomorphic encryption-based protocol (<xref ref-type="bibr" rid="B20">Jiang et al., 2018</xref>), has been shown vulnerable to model extraction attack (<xref ref-type="bibr" rid="B34">Reith et al., 2019</xref>). Therefore, the development of privacy-preserving ML models for cloud computing platforms is another open research problem.</p>
				</sec>
				<sec id="s7-3">
					<label>7.3</label>
					<title>Proxy Metrics for Evaluating Security and Robustness</title>
					<p>From systematically reviewed literature on the security of cloud-hosted ML/DL models, we orchestrate that the interest from the research community in the development of novel security-centric proxy metrics for the evaluation of security threats and model robustness of cloud-hosted models is very limited. However, with the increasing proliferation of cloud-hosted ML services (i.e., MLaaS) and with the development/advancements of different attacks (e.g., adversarial ML attacks), the development of effective and scalable metrics for evaluating the robustness ML/DL models toward different attacks and defense strategies is required.</p>
				</sec>
			</sec>
			<sec id="s8">
				<label>8</label>
				<title>Threats to Validity</title>
				<p>We now briefly reflect on our methodology in order to identify any threats to the validity of our findings. First, internal validity is maintained as the research questions we pose in <xref ref-type="sec" rid="s2-2">Section 2.2</xref> capture the objectives of the study. Construct validity relies on a sound understanding of the literature and how it represents the state of the field. A detailed study of the reviewed articles along with deep discussions between the members of the research team helped ensure the quality of this understanding. Note that the research team is of diverse skills and expertise in ML, DL, cloud computing, ML/DL security, and analytics. Also, the inclusion and exclusion criteria (<xref ref-type="sec" rid="s2-3">Section 2.3</xref>) help define the remit of our survey. Data extraction is prone to human error as is always the case. This was mitigated by having different members of the research team review each reviewed article. However, we did not attempt to evaluate the quality of the reviewed studies or validate their content due to time constraints. In order to minimize selection bias, we cast a wide net in order to capture articles from different communities publishing in the area of MLaaS via a comprehensive set of bibliographical databases without discriminating based on the venue/source.</p>
			</sec>
			<sec sec-type="conclusion" id="s9">
				<label>9</label>
				<title>Conclusion</title>
				<p>In this article, we presented a systematic review of literature that is focused on the security of cloud-hosted ML/DL models, also known as MLaaS. The relevant articles were collected from eight major publishers that include ACM Digital Library, IEEE Xplore, ScienceDirect, international conference on machine learning, international conference on learning representations, journal of machine learning research, USENIX, neural information processing systems, and arXiv. For the selection of articles, we developed a review protocol that includes inclusion and exclusion criteria and analyzed the selected articles that fulfill these criteria across two dimensions (i.e., attacks and defenses) on MLaaS and provide a thematic analysis of these articles across five attack and five defense themes, respectively. We also identified the limitations and pitfalls from the reviewed literature, and finally, we have highlighted various open research issues that require further investigation.</p>
			</sec>
			<sec id="s10">
				<title>Data Availability Statement</title>
				<p>The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author/s.</p>
			</sec>
			<sec id="s11">
				<title>Author Contributions</title>
				<p>AQ led the work in writing the manuscript and performed the annotation of the data and analysis as well. AI performed data acquisition, annotation, and analysis from four venues, and contributed to the paper write-up. MU contributed to writing a few sections, did annotations of papers, and helped in analysis. WI performed data scrapping, annotation, and analysis from four venues, and helped in developing graphics. All the first four authors validated the data, analysis, and contributed to the interpretation of the results. AQ and AI helped in developing and refining the methodology for this systematic review. JQ conceived the idea and supervises the overall work. JQ, YEK, and AF provided critical feedback and helped shape the research, analysis, and manuscript. All authors contributed to the final version of the manuscript.</p>
			</sec>
			<sec sec-type="COI-statement" id="s12">
				<title>Conflict of Interest</title>
				<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
			</sec>
		</body>
		<back>
			<fn-group>
				<fn id="FN1" fn-type="other">
					<label>1</label>
					<p>We use MLaaS to cover both ML and DL as a Service cloud provisions.</p>
				</fn>
				<fn id="FN2" fn-type="other">
					<label>2</label>
					<p>
						<ext-link ext-link-type="uri" xlink:href="https://cloud.google.com/ml-engine/">https://cloud.google.com/ml-engine/</ext-link>.</p>
				</fn>
				<fn id="FN3" fn-type="other">
					<label>3</label>
					<p>A popular Python library for DL.</p>
				</fn>
				<fn id="FN4" fn-type="other">
					<label>4</label>
					<p>
						<ext-link ext-link-type="uri" xlink:href="https://azure.microsoft.com/en-us/services/machine-learning-service/">https://azure.microsoft.com/en-us/services/machine-learning-service/</ext-link>.</p>
				</fn>
				<fn id="FN5" fn-type="other">
					<label>5</label>
					<p>
						<ext-link ext-link-type="uri" xlink:href="https://docs.aws.amazon.com/dlami/latest/devguide/AML2_0.html">https://docs.aws.amazon.com/dlami/latest/devguide/AML2_0.html</ext-link>.</p>
				</fn>
				<fn id="FN6" fn-type="other">
					<label>6</label>
					<p>Backdoor attacks on cloud-hosted models can be further categorized into three categories (<xref ref-type="bibr" rid="B6">Chen et al., 2020</xref>): 1) complete model&#x2013;based attacks, 2) partial model&#x2013;based attacks, and 3) model-free attacks.</p>
				</fn>
			</fn-group>
			<ref-list>
				<title>References</title>
				<ref id="B1">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Akhtar</surname>
								<given-names>N.</given-names>
							</name>
							<name>
								<surname>Mian</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2018</year>). <article-title>Threat of adversarial attacks on deep learning in computer vision: a survey</article-title>. <source>IEEE Access</source>
						<volume>6</volume>, <fpage>14410</fpage>&#x2013;<lpage>14430</lpage>. <pub-id pub-id-type="doi">10.1109/access.2018.2807385</pub-id>
					</citation>
				</ref>
				<ref id="B2">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Apruzzese</surname>
								<given-names>G.</given-names>
							</name>
							<name>
								<surname>Colajanni</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Ferretti</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Marchetti</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Addressing adversarial attacks against security systems based on machine learning</article-title>,&#x201d; in <conf-name>2019 11th International conference on cyber conflict (CyCon)</conf-name>, <conf-loc>Tallinn, Estonia</conf-loc>, <conf-date>May 28&#x2013;31, 2019</conf-date> (<publisher-name>IEEE</publisher-name>), <volume>900</volume>, <fpage>1</fpage>&#x2013;<lpage>18</lpage>
					</citation>
				</ref>
				<ref id="B3">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Brendel</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Rauber</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Bethge</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2017</year>). &#x201c;<article-title>Decision-based adversarial attacks: reliable attacks against black-box machine learning models</article-title>,&#x201d; in <conf-name>International Conference on Learning Representations (ICLR)</conf-name>
					</citation>
				</ref>
				<ref id="B4">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Chen</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Xue</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Fan</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Hao</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Xu</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Zhu</surname>
								<given-names>H.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2018</year>). <article-title>Automated poisoning attacks and defenses in malware detection systems: an adversarial machine learning approach</article-title>. <source>Comput. Secur.</source>
						<volume>73</volume>, <fpage>326</fpage>&#x2013;<lpage>344</lpage>. <pub-id pub-id-type="doi">10.1016/j.cose.2017.11.007</pub-id>
					</citation>
				</ref>
				<ref id="B5">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Chen</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Liu</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Li</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Lu</surname>
								<given-names>K.</given-names>
							</name>
							<name>
								<surname>Song</surname>
								<given-names>D.</given-names>
							</name>
						</person-group> (<year>2017</year>). <article-title>Targeted backdoor attacks on deep learning systems using data poisoning</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B6">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Chen</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Gong</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>Q.</given-names>
							</name>
							<name>
								<surname>Di</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Huang</surname>
								<given-names>H.</given-names>
							</name>
						</person-group> (<year>2020</year>). <article-title>Backdoor attacks and defenses for deep neural networks in outsourced cloud environments</article-title>. <source>IEEE Network</source>
						<volume>34</volume> (<issue>5</issue>), <fpage>141</fpage>&#x2013;<lpage>147</lpage>. <pub-id pub-id-type="doi">10.1109/MNET.011.1900577</pub-id>
					</citation>
				</ref>
				<ref id="B7">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Collobert</surname>
								<given-names>R.</given-names>
							</name>
							<name>
								<surname>Kavukcuoglu</surname>
								<given-names>K.</given-names>
							</name>
							<name>
								<surname>Farabet</surname>
								<given-names>C.</given-names>
							</name>
						</person-group> (<year>2011</year>). &#x201c;<article-title>Torch7: a Matlab-like environment for machine learning</article-title>,&#x201d; in <conf-name>BigLearn, NIPS workshop</conf-name>. </citation>
				</ref>
				<ref id="B8">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Correia-Silva</surname>
								<given-names>J. R.</given-names>
							</name>
							<name>
								<surname>Berriel</surname>
								<given-names>R. F.</given-names>
							</name>
							<name>
								<surname>Badue</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>de Souza</surname>
								<given-names>A. F.</given-names>
							</name>
							<name>
								<surname>Oliveira-Santos</surname>
								<given-names>T.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>Copycat CNN: stealing knowledge by persuading confession with random non-labeled data</article-title>,&#x201d; in <conf-name>2018 International joint conference on neural networks (IJCNN)</conf-name>, <conf-loc>Rio de Janeiro, Brazil</conf-loc>, <conf-date>July 8&#x2013;13, 2018</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>8</lpage>
					</citation>
				</ref>
				<ref id="B9">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Demetrio</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Valenza</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Costa</surname>
								<given-names>G.</given-names>
							</name>
							<name>
								<surname>Lagorio</surname>
								<given-names>G.</given-names>
							</name>
						</person-group> (<year>2020</year>). &#x201c;<article-title>Waf-a-mole: evading web application firewalls through adversarial machine learning</article-title>,&#x201d; in <conf-name>Proceedings of the 35th annual ACM symposium on applied computing</conf-name>, <conf-loc>Brno, Czech Republic</conf-loc>, <conf-date>March 2020</conf-date>, <fpage>1745</fpage>&#x2013;<lpage>1752</lpage>
					</citation>
				</ref>
				<ref id="B10">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Gong</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Li</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Poellabauer</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Shi</surname>
								<given-names>Y.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Real-time adversarial attacks</article-title>,&#x201d; in <conf-name>Proceedings of the 28th International Joint Conference on Artificial Intelligence (IJCAI)</conf-name>, <conf-loc>Macao, China</conf-loc>, <conf-date>August 2019</conf-date>
					</citation>
				</ref>
				<ref id="B11">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Goodfellow</surname>
								<given-names>I. J.</given-names>
							</name>
							<name>
								<surname>Shlens</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Szegedy</surname>
								<given-names>C.</given-names>
							</name>
						</person-group> (<year>2014</year>). <article-title>Explaining and harnessing adversarial examples</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B12">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Gu</surname>
								<given-names>T.</given-names>
							</name>
							<name>
								<surname>Liu</surname>
								<given-names>K.</given-names>
							</name>
							<name>
								<surname>Dolan-Gavitt</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Garg</surname>
								<given-names>S.</given-names>
							</name>
						</person-group> (<year>2019</year>). <article-title>BadNets: evaluating backdooring attacks on deep neural networks</article-title>. <source>IEEE Access</source>
						<volume>7</volume>, <fpage>47230</fpage>&#x2013;<lpage>47244</lpage>. <pub-id pub-id-type="doi">10.1109/access.2019.2909068</pub-id>
					</citation>
				</ref>
				<ref id="B13">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Han</surname>
								<given-names>D.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>Z.</given-names>
							</name>
							<name>
								<surname>Zhong</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Chen</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Yang</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Lu</surname>
								<given-names>S.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2020</year>). <article-title>Practical traffic-space adversarial attacks on learning-based nidss</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B14">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Hesamifard</surname>
								<given-names>E.</given-names>
							</name>
							<name>
								<surname>Takabi</surname>
								<given-names>H.</given-names>
							</name>
							<name>
								<surname>Ghasemi</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Jones</surname>
								<given-names>C.</given-names>
							</name>
						</person-group> (<year>2017</year>). &#x201c;<article-title>Privacy-preserving machine learning in cloud</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 on cloud computing security workshop</conf-name>, <fpage>39</fpage>&#x2013;<lpage>43</lpage>
					</citation>
				</ref>
				<ref id="B15">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Hilprecht</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>H&#xe4;rterich</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Bernau</surname>
								<given-names>D.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Monte Carlo and reconstruction membership inference attacks against generative models</article-title>,&#x201d; in <conf-name>Proceedings on Privacy Enhancing Technologies</conf-name>, <conf-loc>Stockholm, Sweden</conf-loc>, <conf-date>July 2019</conf-date>, <volume>2019</volume>, <fpage>232</fpage>&#x2013;<lpage>249</lpage>
					</citation>
				</ref>
				<ref id="B16">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Hitaj</surname>
								<given-names>D.</given-names>
							</name>
							<name>
								<surname>Hitaj</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Mancini</surname>
								<given-names>L. V.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Evasion attacks against watermarking techniques found in MLaaS systems</article-title>,&#x201d; in <conf-name>2019 sixth international conference on software defined systems (SDS)</conf-name>, <conf-loc>Rome, Italy</conf-loc>, <conf-date>June 10&#x2013;13, 2019</conf-date> (<publisher-name>IEEE</publisher-name>)</citation>
				</ref>
				<ref id="B17">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Hosseini</surname>
								<given-names>H.</given-names>
							</name>
							<name>
								<surname>Xiao</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Clark</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Poovendran</surname>
								<given-names>R.</given-names>
							</name>
						</person-group> (<year>2017</year>). &#x201c;<article-title>Attacking automatic video analysis algorithms: a case study of google cloud video intelligence API</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 conference on multimedia Privacy and security (ACM)</conf-name>, <fpage>21</fpage>&#x2013;<lpage>32</lpage>
					</citation>
				</ref>
				<ref id="B18">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Ilahi</surname>
								<given-names>I.</given-names>
							</name>
							<name>
								<surname>Usama</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Janjua</surname>
								<given-names>M. U.</given-names>
							</name>
							<name>
								<surname>Al-Fuqaha</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Hoang</surname>
								<given-names>D. T.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2020</year>). <article-title>Challenges and countermeasures for adversarial attacks on deep reinforcement learning</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B19">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Ji</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Ji</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Luo</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>T.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>Model-reuse attacks on deep learning systems</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 ACM SIGSAC Conference on Computer and Communications Security (New York, NY: ACM)</conf-name>, <conf-date>December 2018</conf-date>, <fpage>349</fpage>&#x2013;<lpage>363</lpage>
					</citation>
				</ref>
				<ref id="B20">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Jiang</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Hamer</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Jiang</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Kim</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Song</surname>
								<given-names>Y.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2018</year>). <article-title>Securelr: secure logistic regression model via a hybrid cryptographic protocol</article-title>. <source>IEEE ACM Trans. Comput. Biol. Bioinf</source>
						<volume>16</volume>, <fpage>113</fpage>&#x2013;<lpage>123</lpage>. <pub-id pub-id-type="doi">10.1109/TCBB.2018.2833463</pub-id>
					</citation>
				</ref>
				<ref id="B21">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Joshi</surname>
								<given-names>N.</given-names>
							</name>
							<name>
								<surname>Tammana</surname>
								<given-names>R.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>GDALR: an efficient model duplication attack on black box machine learning models</article-title>,&#x201d; in <conf-name>2019 IEEE international Conference on system, computation, Automation and networking (ICSCAN)</conf-name>,<conf-loc>Pondicherry, India</conf-loc>, <conf-date>March 29&#x2013;30, 2019</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>1</fpage>&#x2013;<lpage>6</lpage>
					</citation>
				</ref>
				<ref id="B22">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Kesarwani</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Mukhoty</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Arya</surname>
								<given-names>V.</given-names>
							</name>
							<name>
								<surname>Mehta</surname>
								<given-names>S.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>Model extraction warning in MLaaS paradigm</article-title>,&#x201d; in <conf-name>Proceedings of the 34th Annual Computer Security Applications Conference (ACM)</conf-name>, <fpage>371</fpage>&#x2013;<lpage>380</lpage>
					</citation>
				</ref>
				<ref id="B23">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Krizhevsky</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Sutskever</surname>
								<given-names>I.</given-names>
							</name>
							<name>
								<surname>Hinton</surname>
								<given-names>G. E.</given-names>
							</name>
						</person-group> (<year>2012</year>). &#x201c;<article-title>Imagenet classification with deep convolutional neural networks</article-title>,&#x201d; in <conf-name>Advances in neural information processing systems</conf-name>, <fpage>1097</fpage>&#x2013;<lpage>1105</lpage> Available at: <ext-link ext-link-type="uri" xlink:href="http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf">http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf</ext-link>
					</citation>
				</ref>
				<ref id="B24">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Kumar</surname>
								<given-names>R. S. S.</given-names>
							</name>
							<name>
								<surname>Nystr&#xf6;m</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Lambert</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Marshall</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Goertzel</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Comissoneru</surname>
								<given-names>A.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2020</year>). <article-title>Adversarial machine learning&#x2013;industry perspectives</article-title>. <source>arXiv</source>. Available at: <ext-link ext-link-type="uri" xlink:href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3532474">https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3532474</ext-link>
					</citation>
				</ref>
				<ref id="B25">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Lei</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Chen</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Fan</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Song</surname>
								<given-names>F.</given-names>
							</name>
							<name>
								<surname>Liu</surname>
								<given-names>Y.</given-names>
							</name>
						</person-group> (<year>2020</year>). <article-title>Advanced evasion attacks and mitigations on practical ml-based phishing website classifiers</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B26">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Liang</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Su</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>You</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Shi</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Yang</surname>
								<given-names>G.</given-names>
							</name>
						</person-group> (<year>2016</year>). &#x201c;<article-title>Cracking classifiers for evasion: a case study on the google&#x2019;s phishing pages filter</article-title>,&#x201d; in <conf-name>Proceedings of the 25th international conference on world wide web Montr&#x00E9;al, Qu&#x00E9;bec, Canada</conf-name>, <fpage>345</fpage>&#x2013;<lpage>356</lpage>
					</citation>
				</ref>
				<ref id="B27">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Liao</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Zhong</surname>
								<given-names>H.</given-names>
							</name>
							<name>
								<surname>Zhu</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Squicciarini</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>Server-based manipulation attacks against machine learning models</article-title>,&#x201d; in <conf-name>Proceedings of the eighth ACM conference on data and application security and privacy (ACM)</conf-name>, <conf-loc>New York, NY</conf-loc>, <conf-date>March 2018</conf-date>, <fpage>24</fpage>&#x2013;<lpage>34</lpage>
					</citation>
				</ref>
				<ref id="B28">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Liu</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Juuti</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Lu</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Asokan</surname>
								<given-names>N.</given-names>
							</name>
						</person-group>. (<year>2017</year>). &#x201c;<article-title>Oblivious neural network predictions via minionn transformations</article-title>,&#x201d; in <conf-name>Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security</conf-name>, <conf-date>October 2017</conf-date>, <fpage>619</fpage>&#x2013;<lpage>631</lpage>
					</citation>
				</ref>
				<ref id="B29">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Liu</surname>
								<given-names>T.</given-names>
							</name>
							<name>
								<surname>Wen</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Jin</surname>
								<given-names>Y.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>SIN 2: stealth infection on neural network&#x2014;a low-cost agile neural Trojan attack methodology</article-title>,&#x201d; in <conf-name>2018 IEEE international symposium on hardware oriented security and trust (HOST)</conf-name>, <conf-loc>Washington, DC</conf-loc>, <conf-date>April 30&#x2013;4 May, 2018</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>227</fpage>&#x2013;<lpage>230</lpage>
					</citation>
				</ref>
				<ref id="B30">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Nguyen</surname>
								<given-names>T. N.</given-names>
							</name>
						</person-group> (<year>2017</year>). <article-title>Attacking machine learning models as part of a cyber kill chain</article-title>. <source>arXiv</source>
					</citation>
				</ref>
				<ref id="B31">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Parkhi</surname>
								<given-names>O. M.</given-names>
							</name>
							<name>
								<surname>Vedaldi</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Zisserman</surname>
								<given-names>A.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2015</year>). <article-title>Deep face recognition</article-title>. <source>Bmvc</source>
						<volume>1</volume>, <fpage>6</fpage>. <pub-id pub-id-type="doi">10.5244/C.29.41</pub-id>
					</citation>
				</ref>
				<ref id="B32">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Qayyum</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Bilal</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Al-Fuqaha</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2020a</year>). <article-title>Secure and robust machine learning for healthcare: a survey</article-title>. <source>IEEE Rev. Biomed. Eng.</source>, <fpage>1</fpage>. <pub-id pub-id-type="doi">10.1109/RBME.2020.3013489</pub-id>
					</citation>
				</ref>
				<ref id="B33">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Qayyum</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Usama</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Al-Fuqaha</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2020b</year>). <article-title>Securing connected &#x26; autonomous vehicles: challenges posed by adversarial machine learning and the way forward</article-title>. <source>IEEE Commun. Surv. Tutorials</source>
						<volume>22</volume>, <fpage>998</fpage>&#x2013;<lpage>1026</lpage>. <pub-id pub-id-type="doi">10.1109/comst.2020.2975048</pub-id>
					</citation>
				</ref>
				<ref id="B34">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Reith</surname>
								<given-names>R. N.</given-names>
							</name>
							<name>
								<surname>Schneider</surname>
								<given-names>T.</given-names>
							</name>
							<name>
								<surname>Tkachenko</surname>
								<given-names>O.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Efficiently stealing your machine learning models</article-title>,&#x201d; in <conf-name>Proceedings of the 18th ACM workshop on privacy in the electronic society</conf-name>, <conf-date>November 2019</conf-date>, <fpage>198</fpage>&#x2013;<lpage>210</lpage>
					</citation>
				</ref>
				<ref id="B35">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Rouhani</surname>
								<given-names>B. D.</given-names>
							</name>
							<name>
								<surname>Hussain</surname>
								<given-names>S. U.</given-names>
							</name>
							<name>
								<surname>Lauter</surname>
								<given-names>K.</given-names>
							</name>
							<name>
								<surname>Koushanfar</surname>
								<given-names>F.</given-names>
							</name>
						</person-group> (<year>2018</year>). <article-title>Redcrypt: real-time privacy-preserving deep learning inference in clouds using fpgas</article-title>. <source>ACM Trans. Reconfigurable Technol. Syst.</source>
						<volume>11</volume>, <fpage>1</fpage>&#x2013;<lpage>21</lpage>. <pub-id pub-id-type="doi">10.1145/3242899</pub-id>
					</citation>
				</ref>
				<ref id="B36">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Saadatpanah</surname>
								<given-names>P.</given-names>
							</name>
							<name>
								<surname>Shafahi</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Goldstein</surname>
								<given-names>T.</given-names>
							</name>
						</person-group> (<year>2019</year>). <article-title>Adversarial attacks on copyright detection systems</article-title>. <source>arXiv</source>.</citation>
				</ref>
				<ref id="B37">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Salem</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Humbert</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Berrang</surname>
								<given-names>P.</given-names>
							</name>
							<name>
								<surname>Fritz</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Backes</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2018</year>). <article-title>ML-leaks: model and data independent membership inference attacks and defenses on machine learning models</article-title>. <source>arXiv</source>.</citation>
				</ref>
				<ref id="B38">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Sehwag</surname>
								<given-names>V.</given-names>
							</name>
							<name>
								<surname>Bhagoji</surname>
								<given-names>A. N.</given-names>
							</name>
							<name>
								<surname>Song</surname>
								<given-names>L.</given-names>
							</name>
							<name>
								<surname>Sitawarin</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Cullina</surname>
								<given-names>D.</given-names>
							</name>
							<name>
								<surname>Chiang</surname>
								<given-names>M.</given-names>
							</name>
							<etal/>
						</person-group> (<year>2019</year>). <article-title>Better the devil you know: an analysis of evasion attacks using out-of-distribution adversarial examples</article-title>. <source>arXiv</source>.</citation>
				</ref>
				<ref id="B39">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Sethi</surname>
								<given-names>T. S.</given-names>
							</name>
							<name>
								<surname>Kantardzic</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2018</year>). <article-title>Data driven exploratory attacks on black box classifiers in adversarial domains</article-title>. <source>Neurocomputing</source>
						<volume>289</volume>, <fpage>129</fpage>&#x2013;<lpage>143</lpage>. <pub-id pub-id-type="doi">10.1016/j.neucom.2018.02.007</pub-id>
					</citation>
				</ref>
				<ref id="B40">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Sharma</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Chen</surname>
								<given-names>K.</given-names>
							</name>
						</person-group> (<year>2018</year>). &#x201c;<article-title>Image disguising for privacy-preserving deep learning</article-title>,&#x201d; in <conf-name>Proceedings of the 2018 ACM SIGSAC Conference on Computer and Communications Security</conf-name>, (<publisher-name>ACM, Toronto, Canada</publisher-name>), <fpage>2291</fpage>&#x2013;<lpage>2293</lpage>
					</citation>
				</ref>
				<ref id="B41">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Shokri</surname>
								<given-names>R.</given-names>
							</name>
							<name>
								<surname>Stronati</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Song</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Shmatikov</surname>
								<given-names>V.</given-names>
							</name>
						</person-group> (<year>2017</year>). &#x201c;<article-title>Membership inference attacks against machine learning models</article-title>,&#x201d; in <conf-name>2017 IEEE Symposium on Security and privacy (SP)</conf-name>, <conf-loc>San Jose, CA</conf-loc>, <conf-date>May 22&#x2013;26, 2017</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>3</fpage>&#x2013;<lpage>18</lpage>
					</citation>
				</ref>
				<ref id="B42">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Simonyan</surname>
								<given-names>K.</given-names>
							</name>
							<name>
								<surname>Zisserman</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2015</year>). &#x201c;<article-title>Very deep convolutional networks for large-scale image recognition</article-title>,&#x201d; in <conf-name>International Conference on Learning Representations (ICLR)</conf-name>
					</citation>
				</ref>
				<ref id="B43">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Song</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Liu</surname>
								<given-names>T.</given-names>
							</name>
							<name>
								<surname>Wei</surname>
								<given-names>T.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Tao</surname>
								<given-names>Z.</given-names>
							</name>
							<name>
								<surname>Chen</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2020</year>). <article-title>Fda3: federated defense against adversarial attacks for cloud-based iiot applications</article-title>. <source>IEEE Trans. Industr. Inform.</source>, <fpage>1</fpage>. <pub-id pub-id-type="doi">10.1109/TII.2020.3005969</pub-id>
					</citation>
				</ref>
				<ref id="B44">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Sun</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Wang</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Tang</surname>
								<given-names>X.</given-names>
							</name>
						</person-group> (<year>2014</year>). &#x201c;<article-title>Deep learning face representation from predicting 10,000 classes</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition</conf-name>, <conf-loc>Columbus, OH</conf-loc>, <conf-date>June 23&#x2013;28, 2014</conf-date>, (<publisher-name>IEEE</publisher-name>).</citation>
				</ref>
				<ref id="B45">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Szegedy</surname>
								<given-names>C.</given-names>
							</name>
							<name>
								<surname>Vanhoucke</surname>
								<given-names>V.</given-names>
							</name>
							<name>
								<surname>Ioffe</surname>
								<given-names>S.</given-names>
							</name>
							<name>
								<surname>Shlens</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Wojna</surname>
								<given-names>Z.</given-names>
							</name>
						</person-group> (<year>2016</year>). &#x201c;<article-title>Rethinking the inception architecture for computer vision</article-title>,&#x201d; in <conf-name>Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR)</conf-name>, <conf-loc>Las Vegas, NV</conf-loc>, <conf-date>June 27&#x2013;30, 2016</conf-date> (<publisher-name>IEEE</publisher-name>), <fpage>2818</fpage>&#x2013;<lpage>2826</lpage>
					</citation>
				</ref>
				<ref id="B46">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Tram&#xe8;r</surname>
								<given-names>F.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>F.</given-names>
							</name>
							<name>
								<surname>Juels</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Reiter</surname>
								<given-names>M. K.</given-names>
							</name>
							<name>
								<surname>Ristenpart</surname>
								<given-names>T.</given-names>
							</name>
						</person-group> (<year>2016</year>). &#x201c;<article-title>Stealing machine learning models via prediction APIs</article-title>,&#x201d; in <conf-name>25th USENIX security symposium (USENIX Security 16)</conf-name>, <fpage>601</fpage>&#x2013;<lpage>618</lpage>
					</citation>
				</ref>
				<ref id="B47">
					<citation citation-type="book">
						<person-group person-group-type="author">
							<name>
								<surname>Tyndall</surname>
								<given-names>J.</given-names>
							</name>
						</person-group> (<year>2010</year>). <source>AACODS checklist</source>. <publisher-loc>Adelaide, Australia</publisher-loc>: <publisher-name>Adelaide Flinders University</publisher-name>
					</citation>
				</ref>
				<ref id="B48">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Usama</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Mitra</surname>
								<given-names>R. N.</given-names>
							</name>
							<name>
								<surname>Ilahi</surname>
								<given-names>I.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Marina</surname>
								<given-names>M. K.</given-names>
							</name>
						</person-group> (<year>2020a</year>). <article-title>Examining machine learning for 5g and beyond through an adversarial lens</article-title>. <source>arXiv</source>. Available at: <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2009.02473">https://arxiv.org/abs/2009.02473</ext-link>.</citation>
				</ref>
				<ref id="B49">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Usama</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Al-Fuqaha</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Hamdi</surname>
								<given-names>M.</given-names>
							</name>
						</person-group> (<year>2020b</year>). <article-title>The adversarial machine learning conundrum: can the insecurity of ML become the achilles&#x27; heel of cognitive networks?</article-title>
						<source>IEEE Network</source>
						<volume>34</volume>, <fpage>196</fpage>&#x2013;<lpage>203</lpage>. <pub-id pub-id-type="doi">10.1109/mnet.001.1900197</pub-id>
					</citation>
				</ref>
				<ref id="B50">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Usama</surname>
								<given-names>M.</given-names>
							</name>
							<name>
								<surname>Qayyum</surname>
								<given-names>A.</given-names>
							</name>
							<name>
								<surname>Qadir</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Al-Fuqaha</surname>
								<given-names>A.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Black-box adversarial machine learning attack on network traffic classification</article-title>,&#x201d; in <conf-name>2019 15th international wireless communications and mobile computing conference (IWCMC)</conf-name>, <conf-loc>Tangier, Morocco</conf-loc>, <conf-date>June 24&#x2013;28, 2019</conf-date>
					</citation>
				</ref>
				<ref id="B51">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Wang</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Yao</surname>
								<given-names>Y.</given-names>
							</name>
							<name>
								<surname>Viswanath</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Zheng</surname>
								<given-names>H.</given-names>
							</name>
							<name>
								<surname>Zhao</surname>
								<given-names>B. Y.</given-names>
							</name>
						</person-group> (<year>2018a</year>). &#x201c;<article-title>With great training comes great vulnerability: practical attacks against transfer learning</article-title>,&#x201d; in <conf-name>27th USENIX security symposium (USENIX Security 18)</conf-name>, <conf-loc>Baltimore, MD</conf-loc>, <conf-date>August 2018</conf-date>, <fpage>1281</fpage>&#x2013;<lpage>1297</lpage>
					</citation>
				</ref>
				<ref id="B52">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Wang</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Bao</surname>
								<given-names>W.</given-names>
							</name>
							<name>
								<surname>Zhu</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>Cao</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Yu</surname>
								<given-names>P. S.</given-names>
							</name>
						</person-group> (<year>2018b</year>). &#x201c;<article-title>Not just privacy: improving performance of private deep learning in mobile cloud</article-title>,&#x201d; in <conf-name>Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery &#x26; Data Mining London, United Kingdom</conf-name>, <conf-date>January 2018</conf-date>, <fpage>2407</fpage>&#x2013;<lpage>2416</lpage>
					</citation>
				</ref>
				<ref id="B53">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Yang</surname>
								<given-names>Z.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Chang</surname>
								<given-names>E.-C.</given-names>
							</name>
							<name>
								<surname>Liang</surname>
								<given-names>Z.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Neural network inversion in adversarial setting via background knowledge alignment</article-title>,&#x201d; in <conf-name>Proceedings of the 2019 ACM SIGSAC conference on computer and communications security</conf-name>, <conf-loc>London, UK</conf-loc>, <conf-date>November 2019</conf-date>, <fpage>225</fpage>&#x2013;<lpage>240</lpage>
					</citation>
				</ref>
				<ref id="B54">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Yuan</surname>
								<given-names>X.</given-names>
							</name>
							<name>
								<surname>He</surname>
								<given-names>P.</given-names>
							</name>
							<name>
								<surname>Zhu</surname>
								<given-names>Q.</given-names>
							</name>
							<name>
								<surname>Li</surname>
								<given-names>X.</given-names>
							</name>
						</person-group> (<year>2019</year>). <article-title>Adversarial examples: attacks and defenses for deep learning</article-title>. <source>IEEE Trans. Neural. Netw. Learn. Syst.</source>
						<volume>30</volume> (<issue>9</issue>), <fpage>2805</fpage>&#x2013;<lpage>2824</lpage>. <pub-id pub-id-type="doi">10.1109/TNNLS.2018.2886017</pub-id>
					</citation>
				</ref>
				<ref id="B55">
					<citation citation-type="journal">
						<person-group person-group-type="author">
							<name>
								<surname>Zhang</surname>
								<given-names>J.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>B.</given-names>
							</name>
							<name>
								<surname>Zhang</surname>
								<given-names>B.</given-names>
							</name>
						</person-group> (<year>2019</year>). &#x201c;<article-title>Defending adversarial attacks on cloud-aided automatic speech recognition systems</article-title>,&#x201d; in <conf-name>Proceedings of the seventh international workshop on security in cloud computing</conf-name>, <conf-loc>New York</conf-loc>, <fpage>23</fpage>&#x2013;<lpage>31</lpage>. Available at: <ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/proceedings/10.1145/3327962/">https://dl.acm.org/doi/proceedings/10.1145/3327962</ext-link>
					</citation>
				</ref>
			</ref-list>
		</back>
	</article>