@ARTICLE{10.3389/frai.2020.543305,
  AUTHOR={Hunt, Xanthe and Tomlinson, Mark and Sikander, Siham and Skeen, Sarah and Marlow, Marguerite and du Toit, Stefani and Eisner, Manuel},
  TITLE={Artificial Intelligence, Big Data, and mHealth: The Frontiers of the Prevention of Violence Against Children},
  JOURNAL={Frontiers in Artificial Intelligence},
  VOLUME={3},
  YEAR={2020},
  URL={https://www.frontiersin.org/articles/10.3389/frai.2020.543305},
  DOI={10.3389/frai.2020.543305},
  ISSN={2624-8212},
  ABSTRACT={Violence against children is a global public health threat of considerable concern. At least half of all children worldwide experience violence every year; globally, the total number of children between the ages of 2 and 17 years who have experienced violence in any given year is one billion. Based on a review of the literature, we argue that there is substantial potential for AI (and associated machine learning and big data) and mHealth approaches to be used to prevent and address violence at a large scale. This potential is particularly marked in low- and middle-income countries (LMIC), although whether it could translate into effective solutions at scale remains unclear. We discuss possible entry points for Artificial Intelligence (AI), big data, and mHealth approaches to violence prevention, linking these to the World Health Organization's seven INSPIRE strategies. However, such work should be approached with caution. We highlight clear directions for future work in technology-based and technology-enabled violence prevention. We argue that there is a need for good agent-based models at the level of entire cities, capturing where and when violence can occur and where local response systems are located. In addition, there is a need to develop common, reliable, and valid population- and individual/family-level data on predictors of violence. These indicators could be integrated into routine health or other information systems and become the basis of AI algorithms for violence prevention and response systems. Further, data are needed on individual help-seeking behavior, risk factors for child maltreatment, and other information that could help us identify the parameters required to understand what causes violence and what happens in response to it. To respond to the ethical issues engendered by these kinds of interventions, there must be concerted, meaningful efforts to develop participatory and user-led work in the AI space, to ensure that privacy and profiling concerns are addressed explicitly going forward. Finally, we make the case that developing AI and other technological infrastructure will require substantial investment, particularly in LMIC.}
}