<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.3" xml:lang="EN" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Big Data</journal-id>
<journal-title>Frontiers in Big Data</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Big Data</abbrev-journal-title>
<issn pub-type="epub">2624-909X</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="publisher-id">689358</article-id>
<article-id pub-id-type="doi">10.3389/fdata.2021.689358</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Big Data</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>Computer Vision for Continuous Bedside Pharmacological Data Extraction: A Novel Application of Artificial Intelligence for Clinical Data Recording and Biomedical Research</article-title>
<alt-title alt-title-type="left-running-head">Froese et&#x20;al.</alt-title>
<alt-title alt-title-type="right-running-head">Computer Vision for Bedside Pharmacological Data</alt-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Froese</surname>
<given-names>Logan</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="corresp" rid="c001">&#x2a;</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<xref ref-type="fn" rid="fn2">
<sup>&#x2021;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1359928/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Dian</surname>
<given-names>Joshua</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<xref ref-type="fn" rid="fn2">
<sup>&#x2021;</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Batson</surname>
<given-names>Carleen</given-names>
</name>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1390291/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Gomez</surname>
<given-names>Alwyn</given-names>
</name>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1359874/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Sainbhi</surname>
<given-names>Amanjyot Singh</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/1257677/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Unger</surname>
<given-names>Bertram</given-names>
</name>
<xref ref-type="aff" rid="aff4">
<sup>4</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Zeiler</surname>
<given-names>Frederick A.</given-names>
</name>
<xref ref-type="aff" rid="aff1">
<sup>1</sup>
</xref>
<xref ref-type="aff" rid="aff2">
<sup>2</sup>
</xref>
<xref ref-type="aff" rid="aff3">
<sup>3</sup>
</xref>
<xref ref-type="aff" rid="aff5">
<sup>5</sup>
</xref>
<xref ref-type="aff" rid="aff6">
<sup>6</sup>
</xref>
<xref ref-type="fn" rid="fn1">
<sup>&#x2020;</sup>
</xref>
<uri xlink:href="https://loop.frontiersin.org/people/399211/overview"/>
</contrib>
</contrib-group>
<aff id="aff1">
<label>
<sup>1</sup>
</label>Biomedical Engineering, Faculty of Engineering, University of Manitoba, <addr-line>Winnipeg</addr-line>, <addr-line>MB</addr-line>, <country>Canada</country>
</aff>
<aff id="aff2">
<label>
<sup>2</sup>
</label>Section of Neurosurgery, Department of Surgery, Rady Faculty of Health Sciences, University of Manitoba, <addr-line>Winnipeg</addr-line>, <addr-line>MB</addr-line>, <country>Canada</country>
</aff>
<aff id="aff3">
<label>
<sup>3</sup>
</label>Department of Anatomy and Cell Science, Rady Faculty of Health Sciences, University of Manitoba, <addr-line>Winnipeg</addr-line>, <addr-line>MB</addr-line>, <country>Canada</country>
</aff>
<aff id="aff4">
<label>
<sup>4</sup>
</label>Section of Critical Care, Department of Medicine, Rady Faculty of Health Sciences, University of Manitoba, <addr-line>Winnipeg</addr-line>, <addr-line>MB</addr-line>, <country>Canada</country>
</aff>
<aff id="aff5">
<label>
<sup>5</sup>
</label>Centre on Aging, University of Manitoba, <addr-line>Winnipeg</addr-line>, <addr-line>MB</addr-line>, <country>Canada</country>
</aff>
<aff id="aff6">
<label>
<sup>6</sup>
</label>Division of Anaesthesia, Department of Medicine, Addenbrooke&#x2019;s Hospital, University of Cambridge, <addr-line>Cambridge</addr-line>, <country>United&#x20;Kingdom</country>
</aff>
<author-notes>
<fn fn-type="edited-by">
<p>
<bold>Edited by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1260138/overview">Bernard Kamsu Foguem</ext-link>, Universit&#xe9; de Toulouse, France</p>
</fn>
<fn fn-type="edited-by">
<p>
<bold>Reviewed by:</bold> <ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1029783/overview">Shivanand Sharanappa Gornale</ext-link>, Rani Channamma University, India</p>
<p>
<ext-link ext-link-type="uri" xlink:href="https://loop.frontiersin.org/people/1337071/overview">Jatinderkumar Saini</ext-link>, Symbiosis Institute of Computer Studies and Research (SICSR), India</p>
</fn>
<corresp id="c001">&#x2a;Correspondence: Logan Froese, <email>log.froese@gmail.com</email>
</corresp>
<fn fn-type="other" id="fn1">
<label>
<bold>
<sup>&#x2020;</sup>
</bold>
</label>
<p>
<bold>ORCID: </bold>Logan Froese</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0002-6076-0189">orcid.org/0000-0002-6076-0189</ext-link>
</p>
<p>Joshua Dian</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0002-2193-4916">orcid.org/0000-0002-2193-4916</ext-link>
</p>
<p>Carleen Batson</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0002-7928-8523">orcid.org/0000-0002-7928-8523</ext-link>
</p>
<p>Alwyn Gomez</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0002-3737-2065">orcid.org/0000-0002-3737-2065</ext-link>
</p>
<p>Amanjyot Singh Sainbhi</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0003-3231-5683">orcid.org/0000-0003-3231-5683</ext-link>
</p>
<p>Bertram Unger</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0002-5739-3955">orcid.org/0000-0002-5739-3955</ext-link>
</p>
<p>Frederick A. Zeiler</p>
<p>
<ext-link ext-link-type="uri" xlink:href="http://orcid.org/0000-0003-1737-0510">orcid.org/0000-0003-1737-0510</ext-link>
</p>
</fn>
<fn fn-type="other" id="fn2">
<label>
<sup>&#x2021;</sup>
</label>
<p>These authors share first authorship</p>
</fn>
<fn fn-type="other">
<p>This article was submitted to Medicine and Public Health, a section of the journal Frontiers in Big&#x20;Data</p>
</fn>
</author-notes>
<pub-date pub-type="epub">
<day>27</day>
<month>08</month>
<year>2021</year>
</pub-date>
<pub-date pub-type="collection">
<year>2021</year>
</pub-date>
<volume>4</volume>
<elocation-id>689358</elocation-id>
<history>
<date date-type="received">
<day>09</day>
<month>04</month>
<year>2021</year>
</date>
<date date-type="accepted">
<day>09</day>
<month>08</month>
<year>2021</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#xa9; 2021 Froese, Dian, Batson, Gomez, Sainbhi, Unger and Zeiler.</copyright-statement>
<copyright-year>2021</copyright-year>
<copyright-holder>Froese, Dian, Batson, Gomez, Sainbhi, Unger and Zeiler</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these&#x20;terms.</p>
</license>
</permissions>
<abstract>
<p>
<bold>Introduction:</bold> As real time data processing is integrated with medical care for traumatic brain injury (TBI) patients, there is a requirement for devices to have digital output. However, there are still many devices that fail to have the required hardware to export real time data into an acceptable digital format or in a continuously updating manner. This is particularly the case for many intravenous pumps and older technological systems. Such accurate and digital real time data integration within TBI care and other fields is critical as we move towards digitizing healthcare information and integrating clinical data streams to improve bedside care. We propose to address this gap in technology by building a system that employs Optical Character Recognition through computer vision, using real time images from a pump monitor to extract the desired real time information.</p>
<p>
<bold>Methods:</bold> Using freely available software and readily available technology, we built a script that extracts real time images from a medication pump and then processes them using Optical Character Recognition to create digital text from the image. This text was then transferred to an ICM &#x2b; real-time monitoring software in parallel with other retrieved physiological&#x20;data.</p>
<p>
<bold>Results:</bold> The prototype that was built works effectively for our device, with source code openly available to interested end-users. However, future work is required for a more universal application of such a system.</p>
<p>
<bold>Conclusion:</bold> Advances here can improve medical information collection in the clinical environment, eliminating human error with bedside charting, and aid in data integration for biomedical research where many complex data sets can be seamlessly integrated digitally. Our design demonstrates a simple adaptation of current technology to help with this integration.</p>
</abstract>
<kwd-group>
<kwd>computer vision</kwd>
<kwd>image modification</kwd>
<kwd>optical character recognition</kwd>
<kwd>system integration</kwd>
<kwd>data integration</kwd>
</kwd-group>
</article-meta>
</front>
<body>
<sec id="s1">
<title>Introduction</title>
<p>Current therapeutic interventions in Traumatic Brain Injury (TBI) are generally based on low frequency physiological response over large sample sizes, focusing on long epoch outcomes (<xref ref-type="bibr" rid="B11">Chalmers et&#x20;al., 1981</xref>; <xref ref-type="bibr" rid="B7">Carney et&#x20;al., 2017</xref>). Though this methodology can be effective in identifying large global phenomena, momentary individualized events are masked within these large datasets. Thus, methodologies are emerging that leverage higher frequency data to find momentary phenomena that focus on individualized patient response to medical treatment (<xref ref-type="bibr" rid="B7">Carney et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B31">Matchett et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B51">Zeiler et&#x20;al., 2018a</xref>). Furthermore, within TBI care, recent literature has emerged connecting high frequency physiology with TBI outcome (<xref ref-type="bibr" rid="B3">Balestreri et al., 2015</xref>; <xref ref-type="bibr" rid="B5">Cabella et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B49">Zeiler et&#x20;al., 2018b</xref>). Yet, few studies connect the momentary response of high frequency physiology to current hourly recorded therapeutic infusions (<xref ref-type="bibr" rid="B17">Froese et&#x20;al., 2020a</xref>; <xref ref-type="bibr" rid="B16">Froese et&#x20;al., 2020b</xref>; <xref ref-type="bibr" rid="B24">Klein et&#x20;al., 2020</xref>). Through the use of more robust and individualized datasets, treatment guidelines can be focused on patient specific healthcare interventions which can lead to more individualized and personalized care. To take advantage of emerging technologies and new health metrics, real time high frequency physiological and treatment care data needs to be recorded and integrated. 
However, despite this increase in computational integration within health care, there are countless devices that are either released with insufficient digital output or are simply too outdated to carry the necessary hardware infrastructure to output the required data at a high frequency. This is particularly the case with many commercially available and clinically utilized medication pumps. As such, treatment information in many instances is still recorded manually at low frequency in bedside charts, or e-charts. Such methods are prone to errors in data entry and are time consuming for clinical&#x20;staff.</p>
<p>The limited compatibility of many bedside medical devices hinders clinicians&#x2019; ability to capture high frequency data, thus there is a need to leverage interfaces that convert such data from bedside devices directly into digital data. Many medical devices use text displays to convey the required information to the user. The text display therefore has the desired information, but based on the antiquated hardware, it lacks the compatibility to convert the information to a digital format. This problem is described as Text Information Extraction (TIE) (<xref ref-type="bibr" rid="B21">Jung et&#x20;al., 2004</xref>) and has been addressed in other environments like text-based image processing, (<xref ref-type="bibr" rid="B39">Park et&#x20;al., 1999</xref>; <xref ref-type="bibr" rid="B23">Kim et&#x20;al., 2002</xref>; <xref ref-type="bibr" rid="B10">Carvalho, 2016</xref>) document decoding (<xref ref-type="bibr" rid="B18">Cheng et&#x20;al., 1997</xref>; <xref ref-type="bibr" rid="B14">Feng et&#x20;al., 2006</xref>) and video text extraction (Locating Characters in Sc, 1047; <xref ref-type="bibr" rid="B15">Fischer et&#x20;al., 1995</xref>). All of these systems extract alphanumeric characters using Optical Character Recognition (OCR) via computer vision techniques, which leverage artificial intelligence to convert image characters into digital data (<xref ref-type="bibr" rid="B40">Schantz, 1982</xref>). This method, although well documented, has yet to be adapted for the use and conversion of medical monitoring equipment. Therefore, with the emergence of new openly available software and the universal nature of personal computers, there is a potential to adapt past medical devices to the computational&#x20;age.</p>
<p>Furthermore, for the integration of many older medical devices the only feasible solution to digital integration is through the use of scripting (<xref ref-type="bibr" rid="B9">Carvalho, 2013</xref>; <xref ref-type="bibr" rid="B12">Delaney et&#x20;al., 2013</xref>; <xref ref-type="bibr" rid="B8">Carvalho, 2021</xref>). Likewise, as clinical data collection exceeds the limits of humans, the need to leverage scripting to ensure accurate data collection becomes necessary (<xref ref-type="bibr" rid="B30">Mardis, 2011</xref>; <xref ref-type="bibr" rid="B12">Delaney et&#x20;al., 2013</xref>). To bridge this gap in compatibility, we have endeavoured to build a system that uses a camera to attain real time output from a text based display screen from bedside intravenous medication pumps and convert it into a continuously updated digital format to be captured and linked with other time-series data at the bedside in real&#x20;time.</p>
</sec>
<sec sec-type="materials|methods" id="s2">
<title>Materials and Methods</title>
<sec id="s2-1">
<title>Device Set-Up and Image Capture</title>
<p>This work was conducted at the Winnipeg Acute TBI Laboratories, at the University of Manitoba. The set-up consisted of a USB connected camera (Logitech C920s Pro HD Webcam, Logitech, Newark, CA, United&#x20;States) to take real time images of a commercially and commonly available intravenous medication pump (Baxter Colleague 3 CXE, Baxter Canada, Mississauga, Canada) which currently has no digital outport. Images are captured at 60 frames/second from a USB camera and copied directly onto a basic consumer laptop, see <xref ref-type="fig" rid="F1">Figure&#x20;1</xref>. The full Python scripting language code (Python 3, Scotts Valley, CA: CreateSpace) can be found in either <xref ref-type="sec" rid="s10">Supplementary Appendix A</xref> or GitHub (<ext-link ext-link-type="uri" xlink:href="https://github.com/lofro/TIE_OCR">https://github.com/lofro/TIE_OCR</ext-link>). The basic operation of this system leverages 4 main libraries in python; &#x201c;pytesseract,&#x201d; &#x201c;cv2,&#x201d; &#x201c;serial&#x201d; and &#x201c;tkinter.&#x201d; &#x201c;pytesseract&#x201d; is used for the OCR processing. (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>) &#x201c;Cv2&#x201d; is also an image processing and manipulation library. (<xref ref-type="bibr" rid="B19">Bradski, 2000</xref>) The use of these libraries will be detailed in the subsections to follow. &#x201c;Serial&#x201d; is a library in python that allows for the creation and use of serial sockets (<xref ref-type="bibr" rid="B43">Welcome to PySerial&#x2019;s Documentation PySerial 3.4 Documentation</xref>). Finally, we used the &#x201c;tkinter&#x201d; library to create the display and user interface that is seen in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref> (<xref ref-type="bibr" rid="B42">Lundh, 1999</xref>). To create a video we leveraged the &#x201c;cv2.VideoCapture&#x201d; function to extract frames and the &#x201c;tkinter.Canvas&#x201d; to display these frames. 
When either the <italic>snapshot</italic> button is pressed or the time delay is reached, the current frame captured will be processed.</p>
<fig id="F1" position="float">
<label>FIGURE 1</label>
<caption>
<p>Setup for the camera and pump. General setup for our design, with the monitor display being captured through an external camera is displayed in images <bold>(A)</bold> and <bold>(B)</bold>. In image <bold>(B)</bold> the USB wire connecting the computer to camera can be seen. The current design has the camera directly in front of the text display.</p>
</caption>
<graphic xlink:href="fdata-04-689358-g001.tif"/>
</fig>
<fig id="F2" position="float">
<label>FIGURE 2</label>
<caption>
<p>Python interface. <bold>Left Panel</bold>&#x2013;Displays digital photo of medication pump taken by the camera, <bold>Right Panel</bold> - Displays the interface of our system, with real time data being updated from the extracted features from the medication pump display.</p>
</caption>
<graphic xlink:href="fdata-04-689358-g002.tif"/>
</fig>
</sec>
<sec id="s2-2">
<title>Image Processing and Feature Extraction</title>
<p>The TIE for these images was performed using Python. On the initiation of the code, an interface for the image capture will appear, as shown in <xref ref-type="fig" rid="F2">Figure&#x20;2</xref>. The subsequent image manipulations are demonstrated in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>, which illustrates our method to solve the TIE problem. The TIE problem can be divided into the following sub-problems: detection, localization, tracking, extraction/enhancement, and recognition (<xref ref-type="bibr" rid="B21">Jung et&#x20;al., 2004</xref>). Within our design we focused on localization, extraction/enhancement and recognition, as we can assume the images captured have some form of desired information, and that the features of interest stay relatively constant.</p>
<fig id="F3" position="float">
<label>FIGURE 3</label>
<caption>
<p>Steps for image processing&#x2013;TIE and OCR. TIE &#x3d; Text Information Extraction, OCR &#x3d; Optical Character Recognition. The processing proceeds in alphabetical order. <bold>(A)</bold> is the initial image converted to grayscale. <bold>(B)</bold> is the grayscaled image processed with the Canny function. <bold>(C)</bold> is the grayscaled cropped image using the rectangle contours of image <bold>(B)</bold>. <bold>(D)</bold> is the adaptive mean threshold function of image <bold>(C)</bold>. <bold>(E)</bold> is the cropped and rotated image <bold>(D)</bold> with the key horizontal line contained within a box. <bold>(F)</bold> is the features found with the Google Tesseract of image <bold>(E)</bold>. <bold>(G)</bold> is the string that Google Tesseract output from image <bold>(F)</bold>. <bold>(H)</bold> is the final output of the <bold>(G)</bold> string process.</p>
</caption>
<graphic xlink:href="fdata-04-689358-g003.tif"/>
</fig>
<p>An image can be captured manually or automatically after an allotted time. Once the image is captured it goes through the entire TIE image processing as seen in <xref ref-type="fig" rid="F3">Figure&#x20;3</xref>, proceeding alphabetically going from A to H. The two TIE subgroups of image localization and extraction/enhancement are performed in unison, shown in <xref ref-type="fig" rid="F3">Figures 3A&#x2013;E</xref>. Initially the image is converted to grayscale using a &#x201c;cv2&#x201d; function (<xref ref-type="fig" rid="F3">Figure&#x20;3A</xref>), then using Canny edge detection, the image edges are traced (<xref ref-type="fig" rid="F3">Figure&#x20;3B</xref>). (Open(X). Canny Edge) Canny highlights the edges of an image using the intensity gradient of the image, which is the color difference on local pixels to find the edge of shapes within the image (<xref ref-type="bibr" rid="B6">Canny, 1986</xref>). Using these edges, we can differentiate the display from the larger image by the rectangular aspect of the display. To do this the edges are grouped into contours. Contours are the bounding points that map the outline of a continuous white shape of <xref ref-type="fig" rid="F3">Figure&#x20;3B</xref>. Each continuous white shape is bounded by the smallest, best fitting rectangle that contains all the contours of that group. With all shapes having a respective bounding rectangle, the largest area rectangle can be found, which is assumed to be the display screen and used to give <xref ref-type="fig" rid="F3">Figure&#x20;3C</xref>.</p>
<p>The image is then enlarged to improve the small feature edges for the adaptive mean threshold. The adaptive mean threshold uses the area of local pixel brightness to find a mean brightness which then can be contrasted against the pixel of interest to identify if it should be black or white, resulting in <xref ref-type="fig" rid="F3">Figure&#x20;3D</xref>. (Open(X). Image Thres) Next, the contours of <xref ref-type="fig" rid="F3">Figure&#x20;3D</xref> are found in a similar way as before, (using canny edge detection) with the exception that it looks for the continuous black portions. Like before, the continuous black shapes are all bounded by a rectangle and used with their respective contours to rotate the image and crop the image for a second time. To rotate the image, a key horizontal line is needed (highlighted by the box around a line in <xref ref-type="fig" rid="F3">Figure&#x20;3E</xref>), this line is found by using the relative height to length of the bounding rectangle. The bounding rectangle must have a width greater than &#xbe; of the image width, and of the rectangles that meet this criterion, the one with the smallest height is chosen. Next, with the contours from which the previously described bounding rectangle encompasses, the line of best fit is made. That being, a best fit line is drawn through the key horizontal line. This is the least squares regression line with the contours as the points of interest. The best fit line is created using a &#x201c;cv2&#x201d; function and has an output of a location and an angle of rotation. (<xref ref-type="bibr" rid="B19">Bradski, 2000</xref>) This angle of rotation is also the angle for the image to rotate. To find the cropping area, the width and location of the bounding rectangle for the key horizontal line is used to find the x component of the cropped image (the horizontal location and width). 
The y component (vertical location and height) is assumed to be at the 5 and 90% of the initial image height, which allows the image to be cropped (<xref ref-type="fig" rid="F3">Figure&#x20;3E</xref>). This concludes the localization of the TIE process as the image is focused on only the text display. The last step in enhancement/extraction is performed using Google Tesseract&#x2019;s (Google Inc., <ext-link ext-link-type="uri" xlink:href="https://github.com/tesseract-ocr/tesseract/">https://github.com/tesseract-ocr/tesseract/</ext-link>) feature selection function, this function uses an artificial intelligence algorithm to find all key shapes within the image. (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>) These are then cropped from the initial image and displayed in a consecutive order to give <xref ref-type="fig" rid="F3">Figure&#x20;3F</xref>.</p>
</sec>
<sec id="s2-3">
<title>Character Recognition</title>
<p>The last part of the TIE process, recognition, uses Google Tesseract OCR (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>) to give the output text shown in <xref ref-type="fig" rid="F3">Figure&#x20;3G</xref>. This process, like all OCR, involves comparing a library of identified shapes to the data, in this way the best matched letter is assumed. (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>) From <xref ref-type="fig" rid="F3">Figure&#x20;3G</xref> the desired values are extracted based on the nature of the OCR output and design of the text display, that being, the dose is always followed by the dose amount and left/time remaining, and the medication type is found by a list of predefined words of interest. Together the dose amount and medication can be paired up, and in almost any fashion given as <xref ref-type="fig" rid="F3">Figure&#x20;3H</xref>. To improve accuracy, we found the key words (those being greater than 4 characters of alphabetical values) and connected those with a number in a similar location, for the full OCR code see <xref ref-type="sec" rid="s10">Supplementary Appendix A.2</xref>. From here the data is digitized and can be output into any desired format. A full process map of the above TIE and OCR processes, from image capture to serial output can be seen in <xref ref-type="fig" rid="F4">Figure&#x20;4</xref>.</p>
<fig id="F4" position="float">
<label>FIGURE 4</label>
<caption>
<p>Process Map&#x2013;From Image Capture to Serial Output Figure displays the process taken to convert the image into its digital information and the steps to send the data to ICM&#x2b; (data acquisition platform). The best fit rectangles and line are the key shapes used to crop and rotate the image. Feature Extraction is a Google Tesseract function to find key shapes. Key Information Reduction Function is a function used to find and order the alphanumeric of interest.</p>
</caption>
<graphic xlink:href="fdata-04-689358-g004.tif"/>
</fig>
</sec>
<sec id="s2-4">
<title>Digitized Data Capture</title>
<p>Using a virtual serial port, we sent the serialized data (<xref ref-type="fig" rid="F3">Figure&#x20;3H</xref>) to Intensive Care Monitoring &#x201c;Plus&#x201d; (ICM&#x2b;) (Cambridge Enterprise Ltd., Cambridge, United&#x20;Kingdom, <ext-link ext-link-type="uri" xlink:href="http://icmplus.neurosurg.cam.ac.uk">http://icmplus.neurosurg.cam.ac.uk</ext-link>), generating continuously updating real time data (<xref ref-type="fig" rid="F2">Figure&#x20;2</xref>). The virtual serial port is an internal design that acts like a serial port for any RS232 ASCII streaming device and was made using freely available software (null-modem emulator (com0com), <ext-link ext-link-type="uri" xlink:href="http://com0com.sourceforge.net">http://com0com.sourceforge.net</ext-link>). (<xref ref-type="bibr" rid="B35">Hatchett, 1991</xref>) In ICM&#x2b; the data was parsed into the desired functions identical to the parsing of any other device data. ICM&#x2b; was used as an example of a data acquisition platform for the continuous time-series capture of such data, as it is the platform utilized by our laboratory for bedside physiology research. The above-described design can be integrated with any data acquisition platform which can record serial&#x20;data.</p>
<p>Finally, to show-case the capture of continuous medical pump data in conjunction with other monitoring devices, we recorded continuous bifrontal cerebral regional oxygen saturations using near infrared spectroscopy (Covidien INVOS 7100, Medtronic Canada) and continuous non-invasive arterial blood pressure through a finger-cuff technique (Finapres NOVA Nanocare, Finapres Medical Systems, Enschede, Netherlands, <ext-link ext-link-type="uri" xlink:href="http://www.finapres.com/home">http://www.finapres.com/home</ext-link>), in a volunteer. The regional oxygen saturation was sampled at 1&#xa0;Hz, while the arterial blood pressure was sampled at 250&#xa0;Hz. Therefore, we can run our system in parallel with any number of compatible devices as can be seen in <xref ref-type="fig" rid="F5">Figure&#x20;5</xref>.</p>
<fig id="F5" position="float">
<label>FIGURE 5</label>
<caption>
<p>ICM &#x2b; Final Output. Displays the final output on ICM &#x2b; over a 60&#xa0;s period from top to bottom; the arterial blood pressure, fentanyl, sodium chloride and regional oxygen saturations.</p>
</caption>
<graphic xlink:href="fdata-04-689358-g005.tif"/>
</fig>
</sec>
</sec>
<sec sec-type="results|discussion" id="s3">
<title>Results and Discussion</title>
<sec id="s3-1">
<title>System Performance</title>
<p>As this entire system was a proof of concept, the design proves that there is technology available to complete an effective TIE process on a human-based text interface output, using an intravenous medication pump as an exemplar (examples of captured frames that worked can be seen in <xref ref-type="sec" rid="s10">Supplementary Appendix B</xref>). Furthermore, the design used only a common camera, a laptop and freely available open source software, (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>; <xref ref-type="bibr" rid="B19">Bradski, 2000</xref>; <xref ref-type="bibr" rid="B35">Hatchett, 1991</xref>) demonstrating the accessibility of this conversion system.</p>
<p>Though we built a working prototype, there were some key issues that arose when operating the system. The first and perhaps most important, is the slightly inconsistent nature of the OCR recognition which has been documented in the past (<xref ref-type="bibr" rid="B10">Carvalho, 2016</xref>; <xref ref-type="bibr" rid="B40">Schantz, 1982</xref>; <xref ref-type="bibr" rid="B26">Lee, 2007</xref>). When implementing OCR, there is a tendency for letters and word orders to be mismatched. For example, a common error is the letter &#x201c;f&#x201d; interpreted as a &#x201c;t,&#x201d; i.e. &#x201c;tentanyl&#x201d; instead of &#x201c;fentanyl.&#x201d; This can be bypassed by backend language algorithms and deep learning techniques (<xref ref-type="bibr" rid="B32">Mokhtar et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B25">Le et&#x20;al., 2019</xref>). Another common issue encountered is the mismatch of numbers &#x201c;5,&#x201d; &#x201c;6&#x201d;, &#x201c;8&#x201d; and &#x201c;9,&#x201d; which in operation have become interchangeable with one another if the image is insufficiently processed. To overcome this problem in operation, converting the image to <xref ref-type="fig" rid="F3">Figure&#x20;3F</xref>, with significant space between the lines of text, improved recognition. Also, the enlargement of features made the edges more robust (improving extraction/enhancement of the image). Though it must be acknowledged, in our described design and camera setup, we did not require these improvements to get sufficiently accurate data. Such modifications may be necessary with cheaper and lower resolution cameras.</p>
<p>The second issue is the interference that background noise can have on the image, which interferes with extraction and enhancement. If the display is dim, with a light that reflects directly in the camera, there are scenarios in which the captured image data can be masked behind this light. Likewise, if the camera is moved into such an angle as to obscure the image, the OCR software fails to accurately extract the information. Currently, there are no working examples that we know of that effectively adjust images at obscure angles to effectively output a coherent final image; however, there are emerging proposed solutions (<xref ref-type="bibr" rid="B36">Oakley and Satherley, 1998</xref>; <xref ref-type="bibr" rid="B27">Li and Doermann, 1999</xref>; <xref ref-type="bibr" rid="B28">Li et&#x20;al., 2000</xref>; <xref ref-type="bibr" rid="B45">Witten et&#x20;al., 2004</xref>). Therefore, in the implementation of this design the most effective solution is setting up the camera to extract clear, centered images.</p>
</sec>
<sec id="s3-2">
<title>Reflections on Impact of the Designed System</title>
<sec id="s3-2-1">
<title>The TIE/OCR Process</title>
<p>This system of converting real time data from a medical device display into digital data, is the first that we have knowledge of. As such, this system illustrates that there is a bridge between computers and older devices that lack the necessary compatibility, using TIE processing. In this way there is an opportunity to extract data even when there is no capability of directly accessing the digital port, or when no digital output is offered. However, the design and operation of this system reinforces the need for a robust TIE methodology, due to the tenuous precision in the output. The mixed precision is caused by errors mostly relating to the OCR methodology for recognition, thus the field of text extraction is expanding with new developments and emerging improvements to all aspects of the TIE processing. These include word detection using Markov Random Field (<xref ref-type="bibr" rid="B47">Yalniz and Manmatha, 2019</xref>) and canonical correlation analysis, enhancing image quality by layering multiple images, (<xref ref-type="bibr" rid="B44">Wemhoener et&#x20;al., 2013</xref>) smoothing edges by using corner detection, (<xref ref-type="bibr" rid="B46">Yalniz and Manmatha, 2012</xref>) and having more robust feature detection methods (<xref ref-type="bibr" rid="B45">Witten et&#x20;al., 2004</xref>; <xref ref-type="bibr" rid="B36">Oakley and Satherley, 1998</xref>; <xref ref-type="bibr" rid="B28">Li et&#x20;al., 2000</xref>; <xref ref-type="bibr" rid="B27">Li and Doermann, 1999</xref>) with more areas and designs proposed to improve information retrieval from images (<xref ref-type="bibr" rid="B1">Allan et&#x20;al., 2003</xref>). These improvements highlight ideas to incrementally change the TIE methodology and enhance text extraction. Furthermore, by leveraging Deep Learning techniques before and after the OCR process, the shortcomings that are inherent with the OCR could be addressed. 
The two key areas to apply these Deep Learning solutions would be the creation of the improved text images (<xref ref-type="fig" rid="F3">Figures 3E,F</xref>) and error correction (<xref ref-type="fig" rid="F3">Figures 3G,H</xref>), which have emerging methods to address them (<xref ref-type="bibr" rid="B32">Mokhtar et&#x20;al., 2018</xref>; <xref ref-type="bibr" rid="B25">Le et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B34">Namysl and Konya, 2019</xref>; <xref ref-type="bibr" rid="B48">Yin et&#x20;al., 2019</xref>; <xref ref-type="bibr" rid="B22">Karthikeyan et&#x20;al., 2021</xref>).</p>
<p>For individuals who endeavour to build a similar TIE system, the use of a prebuilt OCR is recommended. The open-source nature of Google Tesseract OCR makes it easily adaptable, while support under the Google banner also gives it access to a vast database to build its character recognition library on. As well, Google Tesseract OCR offers language conversion for over 50 different languages (<xref ref-type="bibr" rid="B26">Lee, 2007</xref>). As global health becomes integrated, systems that can be adapted for a global community become imperative. These platforms bear the added benefit of being supported by a wide group of people, improving not only its functionality but its robustness as it pertains to various aspects including varying text font styles and languages. Therefore, although in theory it is possible to build one&#x2019;s own OCR system, there is limited practical reason to do&#x20;so.</p>
</sec>
<sec id="s3-2-2">
<title>Application to Bedside Medical Big Data</title>
<p>Aside from the novel application of computer vision to solve a digitization problem for medical device data, the TIE also offers the removal of the human element within data collection, as humans account for a large amount of the inconsistency within data processing (<xref ref-type="bibr" rid="B4">Barchard and Pace, 2011</xref>). In both the clinical care provision and biomedical research fields, data accuracy is critical. Errors in bedside or e-chart data entry, associated with human-based methods, can impact care delivery and safety for patients by allowing for treatment decisions to be made on inaccurate information. Similarly, accuracy of data in biomedical research is paramount as the focus of care becomes more responsive and individualized.</p>
<p>The TIE also improves the volume and frequency of data collection from such medical devices, exponentially higher than any human-based recording method. In almost all clinical data extraction, but in particular TBI data, the treatment methodologies are often updated at an hourly rate, with limited concern for the minute-to-minute fluctuations within care. Emerging studies in TBI research identify an optimal cerebral perfusion pressure which is coupled to minute to minute changes in physiology, (<xref ref-type="bibr" rid="B41">Steiner et&#x20;al., 2002</xref>; <xref ref-type="bibr" rid="B2">Aries et&#x20;al., 2012</xref>), with measures like intracranial pressure being well documented as having targeted goals to achieve (<xref ref-type="bibr" rid="B5">Cabella et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B7">Carney et&#x20;al., 2017</xref>; <xref ref-type="bibr" rid="B50">Zeiler et&#x20;al., 2020</xref>). Such targets require the implementation of high frequency data analysis; however, the treatments associated with these goals are either undocumented, or lack precision in documentation as to the exact momentary changes within care. Thus, methods to improve time resolution, allowing data to be linked with other physiologic information for a clearer picture of treatment response/effect, are required, as highlighted in our example in <xref ref-type="fig" rid="F5">Figure&#x20;5</xref>. Moreover, the nature of digitized information makes the update, dissemination, and archiving to prevent data loss a nearly trivial task. Thus, the breakdown or damage to one device can be mitigated by having continuous multi-connected data streams, limiting data&#x20;loss.</p>
</sec>
</sec>
</sec>
<sec id="s4">
<title>Future Directions</title>
<p>Despite the novel and interesting results described, future work is required in this area for further optimization. For this type of design there is a need to focus on three basic future implementations: the first is creating a more robust TIE process with a focus on image enhancement and recognition. Such work will encompass variation in camera face angles and screen brightness/hues. The goal is to improve the efficiency of the output to more suitably honed results. Thus, the implementation of some previously proposed solutions to the OCR process using Deep Learning methods will be explored, including: convolutional neural networks, (<xref ref-type="bibr" rid="B1">Allan et&#x20;al., 2003</xref>) neural machine translation techniques (<xref ref-type="bibr" rid="B32">Mokhtar et&#x20;al., 2018</xref>) and improved lexicons.</p>
<p>The second area to address is a refined layout and interface. The goal for this style of technology is to have any user intuitively operate the device. As such, there will be work put in place to design a functional package that can be downloaded and will run like any other application. All of this will be done with freely available open-source software in order to promote the goal of improved data management and global health. One further aim is to expand applications to other medical devices and pumps that are commercially available.</p>
<p>Finally, we aim to deploy this technology in both simulated and real-world healthcare environments. An example would be to set up this device in a simulation lab which is utilized to practice critical resuscitation skills for clinicians and trainees, prior to real-world application. Once feasibility and accuracy have been assessed in the simulated environment, the system can then be deployed in a real-world critical care environment here at the Health Sciences Centre in Winnipeg or other centers. Here real-time operational limitations will be explored, and the algorithms improved as needed. All future renditions and investigations will lead to improvements in the source code, which will be made openly available as new versions arise on GitHub.</p>
</sec>
</body>
<back>
<sec id="s5">
<title>Data Availability Statement</title>
<p>The raw data supporting the conclusion of this article will be made available by the authors, without undue reservation.</p>
</sec>
<sec id="s6">
<title>Author Contributions</title>
<p>LF was responsible for concept, design, analysis, manuscript preparation. JD was responsible for concept, design and manuscript preparation. CB, AG, and AS were responsible for manuscript composition and editing. BU was responsible for concept and manuscript editing. FZ was responsible for concept, design, analysis, manuscript preparation/editing and supervision.</p>
</sec>
<sec id="s7">
<title>Funding</title>
<p>This work was directly supported by the Manitoba Public Insurance (MPI) Neuroscience/TBI Research Endowment, the University of Manitoba Department of Surgery GFT Grant and University Research Grant Program (URGP). In addition, FAZ receives research support from the United&#x20;States National Institutes of Health (NIH) through the National Institute of Neurological Disorders and Stroke (NINDS) (Grant &#x23;: R03NS114335-01), the Canadian Institutes of Health Research (CIHR) (Grant &#x23;: 432061), the Canada Foundation for Innovation (CFI) (Project &#x23;: 38583), Research Manitoba (Grant &#x23;: 3906), the University of Manitoba VPRI Research Investment Fund (RIF), the University of Manitoba Centre on Aging, and the University of Manitoba Rudy Falk Clinician-Scientist Professorship. LF is supported through the University of Manitoba&#x2014;Department of Surgery GFT Research Grant, and the University of Manitoba Office of Research Services (ORS)&#x2014;University Research Grant Program (URGP). CB is supported through the Centre on Aging Fellowship at the University of Manitoba. AG is supported through the University of Manitoba Clinician Investigator Program.</p>
</sec>
<sec sec-type="COI-statement" id="s8">
<title>Conflict of Interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec sec-type="disclaimer" id="s9">
<title>Publisher&#x2019;s Note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec id="s10">
<title>Supplementary Material</title>
<p>The Supplementary Material for this article can be found online at: <ext-link ext-link-type="uri" xlink:href="https://www.frontiersin.org/articles/10.3389/fdata.2021.689358/full#supplementary-material">https://www.frontiersin.org/articles/10.3389/fdata.2021.689358/full&#x23;supplementary-material</ext-link>
</p>
<supplementary-material xlink:href="DataSheet2.docx" id="SM1" mimetype="application/docx" xmlns:xlink="http://www.w3.org/1999/xlink"/>
<supplementary-material xlink:href="DataSheet1.docx" id="SM2" mimetype="application/docx" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<ref-list>
<title>References</title>
<ref id="B1">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Allan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Aslam</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Belkin</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Buckley</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Callan</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Croft</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2003</year>). <article-title>Challenges in Information Retrieval and Language Modeling: Report of a Workshop Held at the Center for Intelligent Information Retrieval, University of Massachusetts Amherst, September 2002</article-title>. <source>ACM SIGIR Forum</source> <volume>37</volume>, <fpage>31</fpage>. <pub-id pub-id-type="doi">10.1145/945546.945549</pub-id> </citation>
</ref>
<ref id="B2">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aries</surname>
<given-names>M. J.</given-names>
</name>
<name>
<surname>Czosnyka</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Budohoski</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Steiner</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Lavinio</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Kolias</surname>
<given-names>A.</given-names>
</name>
<etal/>
</person-group> (<year>2012</year>). <article-title>Continuous Determination of Optimal Cerebral Perfusion Pressure in Traumatic Brain Injury&#x2a;</article-title>. <source>Crit. Care Med.</source> <volume>40</volume>, <fpage>2456</fpage>. <pub-id pub-id-type="doi">10.1097/ccm.0b013e3182514eb6</pub-id> </citation>
</ref>
<ref id="B3">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Balestreri</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Czosnyka</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Steiner</surname>
<given-names>L. A.</given-names>
</name>
<name>
<surname>Hiler</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Schmidt</surname>
<given-names>E. A.</given-names>
</name>
<name>
<surname>Matta</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2005</year>). <article-title>Association between Outcome, Cerebral Pressure Reactivity and Slow ICP Waves Following Head Injury</article-title>. <source>Acta Neurochir. Suppl.</source> <volume>95</volume>, <fpage>25</fpage>. <pub-id pub-id-type="doi">10.1007/3-211-32318-x_6</pub-id> </citation>
</ref>
<ref id="B4">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Barchard</surname>
<given-names>K. A.</given-names>
</name>
<name>
<surname>Pace</surname>
<given-names>L. A.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>Preventing Human Error: The Impact of Data Entry Methods on Data Accuracy and Statistical Results</article-title>. <source>Comput. Hum. Behav.</source> <volume>27</volume>, <fpage>1834</fpage>. <pub-id pub-id-type="doi">10.1016/j.chb.2011.04.004</pub-id> </citation>
</ref>
<ref id="B19">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Bradski</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2000</year>). <collab>Home and Index OpenCV-Python Tutorials 1 Documentation</collab>. <comment>The OpenCV Library</comment>. <ext-link ext-link-type="uri" xlink:href="https://opencv.org/">https://opencv.org/</ext-link> (<comment>Accessed August 1, 2021</comment>).</citation>
</ref>
<ref id="B5">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cabella</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Donnelly</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cardim</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>X.</given-names>
</name>
<name>
<surname>Cabeleira</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Smielewski</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>An Association between ICP-Derived Data and Outcome in TBI Patients: The Role of Sample Size</article-title>. <source>Neurocrit. Care</source> <volume>27</volume>, <fpage>103</fpage>. <pub-id pub-id-type="doi">10.1007/s12028-016-0319-x</pub-id> </citation>
</ref>
<ref id="B6">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Canny</surname>
<given-names>J.</given-names>
</name>
</person-group> (<year>1986</year>). <article-title>A Computational Approach to Edge Detection</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source>, <volume>8</volume> (<issue>6</issue>), <fpage>679</fpage>&#x2013;<lpage>698</lpage>. <pub-id pub-id-type="doi">10.1109/tpami.1986.4767851</pub-id> </citation>
</ref>
<ref id="B7">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Carney</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Totten</surname>
<given-names>A. M.</given-names>
</name>
<name>
<surname>O&#x27;Reilly</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Ullman</surname>
<given-names>J.&#x20;S.</given-names>
</name>
<name>
<surname>Hawryluk</surname>
<given-names>G. W. J.</given-names>
</name>
<etal/>
</person-group> (<year>2017</year>). <article-title>Guidelines for the Management of Severe Traumatic Brain Injury</article-title>. <source>Neurosurgery</source> <edition>Fourth Edition</edition>, <volume>80</volume>, <fpage>6</fpage>&#x2013;<lpage>15</lpage>. <pub-id pub-id-type="doi">10.1227/neu.0000000000001432</pub-id> </citation>
</ref>
<ref id="B8">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carvalho</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>Miau, a Microbalance Autosampler, HardwareX</article-title>. <volume>10</volume>, <fpage>e00215</fpage>. <pub-id pub-id-type="doi">10.1016/j.ohx.2021.e00215</pub-id> </citation>
</ref>
<ref id="B9">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Carvalho</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Integration of Analytical Instruments with Computer Scripting</article-title>. <source>J.&#x20;Lab. Autom.</source> <volume>18</volume>, <fpage>328</fpage>. <pub-id pub-id-type="doi">10.1177/2211068213476288</pub-id> </citation>
</ref>
<ref id="B10">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Carvalho</surname>
<given-names>M. C.</given-names>
</name>
</person-group> (<year>2016</year>). <source>Optical Character Recognition Practical Laboratory Automation</source>. <publisher-loc>Weinheim, Germany</publisher-loc>: <publisher-name>John Wiley &#x26; Sons</publisher-name>, <fpage>207</fpage>&#x2013;<lpage>209</lpage>. <pub-id pub-id-type="doi">10.1002/9783527801954.app2</pub-id> </citation>
</ref>
<ref id="B11">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Chalmers</surname>
<given-names>T. C.</given-names>
</name>
<name>
<surname>Smith</surname>
<given-names>H.</given-names>
<suffix>Jr</suffix>
</name>
<name>
<surname>Blackburn</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Silverman</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>1981</year>). <article-title>A Method For Assessing The Quality Of a Randomized Control Trial, Control</article-title>. <source>Clin. Trials</source> <volume>2</volume>, <fpage>31</fpage>. </citation>
</ref>
<ref id="B18">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Cheng</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Bouman</surname>
<given-names>C. A.</given-names>
</name>
<name>
<surname>Allebach</surname>
<given-names>J.&#x20;P.</given-names>
</name>
</person-group> (<year>1997</year>). <article-title>Multiscale Document Segmentation</article-title>, in <conf-name>IS&#x26;T 50th Annual Conference</conf-name>, <conf-loc>Cambridge, MA</conf-loc>, <conf-date>May 18&#x2013;23, 1997</conf-date>, pp. <fpage>417</fpage>&#x2013;<lpage>425</lpage>. </citation>
</ref>
<ref id="B12">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Delaney</surname>
<given-names>N. F.</given-names>
</name>
<name>
<surname>Rojas Echenique</surname>
<given-names>J.&#x20;I.</given-names>
</name>
<name>
<surname>Marx</surname>
<given-names>C. J.</given-names>
</name>
</person-group> (<year>2013</year>). <article-title>Clarity: An Open-Source Manager for Laboratory Automation</article-title>. <source>J.&#x20;Lab. Autom.</source> <volume>18</volume>, <fpage>171</fpage>. <pub-id pub-id-type="doi">10.1177/2211068212460237</pub-id> </citation>
</ref>
<ref id="B14">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Feng</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Bouman</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Cheng</surname>
<given-names>H.</given-names>
</name>
</person-group> (<year>2006</year>). <article-title>High-Quality MRC Document Coding</article-title>. <source>IEEE Trans. Image Process.</source>. <pub-id pub-id-type="doi">10.1109/tip.2006.877493</pub-id> </citation>
</ref>
<ref id="B15">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Fischer</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Lienhart</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Effelsberg</surname>
<given-names>W.</given-names>
</name>
</person-group> (<year>1995</year>). <article-title>Automatic Recognition of Film Genres</article-title>.&#x201d; in <conf-name>Proceedings of the Third ACM International Conference on Multimedia</conf-name>. <publisher-loc>New York, NY, USA</publisher-loc>: <publisher-name>Association for Computing Machinery</publisher-name>, <fpage>295</fpage>&#x2013;<lpage>304</lpage>. <pub-id pub-id-type="doi">10.1145/217279.215283</pub-id> </citation>
</ref>
<ref id="B16">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Froese</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Dian</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Batson</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gomez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Alarifi</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Unger</surname>
<given-names>B.</given-names>
</name>
<etal/>
</person-group> (<year>2020</year>). <article-title>The Impact of Vasopressor and Sedative Agents on Cerebrovascular Reactivity and Compensatory Reserve in Traumatic Brain Injury: An Exploratory Analysis</article-title>. <source>Neurotrauma Rep.</source> <volume>1</volume>, <fpage>157</fpage>. <pub-id pub-id-type="doi">10.1089/neur.2020.0028</pub-id> </citation>
</ref>
<ref id="B17">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Froese</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Dian</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Batson</surname>
<given-names>C.</given-names>
</name>
<name>
<surname>Gomez</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Unger</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Zeiler</surname>
<given-names>F. A.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>The Impact of Hypertonic Saline on Cerebrovascular Reactivity and Compensatory Reserve in Traumatic Brain Injury: An Exploratory Analysis</article-title>. <source>Acta Neurochir. (Wien)</source> <volume>1</volume>. <pub-id pub-id-type="doi">10.1007/s00701-020-04579-0</pub-id> </citation>
</ref>
<ref id="B35">
<citation citation-type="web">
<person-group person-group-type="author">
<name>
<surname>Hatchett</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Vfrolov</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>1991</year>). <collab>Null-Modem Emulator</collab> (<article-title>Com0com) - Virtual Serial Port Driver for Windows</article-title>. <ext-link ext-link-type="uri" xlink:href="https://sourceforge.net/projects/com0com/">https://sourceforge.net/projects/com0com/</ext-link> (<comment>Accessed August 1, 2021</comment>). </citation>
</ref>
<ref id="B21">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Jung</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>In Kim</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Jain</surname>
<given-names>A. K.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Text Information Extraction in Images and Video: A Survey</article-title>. <source>Pattern Recognit</source> <volume>37</volume>, <fpage>977</fpage>. <pub-id pub-id-type="doi">10.1016/j.patcog.2003.10.012</pub-id> </citation>
</ref>
<ref id="B22">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Karthikeyan</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Seco de Herrera</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Doctor</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Mirza</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2021</year>). <article-title>An OCR Post-Correction Approach Using Deep Learning for Processing Medical Reports</article-title>. <source>IEEE Trans. Circuits Syst. Video Technol.</source> <volume>1</volume>. <pub-id pub-id-type="doi">10.1109/tcsvt.2021.3087641</pub-id> </citation>
</ref>
<ref id="B23">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Kim</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Ryu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2002</year>). &#x201c;<article-title>A Robust License-Plate Extraction Method under Complex Image Conditions</article-title>,&#x201d; in <conf-name>Proceedings of the 16 Th International Conference on Pattern Recognition (ICPR&#x2019;02) Volume</conf-name> (<publisher-loc>USA</publisher-loc>: <publisher-name>IEEE Computer Society</publisher-name>), <fpage>30216</fpage>. </citation>
</ref>
<ref id="B24">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Klein</surname>
<given-names>S. P.</given-names>
</name>
<name>
<surname>Fieuws</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Meyfroidt</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Depreitere</surname>
<given-names>B.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Effects of Norepinephrine, Propofol and Hemoglobin Concentration on Dynamic Measurements of Cerebrovascular Reactivity in Acute Brain Injury</article-title>. <source>J.&#x20;Neurotrauma</source> <volume>38</volume> (<issue>4</issue>), <fpage>506</fpage>&#x2013;<lpage>512</lpage>. <pub-id pub-id-type="doi">10.1089/neu.2020.7160</pub-id> </citation>
</ref>
<ref id="B25">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Le</surname>
<given-names>A. D.</given-names>
</name>
<name>
<surname>Pham</surname>
<given-names>D. V.</given-names>
</name>
<name>
<surname>Nguyen</surname>
<given-names>T. A.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Deep Learning Approach for Receipt Recognition</article-title>,&#x201d; in <source>Future Data and Security Engineering</source>. Editors <person-group person-group-type="editor">
<name>
<surname>Dang</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>K&#xfc;ng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Takizawa</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Bui</surname>
<given-names>S. H.</given-names>
</name>
</person-group> (<publisher-loc>Cham</publisher-loc>: <publisher-name>Springer International Publishing</publisher-name>), <fpage>705</fpage>&#x2013;<lpage>712</lpage>. <pub-id pub-id-type="doi">10.1007/978-3-030-35653-8_50</pub-id> </citation>
</ref>
<ref id="B26">
<citation citation-type="other">
<person-group person-group-type="author">
<name>
<surname>Lee</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2007</year>). <source>Pytesseract: Python-Tesseract Is a Python Wrapper for Google&#x2019;s Tesseract-OCR</source>.</citation>
</ref>
<ref id="B27">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Doermann</surname>
<given-names>D.</given-names>
</name>
</person-group> (<year>1999</year>). &#x201c;<article-title>Text Enhancement in Digital Video Using Multiple Frame Integration</article-title>,&#x201d; in <conf-name>Proceedings of the Seventh ACM International Conference on Multimedia (Part 1) - MULTIMEDIA &#x2019;99</conf-name> (<publisher-loc>Orlando, Florida, United&#x20;States</publisher-loc>: <publisher-name>ACM Press</publisher-name>), <fpage>19</fpage>&#x2013;<lpage>22</lpage>. <pub-id pub-id-type="doi">10.1145/319463.319466</pub-id> </citation>
</ref>
<ref id="B28">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Li</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Doermann</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Kia</surname>
<given-names>O.</given-names>
</name>
</person-group> (<year>2000</year>). <article-title>Automatic Text Detection and Tracking in Digital Video</article-title>. <source>IEEE Trans. Image Process.</source> <volume>9</volume>, <fpage>147</fpage>. <pub-id pub-id-type="doi">10.1109/83.817607</pub-id> </citation>
</ref>
<ref id="B42">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lundh</surname>
<given-names>F.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>An Introduction to Tkinter</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="Www.Pythonware.Com/Library/Tkinter/Introduction/Index.Htm">Www.Pythonware.Com/Library/Tkinter/Introduction/Index.Htm</ext-link>
</comment>.</citation>
</ref>
<ref id="B30">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mardis</surname>
<given-names>E. R.</given-names>
</name>
</person-group> (<year>2011</year>). <article-title>A Decade&#x2019;s Perspective on DNA Sequencing Technology</article-title>. <source>Nature</source> <volume>470</volume>, <fpage>198</fpage>. <pub-id pub-id-type="doi">10.1038/nature09796</pub-id> </citation>
</ref>
<ref id="B31">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Matchett</surname>
<given-names>K. B.</given-names>
</name>
<name>
<surname>Lynam-Lennon</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Watson</surname>
<given-names>R. W.</given-names>
</name>
<name>
<surname>Brown</surname>
<given-names>J.&#x20;A. L.</given-names>
</name>
</person-group> (<year>2017</year>). <article-title>Advances in Precision Medicine: Tailoring Individualized Therapies</article-title>. <source>Cancers (Basel)</source> <volume>9</volume>. <pub-id pub-id-type="doi">10.3390/cancers9110146</pub-id> </citation>
</ref>
<ref id="B32">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Mokhtar</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Bukhari</surname>
<given-names>S. S.</given-names>
</name>
<name>
<surname>Dengel</surname>
<given-names>A.</given-names>
</name>
</person-group> (<year>2018</year>). &#x201c;<article-title>OCR Error Correction: State-Of-The-Art vs an NMT-Based Approach</article-title>,&#x201d; in <conf-name>13th IAPR International Workshop on Document Analysis Systems</conf-name>, <conf-loc>Vienna, Austria</conf-loc> (<publisher-name>DAS</publisher-name>), <fpage>429</fpage>&#x2013;<lpage>434</lpage>. <pub-id pub-id-type="doi">10.1109/das.2018.63</pub-id> </citation>
</ref>
<ref id="B34">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Namysl</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Konya</surname>
<given-names>I.</given-names>
</name>
</person-group> (<year>2019</year>). &#x201c;<article-title>Efficient, Lexicon-free OCR Using Deep Learning</article-title>,&#x201d; in <conf-name>International Conference on Document Analysis and Recognition</conf-name>, <conf-loc>Sydney, Australia</conf-loc>, <conf-date>September 20&#x2013;25, 2019</conf-date> (<publisher-name>ICDAR</publisher-name>), <fpage>295</fpage>&#x2013;<lpage>301</lpage>. <pub-id pub-id-type="doi">10.1109/icdar.2019.00055</pub-id> </citation>
</ref>
<ref id="B36">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Oakley</surname>
<given-names>J.&#x20;P.</given-names>
</name>
<name>
<surname>Satherley</surname>
<given-names>B. L.</given-names>
</name>
</person-group> (<year>1998</year>). <article-title>Improving Image Quality in Poor Visibility Conditions Using a Physical Model for Contrast Degradation</article-title>. <source>IEEE Trans. Image Process.</source> <volume>7</volume>, <fpage>167</fpage>. <pub-id pub-id-type="doi">10.1109/83.660994</pub-id> </citation>
</ref>
<ref id="B37">
<citation citation-type="web">
<collab>OpenCV</collab>. <article-title>Canny Edge Detection</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://docs.opencv.org/trunk/da/d22/tutorial_py_canny.html">https://docs.opencv.org/trunk/da/d22/tutorial_py_canny.html</ext-link>
</comment>. </citation>
</ref>
<ref id="B38">
<citation citation-type="web">
<collab>OpenCV</collab>. <article-title>Image Thresholding</article-title>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html">https://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html</ext-link>
</comment>. </citation>
</ref>
<ref id="B39">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Park</surname>
<given-names>S. H.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>K. I.</given-names>
</name>
<name>
<surname>Jung</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Kim</surname>
<given-names>H. J.</given-names>
</name>
</person-group> (<year>1999</year>). <article-title>Locating Car License Plates Using Neural Networks</article-title>. <source>Electron. Lett.</source> <volume>35</volume>, <fpage>1475</fpage>. <pub-id pub-id-type="doi">10.1049/el:19990977</pub-id> </citation>
</ref>
<ref id="B40">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Schantz</surname>
<given-names>H. F.</given-names>
</name>
</person-group> (<year>1982</year>). <source>The History of OCR, Optical Character Recognition</source>. <publisher-loc>Manchester Center, VT</publisher-loc>: <publisher-name>Recognition Technologies Users Association</publisher-name>.</citation>
</ref>
<ref id="B41">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Steiner</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Czosnyka</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Piechnik</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Smielewski</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Chatfield</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Menon</surname>
<given-names>D.</given-names>
</name>
<etal/>
</person-group> (<year>2002</year>). <article-title>Continuous Monitoring of Cerebrovascular Pressure Reactivity Allows Determination of Optimal Cerebral Perfusion Pressure in Patients with Traumatic Brain Injury</article-title>. <source>Crit. Care Med.</source> <volume>30</volume>, <fpage>733</fpage>. <pub-id pub-id-type="doi">10.1097/00003246-200204000-00002</pub-id> </citation>
</ref>
<ref id="B43">
<citation citation-type="web">
<collab>Welcome to PySerial&#x2019;s Documentation PySerial 3.4 Documentation</collab>. <comment>Available at: <ext-link ext-link-type="uri" xlink:href="https://pyserial.readthedocs.io/en/latest/">https://pyserial.readthedocs.io/en/latest/</ext-link>
</comment>.</citation>
</ref>
<ref id="B44">
<citation citation-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Wemhoener</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Yalniz</surname>
<given-names>I. Z.</given-names>
</name>
<name>
<surname>Manmatha</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2013</year>). &#x201c;<article-title>Creating an Improved Version Using Noisy OCR from Multiple Editions</article-title>,&#x201d; in <conf-name>12th International Conference on Document Analysis and Recognition</conf-name>, <conf-loc>Washington, DC</conf-loc>, <conf-date>August 25&#x2013;28, 2013</conf-date> (<publisher-loc>Washington, DC, USA</publisher-loc>: <publisher-name>IEEE</publisher-name>), <fpage>160</fpage>&#x2013;<lpage>164</lpage>. <pub-id pub-id-type="doi">10.1109/icdar.2013.39</pub-id> </citation>
</ref>
<ref id="B45">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Witten</surname>
<given-names>I. H.</given-names>
</name>
<name>
<surname>Don</surname>
<given-names>K. J.</given-names>
</name>
<name>
<surname>Dewsnip</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Tablan</surname>
<given-names>V.</given-names>
</name>
</person-group> (<year>2004</year>). <article-title>Text Mining in a Digital Library</article-title>. <source>Int. J.&#x20;Digit. Libr.</source> <volume>4</volume>, <fpage>56</fpage>. <pub-id pub-id-type="doi">10.1007/s00799-003-0066-4</pub-id> </citation>
</ref>
<ref id="B46">
<citation citation-type="book">
<person-group person-group-type="author">
<name>
<surname>Yalniz</surname>
<given-names>I. Z.</given-names>
</name>
<name>
<surname>Manmatha</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2012</year>). &#x201c;<article-title>An Efficient Framework for Searching Text in Noisy Document Images</article-title>,&#x201d; in <source>10th IAPR International Workshop on Document Analysis Systems</source>. <publisher-loc>Gold Coast, Australia</publisher-loc>: <publisher-name>IEEE</publisher-name>, <fpage>48</fpage>&#x2013;<lpage>52</lpage>. <pub-id pub-id-type="doi">10.1109/das.2012.18</pub-id> </citation>
</ref>
<ref id="B47">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yalniz</surname>
<given-names>I. Z.</given-names>
</name>
<name>
<surname>Manmatha</surname>
<given-names>R.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Dependence Models for Searching Text in Document Images</article-title>. <source>IEEE Trans. Pattern Anal. Mach. Intell.</source> <volume>41</volume>, <fpage>49</fpage>. <pub-id pub-id-type="doi">10.1109/tpami.2017.2780108</pub-id> </citation>
</ref>
<ref id="B48">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Yin</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Hong</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Yang</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Xiong</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Gui</surname>
<given-names>G.</given-names>
</name>
</person-group> (<year>2019</year>). <article-title>Deep Learning-Aided OCR Techniques for Chinese Uppercase Characters in the Application of Internet of Things</article-title>. <source>IEEE Access</source> <volume>7</volume>, <fpage>47043</fpage>. <pub-id pub-id-type="doi">10.1109/access.2019.2909401</pub-id> </citation>
</ref>
<ref id="B49">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeiler</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Donnelly</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Smielewski</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Menon</surname>
<given-names>D. K.</given-names>
</name>
<name>
<surname>Hutchinson</surname>
<given-names>P. J.</given-names>
</name>
<name>
<surname>Czosnyka</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Critical Thresholds of Intracranial Pressure-Derived Continuous Cerebrovascular Reactivity Indices for Outcome Prediction in Noncraniectomized Patients with Traumatic Brain Injury</article-title>. <source>J.&#x20;Neurotrauma</source> <volume>35</volume>, <fpage>1107</fpage>&#x2013;<lpage>1115</lpage>. <pub-id pub-id-type="doi">10.1089/neu.2017.5472</pub-id> </citation>
</ref>
<ref id="B50">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeiler</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Ercole</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cabeleira</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Beqiri</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Zoerle</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Carbonara</surname>
<given-names>M.</given-names>
</name>
</person-group> (<year>2020</year>). <article-title>Patient-Specific ICP Epidemiologic Thresholds in Adult Traumatic Brain Injury: A CENTER-TBI Validation Study</article-title>. <source>J.&#x20;Neurosurg. Anesthesiol</source> <volume>33</volume> (<issue>1</issue>), <fpage>28</fpage>&#x2013;<lpage>38</lpage>. <pub-id pub-id-type="doi">10.1097/ANA.0000000000000616</pub-id> </citation>
</ref>
<ref id="B13">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeiler</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Ercole</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Cabeleira</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Carbonara</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Stocchetti</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Menon</surname>
<given-names>D. K.</given-names>
</name>
<etal/>
<collab>the High Resolution ICU (HR ICU) Sub-study Participants and Investigators</collab>
</person-group> (<year>2019</year>). <article-title>Comparison of Performance of Different Optimal Cerebral Perfusion Pressure Parameters for Outcome Prediction in Adult Traumatic Brain Injury: A Collaborative European NeuroTrauma Effectiveness Research in Traumatic Brain Injury (CENTER-TBI) Study</article-title>. <source>J.&#x20;Neurotrauma</source> <volume>36</volume>, <fpage>1505</fpage>. <pub-id pub-id-type="doi">10.1089/neu.2018.6182</pub-id> </citation>
</ref>
<ref id="B51">
<citation citation-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zeiler</surname>
<given-names>F. A.</given-names>
</name>
<name>
<surname>Lee</surname>
<given-names>J.&#x20;K.</given-names>
</name>
<name>
<surname>Smielewski</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Czosnyka</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Brady</surname>
<given-names>K.</given-names>
</name>
</person-group> (<year>2018</year>). <article-title>Validation of Intracranial Pressure-Derived Cerebrovascular Reactivity Indices against the Lower Limit of Autoregulation, Part II: Experimental Model of Arterial Hypotension</article-title>. <source>J.&#x20;Neurotrauma</source> <volume>35</volume>, <fpage>2812</fpage>&#x2013;<lpage>2819</lpage>. <pub-id pub-id-type="doi">10.1089/neu.2017.5604</pub-id> </citation>
</ref>
</ref-list>
</back>
</article>