<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.3 20070202//EN" "journalpublishing.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" article-type="research-article" dtd-version="2.3" xml:lang="EN">
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">Front. Psychol.</journal-id>
<journal-title>Frontiers in Psychology</journal-title>
<abbrev-journal-title abbrev-type="pubmed">Front. Psychol.</abbrev-journal-title>
<issn pub-type="epub">1664-1078</issn>
<publisher>
<publisher-name>Frontiers Media S.A.</publisher-name>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="doi">10.3389/fpsyg.2024.1192565</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Psychology</subject>
<subj-group>
<subject>Original Research</subject>
</subj-group>
</subj-group>
</article-categories>
<title-group>
<article-title>The dynamics of experiencing Gestalt and Aha in cubist art: pupil responses and art evaluations show a complex interplay of task, stimuli content, and time course</article-title>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="yes">
<name>
<surname>Spee</surname>
<given-names>Blanca T. M.</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<xref ref-type="aff" rid="aff3"><sup>3</sup></xref>
<xref ref-type="corresp" rid="c001"><sup>&#x002A;</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/1210731/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Arato</surname>
<given-names>Jozsef</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2311121/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Mikuni</surname>
<given-names>Jan</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/2281017/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Tran</surname>
<given-names>Ulrich S.</given-names>
</name>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/43579/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Pelowski</surname>
<given-names>Matthew</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/154837/overview"/>
</contrib>
<contrib contrib-type="author">
<name>
<surname>Leder</surname>
<given-names>Helmut</given-names>
</name>
<xref ref-type="aff" rid="aff1"><sup>1</sup></xref>
<xref ref-type="aff" rid="aff2"><sup>2</sup></xref>
<uri xlink:href="https://loop.frontiersin.org/people/77149/overview"/>
</contrib>
</contrib-group>
<aff id="aff1"><sup>1</sup><institution>Vienna Cognitive Science Hub, University of Vienna</institution>, <addr-line>Vienna</addr-line>, <country>Austria</country></aff>
<aff id="aff2"><sup>2</sup><institution>Department of Cognition, Emotion, and Methods in Psychology, Faculty of Psychology, University of Vienna</institution>, <addr-line>Vienna</addr-line>, <country>Austria</country></aff>
<aff id="aff3"><sup>3</sup><institution>Department of Neurology, Center of Expertise for Parkinson &#x0026; Movement Disorders, Radboud University Medical Center, Donders Institute for Brain, Cognition and Behavior</institution>, <addr-line>Nijmegen</addr-line>, <country>Netherlands</country></aff>
<author-notes>
<fn id="fn0002" fn-type="edited-by"><p>Edited by: Massimiliano Di Luca, University of Birmingham, United Kingdom</p></fn>
<fn id="fn0003" fn-type="edited-by"><p>Reviewed by: Ronald H&#x00FC;bner, University of Konstanz, Germany</p>
<p>Haokui Xu, Zhejiang University, China</p></fn>
<corresp id="c001">&#x002A;Correspondence: Blanca T. M. Spee, <email>blanca.spee@univie.ac.at</email></corresp>
</author-notes>
<pub-date pub-type="epub">
<day>13</day>
<month>03</month>
<year>2024</year>
</pub-date>
<pub-date pub-type="collection">
<year>2024</year>
</pub-date>
<volume>15</volume>
<elocation-id>1192565</elocation-id>
<history>
<date date-type="received">
<day>23</day>
<month>03</month>
<year>2023</year>
</date>
<date date-type="accepted">
<day>13</day>
<month>02</month>
<year>2024</year>
</date>
</history>
<permissions>
<copyright-statement>Copyright &#x00A9; 2024 Spee, Arato, Mikuni, Tran, Pelowski and Leder.</copyright-statement>
<copyright-year>2024</copyright-year>
<copyright-holder>Spee, Arato, Mikuni, Tran, Pelowski and Leder</copyright-holder>
<license xlink:href="http://creativecommons.org/licenses/by/4.0/">
<p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (CC BY). The use, distribution or reproduction in other forums is permitted, provided the original author(s) and the copyright owner(s) are credited and that the original publication in this journal is cited, in accordance with accepted academic practice. No use, distribution or reproduction is permitted which does not comply with these terms.</p>
</license>
</permissions>
<abstract>
<sec>
<title>Introduction</title>
<p>Gestalt perception refers to the cognitive ability to perceive various elements as a unified whole. In our study, we delve deeper into the phenomenon of Gestalt recognition in visual cubist art, a transformative process culminating in what is often described as an Aha moment. This Aha moment signifies a sudden understanding of what is seen, merging seemingly disparate elements into a coherent meaningful picture. The onset of this Aha moment can vary, either appearing almost instantaneously, which is in line with theories of hedonic fluency, or manifesting after a period of time, supporting the concept of delayed but more in-depth meaningful insight.</p>
</sec>
<sec>
<title>Methods</title>
<p>We employed pupillometry to measure cognitive and affective shifts during art interaction, analyzing both maximum pupil dilation and average dilation across the trial. The study consisted of two parts: in the first, 84 participants identified faces in cubist paintings under various conditions, with Aha moments and pupil dilation measured. In part 2, the same 84 participants assessed the artworks through ratings in a no-task free-viewing condition.</p>
</sec>
<sec>
<title>Results</title>
<p>Results of part 1 indicate a distinctive pattern of pupil dilation, with maximum dilation occurring at both trial onset and end. Longer response times were observed for high-fluent, face-present stimuli, aligning with a delayed but accurate Aha-moment through recognition. Additionally, the time of maximum pupil dilation, rather than average dilation, exhibited significant associations, being later for high-fluent, face-present stimuli and correct detections. In part 2, average, not the time of maximum pupil dilation emerged as the significant factor. Face-stimuli and highly accessible art evoked stronger dilations, also reflecting high clearness and negative valence ratings.</p>
</sec>
<sec>
<title>Discussion</title>
<p>The study underscores a complex relationship between the timing of recognition and the Aha moment, suggesting nuanced differences in emotional and cognitive responses during art viewing. Pupil dilation measures offer insight into these processes especially for moments of recognition, though their application in evaluating emotional responses through artwork ratings warrants further exploration.</p>
</sec>
</abstract>
<kwd-group>
<kwd>art research</kwd>
<kwd>pupillometry</kwd>
<kwd>Gestalt perception</kwd>
<kwd>Aha moment</kwd>
<kwd>empirical art and aesthetics</kwd>
</kwd-group>
<counts>
<fig-count count="7"/>
<table-count count="5"/>
<equation-count count="0"/>
<ref-count count="70"/>
<page-count count="15"/>
<word-count count="11705"/>
</counts>
<custom-meta-wrap>
<custom-meta>
<meta-name>section-at-acceptance</meta-name>
<meta-value>Perception Science</meta-value>
</custom-meta>
</custom-meta-wrap>
</article-meta>
</front>
<body>
<sec sec-type="intro" id="sec1">
<label>1</label>
<title>Introduction</title>
<p>Gestalt perception, the ability to discern meaningful structures by organizing sensory data, is an essential facet of our interactions with the world (<xref ref-type="bibr" rid="ref68">Wertheimer, 1923</xref>; <xref ref-type="bibr" rid="ref60">Van de Cruys and Wagemans, 2011a</xref>; <xref ref-type="bibr" rid="ref67">Wagemans et al., 2012</xref>). The concept is deeply entrenched in the field of visual art, particularly in non-representational art styles such as Cubism, where abstract, fragmented forms coalesce into a holistic image (<xref ref-type="bibr" rid="ref2">Arnheim, 1954</xref>). This transformative moment is often related to the &#x2018;Aha moment&#x2019; (<xref ref-type="bibr" rid="ref57">Topolinski and Reber, 2010</xref>; <xref ref-type="bibr" rid="ref42">Muth and Carbon, 2013</xref>).</p>
<p>Aha moments, also referred to as &#x2018;epiphany&#x2019;, are moments of sudden realization, insight, or comprehension. The term is often used in psychology and cognitive science to describe the instance at which an individual moves from not understanding or being unable to solve a problem to sudden comprehension (<xref ref-type="bibr" rid="ref42">Muth and Carbon, 2013</xref>; <xref ref-type="bibr" rid="ref10">Danek and Wiley, 2017</xref>). It can also refer to the moment of recognition when one sees the solution to a problem, the answer to a question, or the meaning behind a complex pattern. Aha moments are pivotal points in our perceptual experience of art. They can be seen as the junctures where the veil of abstraction and ambiguity is lifted, and the true essence&#x2014;or the personal meaning&#x2014;of an artwork is revealed.</p>
<p>In our study, we are particularly interested in the temporal aspect of these Aha moments&#x2014;the &#x2018;when&#x2019; of their occurrence&#x2014;which is a yet underexplored facet in the research of art interaction. The unique qualities of the Aha moment, particularly its suddenness (<xref ref-type="bibr" rid="ref16">Gick and Lockhart, 1995</xref>; <xref ref-type="bibr" rid="ref29">Kounios and Beeman, 2014</xref>), and the ease or difficulty with which this moment of recognition arrives (<xref ref-type="bibr" rid="ref57">Topolinski and Reber, 2010</xref>), form the crux of our research investigation. Specifically, we are interested in understanding the appearance of slow and fast processes of Gestalt recognition, corresponding with an early or delayed Aha moment, respectively. With these concepts in mind, our primary research question asks: &#x201C;How do timing and success in Gestalt recognition, leading to Aha moments, depend on the accessibility of the stimulus, and how does this influence artwork evaluation?&#x201D;</p>
<p>Previous art research posits that both quick and slow recognition processes exist. Quick Gestalt recognition is often associated with fluent processing, marked by immediate ease and appreciation of perceptual fluency (<xref ref-type="bibr" rid="ref49">Reber et al., 2004</xref>; <xref ref-type="bibr" rid="ref6">Belke et al., 2010a</xref>). This &#x2018;hedonic fluency effect&#x2019; proposes that easily recognized images generate more positive emotions and are more liked. Conversely, slower recognition processes suggest that viewers derive reward from the effort invested in decoding abstract patterns in visual art, followed by a delayed ease, leading to profound insight and appreciation (<xref ref-type="bibr" rid="ref60">Van de Cruys and Wagemans, 2011a</xref>,<xref ref-type="bibr" rid="ref62">b</xref>; <xref ref-type="bibr" rid="ref67">Wagemans et al., 2012</xref>; <xref ref-type="bibr" rid="ref66">Wagemans, 2013</xref>). These research findings suggest that art perception, in terms of Gestalt recognition and Aha moments, is not a straightforward process but instead, a dynamic interplay between fast and slow processes, regulated by both top-down and bottom-up cognitive processes that are governed by the artwork&#x2019;s accessibility and the viewer&#x2019;s cognitive and emotional engagement (<xref ref-type="bibr" rid="ref35">Leder et al., 2004</xref>; <xref ref-type="bibr" rid="ref37">Leder and Nadal, 2014</xref>).</p>
<p>Our study seeks to examine this interplay by evaluating the impact of task specification (recognition task in a public or private condition versus free-viewing), stimulus content (faces versus landscapes), and ease of recognition (accessibility). In addition, we also implement and test an implicit physiological measure&#x2014;pupillometry&#x2014;to capture the moment and to measure whether behavioral and physiological responses coincide (<xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>).</p>
<p>Our experiment is divided into two parts. Part 1 involves eliciting Aha moments through Gestalt recognition of faces in cubist art, using varying accessibility (high&#x2009;=&#x2009;easy/fluent, medium, low&#x2009;=&#x2009;difficult/non-fluent) stimuli, and recording response times. In addition to stimulus content and accessibility, a key experimental manipulation is the introduction of a public versus private paradigm to amplify the effort motivation expended during the recognition task. In part 1, we examine the possible scenarios for response times, including a shorter response time indicative of perceptual fluency versus a longer response time suggestive of a slower mechanism involved in Gestalt recognition; for the latter, we argue that this process is top-down steered to ensure accuracy of Gestalt detection and Aha as a meaningful insight into Gestalt recognition. We hypothesize that a recognition-oriented task will necessitate more time and effort for face recognition, indicating slower recognition processes, which is enhanced in the public condition. In contrast, we expect faster response times for non-face stimuli like landscapes, suggestive of a superficial, non-accurate immediate response. We will further explore if the performance outcome itself, that is, if the person made a correct answer/hit or an error (false alarm or missed target), is associated with response times.</p>
<p>In line with these predictions and to underpin our findings, we will utilize pupillometry, a well-established method to study sudden shifts in cognitive and affective quality. We anticipate finding associations between the time of the maximum pupil dilation and our main predictors, stimulus content and accessibility, as previous studies have shown (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), serving as a physiological marker for gaining Aha through Gestalt recognition (<xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>). Hereby, we will also investigate the influence of our newly introduced effort motivation to gain a deeper understanding of pupillometric evidence supporting Aha detection in art research.</p>
<p>However, an issue with pupillometry is that it has been used for moments of shifts concerning recognition and detecting emotions. Hence, it is not yet clear whether pupil dilations primarily indicate the recognition of patterns or an emotional response. Therefore, in part 2 of our study, we will allow free viewing of the artworks without a specific task to investigate whether pupil dilations respond differently, delivering a clearer interpretation of pupil dilations as a physiological marker. Here, we hope to observe a correlation between average pupil dilations and our main factors, which are stimulus content and accessibility, as a reflection of the emotional response toward the artworks. We hypothesize that, compared to part 1, in part 2 the time of maximum pupil dilation might be less consistent or significant, that is, showing maximum dilations at different time points during the trial.</p>
<p>Building on prior work (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), we will consider a range of ratings for aesthetic judgments and attributes (arousal, clearness, liking, complexity, comprehension, and emotional valence) after image presentation in part 2. Despite a general analysis, where we anticipate positive associations across these attributes with face stimuli and varying levels of fluency, we will also investigate associations with pupil dilations. Here, we specifically expect positive associations with arousal, comprehension, clearness, liking, and positive valence. Furthermore, we will examine how performance in part 1 influences part 2&#x2019;s subjective ratings, as cognitive processing theories suggest that the depth of our processing and comprehension can affect our appraisal of an artwork (<xref ref-type="bibr" rid="ref37">Leder and Nadal, 2014</xref>; <xref ref-type="bibr" rid="ref43">Muth et al., 2015</xref>; <xref ref-type="bibr" rid="ref18">Gr&#x00FC;ner et al., 2019</xref>). The influence of the recognition effort in part 1 on part 2&#x2019;s ratings is therefore of interest to us, and we will approach these analyses exploratively due to limited prior research.</p>
<p>In conclusion, our study aspires to shed light on the multifaceted process of Gestalt recognition by examining the dynamics of fast and slow perceptual processes and the timing of Aha. Additionally, our study aims to unravel the potentials and limitations of pupillometry in art research to enhance our understanding of cognitive and emotional responses evoked by visual art. Through these investigations, we hope to provide valuable insight into the complex mechanisms underpinning our appreciation of visual art and gain a deeper understanding of using pupillometry in art research.</p>
<sec id="sec2">
<label>1.1</label>
<title>Pupillometry as a potentially salient indicator for Gestalt recognition and Aha</title>
<p>Pupillometry&#x2014;the analysis of pupil responses&#x2014;provides a compelling method to detect sudden shifts in cognitive or affective states (<xref ref-type="bibr" rid="ref5">Beatty and Lucero-Wagoner, 2000</xref>; <xref ref-type="bibr" rid="ref26">Jepma and Nieuwenhuis, 2011</xref>; <xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>).<xref ref-type="fn" rid="fn0001"><sup>1</sup></xref> It has been employed in various fields of study, such as working memory (<xref ref-type="bibr" rid="ref27">Kahneman and Beatty, 1966</xref>), Stroop color-naming (<xref ref-type="bibr" rid="ref32">Laeng et al., 2011</xref>), and figure-ground recognition tasks (<xref ref-type="bibr" rid="ref65">Villani et al., 2015</xref>). Notably, pupil dilations have been linked with target detection during rapid serial presentation (<xref ref-type="bibr" rid="ref47">Privitera et al., 2010</xref>), perceptual selection predicting subsequent stability in perceptual rivalry (<xref ref-type="bibr" rid="ref11">Einhauser et al., 2008</xref>), and detection during subliminal repeating presentations (<xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>). However, pupil dilations also appear due to emotions (<xref ref-type="bibr" rid="ref20">Hess, 1965</xref>; <xref ref-type="bibr" rid="ref7">Bernick et al., 1971</xref>; <xref ref-type="bibr" rid="ref1">Aboyoun and Dabbs, 1998</xref>), mainly associated with arousal and appeal of challenge (<xref ref-type="bibr" rid="ref21">Hess and Polt, 1960</xref>; <xref ref-type="bibr" rid="ref43">Muth et al., 2015</xref>), and pleasure in fluency (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>). These findings suggest that pupillometry holds promise as a yet underexplored method in art research for detecting recognition-related Aha moments and the presence of emotions.</p>
<p>Support for this approach is found not only in empirical data but also in neurology. Psychological (not light-induced) pupil responses are primarily modulated by the noradrenergic system in the locus coeruleus (<xref ref-type="bibr" rid="ref3">Aston-Jones and Cohen, 2005</xref>; <xref ref-type="bibr" rid="ref17">Gilzenrat et al., 2010</xref>; <xref ref-type="bibr" rid="ref25">Jefferies and Di Lollo, 2019</xref>; <xref ref-type="bibr" rid="ref30">Kret and Sjak-Shie, 2019</xref>). The locus coeruleus is a brain area associated with novelty perception and reward (<xref ref-type="bibr" rid="ref38">Libby et al., 1973</xref>; <xref ref-type="bibr" rid="ref19">Herv&#x00E9;-Minvielle and Sara, 1995</xref>). The fact that the locus coeruleus is involved in coordinating pupil reaction and pulling the strings during ongoing action/thought, focusing attention, and engaging in the notification of cognitive event boundaries, delivers a neurophysiological premise for using pupillometry as a key measure for notifying sudden moments.</p>
<p>To date, pupillometry has seen limited application in art recognition studies. Notably, two studies (i.e., <xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>) leveraged pupil dilations to study fluency effects. While these studies provided valuable insights, they yielded contradictory results and only partially supported the idea of fluency-induced emotional response. In both studies, pupil responses were measured only around the time of response and were assumed to represent aesthetic emotions. Both studies asked participants to state with a key press when they recognized the artworks&#x2019; content&#x2014;response times measured the processing fluency. Both studies found longer response times with increasing abstractness (reducing levels of accessibility). <xref ref-type="bibr" rid="ref31">Kuchinke et al. (2009)</xref> focused on aesthetic emotions around the moment when participants recognized any figure in cubist paintings. In accordance with the hedonic fluency model, they found peak dilations just before stated recognition, which were larger with high-fluent artworks and positively correlated with preference ratings. <xref ref-type="bibr" rid="ref12">Elschner et al. (2018)</xref>, in a follow-up study, added expressionist abstract images. They found stronger dilations with decreasing levels of abstractness. The effect was stronger for cubist than expressionist art, although the latter was more liked. Hence, despite their effort to expand the design (additional styles; randomized instead of blocked style design), they did not find a fluency-induced emotional response reflected by dilations. It should be noted that art judgments were taken by participants who did not conduct the pupillometry experiment.</p>
<p>However, both studies detected peaks of pupil dilations at the moment of stated recognition. Given the study design that participants should recognize the content, maximum dilations may be interpreted as a physiological marker for Aha. As such, these studies can be reinterpreted as measures of Gestalt recognition, where pupil responses represented more of a marker for the Aha moment. However, the contradictory results could also have been grounded in the intermix of task and free viewing with no time limitation, yet participants should recognize the content of the artworks. In addition, both studies recorded pupil data only shortly before the response and did not investigate the whole trial period. We address this limitation and explore not only average pupil dilation but also the time of the maximum pupil dilation covering both analyses of the whole trial period.</p>
<p>Building on this work, our study explores Gestalt recognition in abstract art as an Aha event in part 1 and as an affective event in part 2, measured by pupil dilations (time of maximum dilation and average dilation during the entire trial period) and potential associations with artwork judgments. We aim to delve deeper into this phenomenon, exploring not only the recognition process but also the strategies that participants might employ, from fluent, easy strategies to more meticulous and accurate ways of interaction. Note that we do not state that these working processes can be seen as being either perceptual, cognitive, or affective in quality. Instead, it is an interplay of all qualities. However, in part 1, we intend to focus the participants on the recognition task, and in part 2, we intend to give space for emotional response in a no-task condition and re-evaluation of their performance.</p>
</sec>
</sec>
<sec id="sec3">
<label>2</label>
<title>The present study</title>
<p>Our study, comprising two parts, seeks to understand Aha moments through Gestalt recognition in the context of visual cubist artworks. In part 1, we created a paradigm utilizing low-level artwork features to manipulate accessibility, varying from high to low fluency (see <xref ref-type="fig" rid="fig1">Figure 1</xref>). We incorporated an effort-taking context that required quick and accurate recognition of abstract faces in the artwork. This section involved a yes/no face-recognition task using short repeating stimulus presentations. Additionally, we instituted a public-private paradigm to manipulate the level of effort exerted during the task, emphasizing the goal of achieving Aha moments.</p>
<fig position="float" id="fig1">
<label>Figure 1</label>
<caption><p>Left: Examples for stimulus content with faces differing in degree of fluency: high, medium, and low (see for the full list of artworks used <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S1</xref>). Right: Stimulus presentation: in part 1 (right top), each artwork was presented 10 times for 50&#x2009;ms in a row with 100&#x2009;ms interim blank screens; in total, 10,500&#x2009;ms. In the stimulus presentation in part 2 (right bottom), each artwork was presented one time for 9,000&#x2009;ms. Afterward, the participants were asked to give their ratings on all six scales. Shown works are in the public domain in their country of origin and other countries and areas where the copyright term is the author&#x2019;s life plus 70&#x2009;years or fewer. These works are in the public domain in the United States because it was published (or registered with the U.S. Copyright Office) before 1 January 1926 (for image search and copyrights: <ext-link xlink:href="https://commons.wikimedia.org/" ext-link-type="uri">https://commons.wikimedia.org/</ext-link>): <bold>(A)</bold> <italic>Portrait of Pablo Picasso</italic>. <bold>(B)</bold> <italic>Portrait de Madame Josette Gris</italic>. <bold>(C)</bold> <italic>Composition with Figures</italic>.</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g001.tif"/>
</fig>
<p>Depending on participants&#x2019; response times, we anticipate two possible outcomes. Short response times would support theories of perceptual fluency considering recognition and should correspond to higher liking ratings in part 2. This hypothesis suggests that recognizable faces in high-fluent artworks would be detected fastest. Conversely, if participants prioritize accuracy over speed, indicating longer response times for Aha experiences, it might suggest a blend of perceptual and top-down controlled mechanisms at play. We expect to observe the latter, particularly for high-fluent and face stimuli, with the timing of maximum pupil dilation aligning with the behavioral response.</p>
<p>In part 2, we examine pupil responses over an extended viewing period, exploring the average dilation and timing of maximum dilation. We also collected participant ratings post-stimulus presentation to ascertain potential correlations between pupil responses, arousal, image clarity, liking, complexity, comprehension, and emotional valence. Finally, we investigated the influence of confidence in one&#x2019;s correctness on artwork ratings in part 2, examining the effects of correct and missed recognition made in part 1. This approach offers a holistic view of the interplay between cognition, emotion, and physiological responses in the context of art perception and recognition.</p>
</sec>
<sec sec-type="materials|methods" id="sec4">
<label>3</label>
<title>Materials and methods</title>
<sec id="sec5">
<label>3.1</label>
<title>Participants</title>
<p>The final sample included 84 participants (<italic>M</italic><sub>age</sub>&#x2009;=&#x2009;21.27, SD&#x2009;=&#x2009;2.16; 57.1% female) from an original sample of 106 mainly psychology students from the University of Vienna. From the original sample, 22 participants were excluded due to technical issues (<italic>n</italic>&#x2009;=&#x2009;16) or input recording (<italic>n</italic>&#x2009;=&#x2009;6). Another <italic>n</italic>&#x2009;=&#x2009;14 participants had to be excluded only for the pupil analysis (final eye-tracking data analysis <italic>N</italic>&#x2009;=&#x2009;70), as specific raw data files for several participants were not saved. Course credit was given for participation. No participant reported an academic background in fine arts, art history, or other related disciplines dealing with art. A pre-online questionnaire ensured that none of the participants was color-blind or had more than 1.2 diopters of visual impairment. All participants signed informed consent, and the ethics committee of the University of Vienna approved the study.</p>
</sec>
<sec id="sec6">
<label>3.2</label>
<title>Apparatus</title>
<p>Pupil measures were recorded with a video-based Eyelink 1,000 desktop-mounted eye-tracker (SR Research Ltd., Mississauga, Ontario, Canada). An infrared-sensitive camera provided pupillometry at a sampling rate of 1,000&#x2009;Hz. The experiment was controlled by Experiment Builder Software Version 1.10.1630 (SR Research Ltd., Mississauga, Ontario, Canada) on a Windows PC. EyeLink 1,000 has implemented two pupil tracking algorithms: centroid and ellipse fitting. We used the centroid mode, tracking the center of the threshold pupil using a center of the mass algorithm.</p>
</sec>
<sec id="sec7">
<label>3.3</label>
<title>Stimuli</title>
<p>The stimuli were the 39 cubist paintings from <xref ref-type="bibr" rid="ref31">Kuchinke et al. (2009)</xref>, differing in their degree of accessibility (see <xref ref-type="fig" rid="fig1">Figure 1</xref>, left, see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S1</xref>, for a full list of artworks). The accessibility level was pre-rated in their study. We used three levels of accessibility: high, medium, and low. These were further divided into stimulus contents (with visible abstract/figurative faces or landscapes). Of the 39 artworks, 30 paintings were used as stimuli in the present study (10 images for high-fluent/50% faces, 11 for medium-fluent/45% faces, 9 for low-fluent/55% faces), and nine paintings were used for warm-up trials. The stimuli subtended at maximum a vertical visual angle of 17.19&#x00B0; and a horizontal visual angle of 15.28&#x00B0; (based on <xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>). The stimuli were presented on an LCD monitor (SyncMaster 2443BW, Samsung) with a resolution of 2,400 by 1,920 pixels and a screen refresh rate of 60&#x2009;Hz. The images were edited to mean luminance (both ps&#x2009;&#x003E;&#x2009;0.180; luminance high-fluent&#x2009;=&#x2009;156.46&#x2009;cd/mm<sup>2</sup>, medium-fluent&#x2009;=&#x2009;159.55&#x2009;cd/mm<sup>2</sup>, low-fluent&#x2009;=&#x2009;162.40&#x2009;cd/mm<sup>2</sup>) to reduce the influence of luminance (<xref ref-type="bibr" rid="ref39">Loewenfeld, 1999</xref>; <xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>).</p>
</sec>
<sec id="sec8">
<label>3.4</label>
<title>Calibration measurements and luminance issues</title>
<p>Participants were tested in a medium-lit room, sitting 60&#x2009;cm from a monitor with their heads on a chin rest. The dominant eye was determined, and its movements were calibrated with EyeLink 1,000. To avoid undesirable pupil reactions due to luminance differences, a single image was displayed at the screen&#x2019;s center, optimized for accurate viewing. The calibration quality was maintained within the pupil box preset of EyeLink 1,000, with camera setup and calibration repeated as needed to avoid corneal reflection loss. Two calibrations per participant were performed before the experiment. Pupil measurements were taken monocularly (dominant eye) during a fixation, with noise levels limited to 0.2% of the pupil diameter. Pupil size was reported in arbitrary units. This corresponds to a resolution of 0.01&#x2009;mm for a 5&#x2009;mm pupil. Pupil size was not taken in commonly used micrometers, but in units of the EyeLink 1,000 system default. Pupil size reported by EyeLink 1,000 is an integer number in arbitrary units (= &#x201C;au,&#x201D; system-typical pupil diameter measures ranged from 400 to 16,000&#x2009;units). Participants were asked to minimize blinking and avoid looking around during trials.</p>
<p>Potential pupil responses due to initial light reflexes and size fluctuations from the blank screens between image presentations were anticipated. Full luminance control was not possible in part 1, and we opted not to use scrambled image versions between screens to avoid disrupting recognition processes. However, prior research has successfully used pupillometry in similar repeating and subliminal stimulus presentations (<xref ref-type="bibr" rid="ref11">Einhauser et al., 2008</xref>; <xref ref-type="bibr" rid="ref23">Ionescu, 2016</xref>). Unlike prior art studies (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), we analyzed the entire trial duration in both parts. In part 2, the 9,000&#x2009;ms static image viewing period posed no luminance issues.</p>
</sec>
<sec id="sec9">
<label>3.5</label>
<title>Procedure</title>
<p>All instructions were given in written form on the computer screen. Participants knew that the experiment had two parts (part 1: face-recognition task; part 2: viewing and rating task).</p>
<p>In the task of part 1, participants were asked to press either a yes-key or a no-key (on a normal keyboard, left/right key-assignment counterbalanced) when they recognized &#x2018;yes, there is a face or faces&#x2019; or &#x2018;no, there is no face or faces&#x2019; in the cubist paintings presented. Nine warm-up rounds ensured that participants understood the task. To start the trial, participants had to fixate on a center cross for at least 220&#x2009;ms. Afterward, an artwork was displayed a maximum number of 10 times in rapid succession for 50&#x2009;ms each time with an interval of 100&#x2009;ms between flashes (see <xref ref-type="fig" rid="fig1">Figure 1</xref>, top right). In total, participants had a maximum of 10,500&#x2009;ms time to make their decisions. If the participant did not press any key in time, the time-out was noted, and a new trial with a new artwork started. After each trial, a 3,000&#x2009;ms blank screen was presented, allowing a short pause, after which participants had to fixate on the fixation cross again, followed by a new trial. In total, the participants had 30 trials/artworks to give their yes or no response.</p>
<p>In part 2, participants were told that they were shown the same artworks again for a fixed-time duration of 9,000&#x2009;ms each (duration was based on the study results of <xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>, where average recognition time was 9,000&#x2009;ms). After 9,000&#x2009;ms, the image disappeared from the screen. Participants gave their ratings in arousal, emotional valence, liking, complexity, comprehension, and clearness using a 7-point Likert-type scale, ranging from 1 (not at all or negative) to 7 (very much or positive). Regarding valence and arousal, we explicitly asked the participants to focus on their subjective felt elicited response, meaning what the artwork elicited in them.</p>
<sec id="sec10">
<label>3.5.1</label>
<title>Variation of effort motivation</title>
<p>The between-subjects factor effort motivation varied only in part 1 (i.e., face-recognition task). Participants were assigned in a counterbalanced manner to either a public (<italic>n</italic>&#x2009;=&#x2009;42) or a private condition (<italic>n</italic>&#x2009;=&#x2009;42). To manipulate the participants&#x2019; motivation in making an effort to focus on the Gestalt recognition task, in the public condition, participants were (wrongly) informed that their performance would be ranked in a high-score list and discussed in the next research seminar for further face-recognition evaluation reasons (for similar manipulation, see <xref ref-type="bibr" rid="ref64">Van Honk et al., 2016</xref>). In the private condition, participants were assured in the instruction that their performance and effort were anonymous and private.</p>
</sec>
</sec>
<sec id="sec11">
<label>3.6</label>
<title>Baseline correction, preprocessing, and usage of statistical analysis</title>
<p>Pupil data were analyzed in Python using custom code for preprocessing and the scipy, statsmodels (<xref ref-type="bibr" rid="ref52">Seabold and Perktold, 2010</xref>), and the pingouin (<xref ref-type="bibr" rid="ref58">Vallat, 2018</xref>) libraries for statistics. Blinks were detected as missing values in the signal. We removed pupil data around the blinks and replaced it with NaNs (no interpolation) in a time window of 20&#x2009;ms before and after each blink. Additionally, &#x00B1;20&#x2009;ms pupil data were removed around sudden sharp changes in the signal using a threshold of 3.5 SDs, as these are usually an indication of ocular artifacts (<xref ref-type="bibr" rid="ref41">Math&#x00F4;t et al., 2018</xref>).</p>
<p>To account for different baseline diameters in pupil dilation, a baseline correction was conducted. We calculated baselines for each individual trial by using the mean pupil size during participants&#x2019; fixation on the cross, which was shown before each trial. We followed the criteria suggested by <xref ref-type="bibr" rid="ref41">Math&#x00F4;t et al. (2018)</xref> for data exclusion, although no participants from the final sample reported in the results had to be excluded.</p>
<p>Additionally, we applied multivariate analysis and linear mixed models (LMMs). Stimulus content (faces and landscapes) and fluency (high, medium, and low) varied as within-subject factors and effort motivation (public versus private) as a between-subject factor (<xref ref-type="bibr" rid="ref4">Baayen et al., 2008</xref>; <xref ref-type="bibr" rid="ref22">Hox et al., 2010</xref>; <xref ref-type="bibr" rid="ref8">Bosker and Snijders, 2011</xref>; <xref ref-type="bibr" rid="ref15">Ga&#x0142;ecki and Burzykowski, 2013</xref>; <xref ref-type="bibr" rid="ref9">Brieber et al., 2014</xref>).</p>
</sec>
</sec>
<sec sec-type="results" id="sec12">
<label>4</label>
<title>Results</title>
<p>The results are presented as follows: (1) first, we describe the results of the behavioral outcomes in part 1 (response times for each main factor), and then, we present (2) the results of the pupil analysis along two aspects: the average pupil dilation and the time of the maximum pupil dilation within each trial for both part 1 and part 2. (3) For part 2, we describe the rating results, which we again complement with pupil data. Finally, we show (4) explorative analyses connecting both parts.</p>
<sec id="sec13">
<label>4.1</label>
<title>Behavioral analysis of part 1</title>
<p>Descriptive analysis is shown in <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S2</xref>. We assessed the relation of the response time between the various conditions by using LMMs. For this analysis, we included only the results from the high- and low-fluent artworks since there were no differences in means between the medium- and low-fluent conditions (including the medium condition was also not shown to change the following main results). We used the response time for each stimulus as the dependent variable. The independent variables were performance outcome (dummy-coded) with the correct response (hit) as a baseline against both error types, false-alarm (false assessment) and misses (missed target), effort motivation (dummy-coded) with private as the baseline against the public, stimulus content (dummy-coded) with faces as the baseline against landscape, and the degree of fluency (dummy-coded) with low fluent as the baseline against high fluent. We further included the interaction between the degree of fluency and performance outcome. Finally, we estimated the intercept as a random coefficient (the intercept could vary between participants). Time-outs were excluded from the analysis.</p>
<p>We found significant main effects for the degree of fluency and stimulus content. Participants had 70.47&#x2009;ms shorter response times for landscape artworks than faces. The main effect of fluency resulted in longer response times (by 60.13&#x2009;ms) in the high-fluent than the low-fluent condition. Misses and false alarms did not differ significantly from correct answers in response times, and there was also no difference in response times due to effort motivation. To summarize, participants had longer response times for stimuli with faces and high-fluent artworks (see <xref ref-type="table" rid="tab1">Table 1</xref>).</p>
<table-wrap position="float" id="tab1">
<label>Table 1</label>
<caption><p>Part 1, fixed effects in the LMM predicting response times; baselines are represented by stimulus content&#x2014;face(s), degree of fluency&#x2014;low fluent as it represented the most difficult way to solve the artwork, and performance-outcome&#x2014;correct.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th/>
<th align="center" valign="top" colspan="2">95% CI</th>
<th/>
<th/>
</tr>
<tr>
<th align="left" valign="top">Fixed effects</th>
<th align="center" valign="top">Estimate</th>
<th align="center" valign="top">Lower</th>
<th align="center" valign="top">Upper</th>
<th align="center" valign="top"><italic>t</italic>-value</th>
<th align="center" valign="top">Pr(&#x003E;|<italic>t</italic>|)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">506.62</td>
<td align="center" valign="top">472.78</td>
<td align="center" valign="top">540.48</td>
<td align="center" valign="top">29.36</td>
<td align="center" valign="top">&#x003C;&#x2009;0.01</td>
</tr>
<tr>
<td align="left" valign="top">High fluent</td>
<td align="center" valign="top">59.04</td>
<td align="center" valign="top">30.56</td>
<td align="center" valign="top">87.52</td>
<td align="center" valign="top">4.06</td>
<td align="center" valign="top">&#x003C;&#x2009;0.01</td>
</tr>
<tr>
<td align="left" valign="top">False alarm</td>
<td align="center" valign="top">&#x2212;31.51</td>
<td align="center" valign="top">&#x2212;121.34</td>
<td align="center" valign="top">58.32</td>
<td align="center" valign="top">0.69</td>
<td align="center" valign="top">0.492</td>
</tr>
<tr>
<td align="left" valign="top">Miss</td>
<td align="center" valign="top">&#x2212;31.39</td>
<td align="center" valign="top">&#x2212;78.97</td>
<td align="center" valign="top">16.19</td>
<td align="center" valign="top">1.29</td>
<td align="center" valign="top">0.20</td>
</tr>
<tr>
<td align="left" valign="top">Effort-motivation&#x2014;public</td>
<td align="center" valign="top">&#x2212;14.03</td>
<td align="center" valign="top">&#x2212;48.15</td>
<td align="center" valign="top">20.10</td>
<td align="center" valign="top">0.81</td>
<td align="center" valign="top">0.42</td>
</tr>
<tr>
<td align="left" valign="top">Stimulus content&#x2014;landscape</td>
<td align="center" valign="top">&#x2212;95.56</td>
<td align="center" valign="top">&#x2212;123.48</td>
<td align="center" valign="top">&#x2212;67.65</td>
<td align="center" valign="top">6.71</td>
<td align="center" valign="top">&#x003C;&#x2009;0.01</td>
</tr>
<tr>
<td align="left" valign="top">Interaction between High fluent&#x2014;false-alarm</td>
<td align="center" valign="top">&#x2212;14.16</td>
<td align="center" valign="top">&#x2212;131.91</td>
<td align="center" valign="top">103.59</td>
<td align="center" valign="top">0.24</td>
<td align="center" valign="top">0.81</td>
</tr>
<tr>
<td align="left" valign="top">Interaction between<break/>High fluent&#x2014;miss</td>
<td align="center" valign="top">&#x2212;111.77</td>
<td align="center" valign="top">&#x2212;282.05</td>
<td align="center" valign="top">58.50</td>
<td align="center" valign="top">1.29</td>
<td align="center" valign="top">0.20</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
<sec id="sec14">
<label>4.2</label>
<title>Pupil analysis</title>
<p>We focused on two main aspects for the analysis of the pupil data: (1) the average pupil dilation in each trial and (2) the time of maximum pupil dilation within each trial. Since the trials in part 1 had different lengths, the time of the maximum was calculated relative to the total length of each trial.</p>
<sec id="sec15">
<label>4.2.1</label>
<title>Mixed model results for average pupil dilations&#x2014;part 1</title>
<p>Our first analysis looked at the average pupil dilation within the whole trial period. We found that neither of our main manipulations, <italic>stimulus content</italic> (<italic>p</italic>&#x2009;=&#x2009;0.37, <xref ref-type="table" rid="tab2">Table 2</xref>) and <italic>accessibility</italic> (<italic>p</italic>&#x2009;=&#x2009;0.60), influenced average baseline subtracted pupil size. There was a significant negative association with the trial number (<italic>p</italic>&#x2009;=&#x2009;0.04), suggesting that the average pupil change from baseline was getting smaller across trials. Adding <italic>performance outcome</italic> as a predictor did not change the main results as mean pupil dilation for correct and incorrect responses were not different (<italic>p</italic>&#x2009;=&#x2009;0.68, <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S3</xref>). Finally, adding <italic>effort motivation</italic> as a predictor also had no significant influence on average dilation (<italic>p</italic>&#x2009;=&#x2009;0.07, <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S3</xref>).</p>
<table-wrap position="float" id="tab2">
<label>Table 2</label>
<caption><p>Part 1 and part 2, results of multi-mixed methods for baseline-corrected average pupil size.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Fixed effects</th>
<th align="center" valign="top">Estimate</th>
<th align="center" valign="top">SE</th>
<th align="center" valign="top">Lower</th>
<th align="center" valign="top">Upper</th>
<th align="center" valign="top"><italic>z</italic>-value</th>
<th align="center" valign="top">Pr(&#x003E;|z|)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top"><italic>PART 1</italic></td>
<td align="left" valign="top" colspan="6">95% CI</td>
</tr>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">&#x2212;80.40</td>
<td align="center" valign="top">10.66</td>
<td align="center" valign="top">&#x2212;101.30</td>
<td align="center" valign="top">&#x2212;59.40</td>
<td align="center" valign="top">&#x2212;7.54</td>
<td align="center" valign="top">&#x003C;0.001</td>
</tr>
<tr>
<td align="left" valign="top">Accessibility</td>
<td align="center" valign="top">1.97</td>
<td align="center" valign="top">3.78</td>
<td align="center" valign="top">&#x2212;5.45</td>
<td align="center" valign="top">9.39</td>
<td align="center" valign="top">0.52</td>
<td align="center" valign="top">0.603</td>
</tr>
<tr>
<td align="left" valign="top">Stimulus content</td>
<td align="center" valign="top">5.51</td>
<td align="center" valign="top">5.97</td>
<td align="center" valign="top">&#x2212;6.20</td>
<td align="center" valign="top">17.21</td>
<td align="center" valign="top">0.92</td>
<td align="center" valign="top">0.356</td>
</tr>
<tr>
<td align="left" valign="top">Trial</td>
<td align="center" valign="top">&#x2212;0.71</td>
<td align="center" valign="top">0.34</td>
<td align="center" valign="top">&#x2212;1.39</td>
<td align="center" valign="top">&#x2212;0.04</td>
<td align="center" valign="top">&#x2212;2.08</td>
<td align="center" valign="top">0.038</td>
</tr>
<tr>
<td align="left" valign="top"><italic>PART 2</italic></td>
<td/>
<td/>
<td align="center" valign="top" colspan="2">95% CI</td>
<td/>
<td/>
</tr>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">66.57</td>
<td align="center" valign="top">23.41</td>
<td align="center" valign="top">20.69</td>
<td align="center" valign="top">112.45</td>
<td align="center" valign="top">2.84</td>
<td align="center" valign="top">0.004</td>
</tr>
<tr>
<td align="left" valign="top">Accessibility</td>
<td align="center" valign="top">&#x2212;25.96</td>
<td align="center" valign="top">4.56</td>
<td align="center" valign="top">&#x2212;34.90</td>
<td align="center" valign="top">&#x2212;17.01</td>
<td align="center" valign="top">&#x2212;5.69</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
<tr>
<td align="left" valign="top">Stimulus content</td>
<td align="center" valign="top">33.38</td>
<td align="center" valign="top">7.12</td>
<td align="center" valign="top">19.25</td>
<td align="center" valign="top">47.52</td>
<td align="center" valign="top">4.63</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
<tr>
<td align="left" valign="top">Trial</td>
<td align="center" valign="top">&#x2212;1.52</td>
<td align="center" valign="top">0.41</td>
<td align="center" valign="top">&#x2212;2.33</td>
<td align="center" valign="top">&#x2212;0.71</td>
<td align="center" valign="top">&#x2212;3.66</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The dependent variable was average pupil dilation. Independent variables were accessibility, stimulus content, and trial number.</p>
</table-wrap-foot>
</table-wrap>
</sec>
<sec id="sec16">
<label>4.2.2</label>
<title>Mixed model results for the time of maximum pupil dilation&#x2014;part 1</title>
<p>The analysis of the time of maximum pupil dilation showed a different pattern. There was a significant negative association with <italic>accessibility</italic> (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, <xref ref-type="table" rid="tab3">Table 3</xref>), showing that low fluency predicted earlier maximum dilation. At the same time, the predictor <italic>stimulus content</italic> had a positive influence (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001, <xref ref-type="table" rid="tab3">Table 3</xref>), showing that face stimuli had later maximum dilation than landscapes.</p>
<table-wrap position="float" id="tab3">
<label>Table 3</label>
<caption><p>Part 1 and part 2, results of multi-mixed methods for the time of maximum pupil size.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Fixed effects</th>
<th align="center" valign="top">Estimate</th>
<th align="center" valign="top">SE</th>
<th align="center" valign="top">Lower</th>
<th align="center" valign="top">Upper</th>
<th align="center" valign="top"><italic>z</italic>-value</th>
<th align="center" valign="top">Pr(&#x003E;|z|)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top"><italic>PART 1</italic></td>
<td align="left" valign="top" colspan="6">95% CI</td>
</tr>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">0.33</td>
<td align="center" valign="top">0.30</td>
<td align="center" valign="top">0.27</td>
<td align="center" valign="top">0.39</td>
<td align="center" valign="top">11.12</td>
<td align="center" valign="top">&#x003C;0.001</td>
</tr>
<tr>
<td align="left" valign="top">Accessibility</td>
<td align="center" valign="top">&#x2212;0.05</td>
<td align="center" valign="top">0.01</td>
<td align="center" valign="top">&#x2212;0.07</td>
<td align="center" valign="top">&#x2212;0.03</td>
<td align="center" valign="top">&#x2212;5.14</td>
<td align="center" valign="top">&#x003C;0.001</td>
</tr>
<tr>
<td align="left" valign="top">Stimulus content</td>
<td align="center" valign="top">0.09</td>
<td align="center" valign="top">0.02</td>
<td align="center" valign="top">0.06</td>
<td align="center" valign="top">0.12</td>
<td align="center" valign="top">5.94</td>
<td align="center" valign="top">&#x003C;0.001</td>
</tr>
<tr>
<td align="left" valign="top">Trial</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">4.23</td>
<td align="center" valign="top">&#x003C;0.001</td>
</tr>
<tr>
<td align="left" valign="top"><italic>PART 2</italic></td>
<td/>
<td/>
<td align="center" valign="top" colspan="2">95% CI</td>
<td/>
<td/>
</tr>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">0.15</td>
<td align="center" valign="top">0.02</td>
<td align="center" valign="top">0.11</td>
<td align="center" valign="top">0.19</td>
<td align="center" valign="top">7.24</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
<tr>
<td align="left" valign="top">Accessibility</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">&#x2212;0.00</td>
<td align="center" valign="top">0.01</td>
<td align="center" valign="top">0.89</td>
<td align="center" valign="top">0.376</td>
</tr>
<tr>
<td align="left" valign="top">Stimulus content</td>
<td align="center" valign="top">&#x2212;0.01</td>
<td align="center" valign="top">0.01</td>
<td align="center" valign="top">&#x2212;0.02</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">&#x2212;1.46</td>
<td align="center" valign="top">0.145</td>
</tr>
<tr>
<td align="left" valign="top">Trial</td>
<td align="center" valign="top">&#x2212;0.00</td>
<td align="center" valign="top">0.00</td>
<td align="center" valign="top">&#x2212;0.00</td>
<td align="center" valign="top">&#x2212;0.00</td>
<td align="center" valign="top">&#x2212;2.53</td>
<td align="center" valign="top">0.011</td>
</tr>
</tbody>
</table>
<table-wrap-foot>
<p>The dependent variable was the time of the maximum pupil dilation. Independent variables were accessibility, stimulus content, and trial number.</p>
</table-wrap-foot>
</table-wrap>
<p>Adding <italic>performance outcome</italic> as a predictor also showed a significant influence (<italic>p</italic>&#x2009;=&#x2009;0.001, <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S4</xref>) in predicting later maximum pupil dilation on correct response trials. Finally, <italic>effort motivation</italic> as a predictor did not show significance (<italic>p</italic>&#x2009;=&#x2009;0.67, <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S4</xref>). Adding both latter predictors did not change the results of the three other predictors, which remained very similar.</p>
</sec>
<sec id="sec17">
<label>4.2.3</label>
<title>Mixed model results for average pupil dilations&#x2014;part 2</title>
<p>The results in part 2 showed a very different pattern from part 1. Here, all our main predictors were significant in the prediction of baseline-corrected average pupil dilation (<xref ref-type="table" rid="tab2">Table 2</xref>). <italic>Accessibility</italic> had a negative influence (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001), showing that high-fluent artworks led to larger pupil dilation. Additionally, <italic>stimulus content</italic> had a positive effect (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001), showing that dilation was larger for artworks with faces than landscapes. Finally, as in part 1, there was a significant negative effect of trial number (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001), stemming from a smaller pupil dilation over time.</p>
</sec>
<sec id="sec18">
<label>4.2.4</label>
<title>Mixed model results for the time of maximum pupil dilation&#x2014;part 2</title>
<p>As opposed to the analysis of average pupil dilation, the analysis of the temporal position of maximum pupil dilation uncovered no significant effects (<xref ref-type="table" rid="tab3">Table 3</xref>) in part 2 for our main predictors. The control trial number was significant (<italic>p</italic>&#x2009;=&#x2009;0.011), but unlike in part 1, neither <italic>accessibility</italic> (<italic>p</italic>&#x2009;=&#x2009;0.376) nor faces (<italic>p&#x2009;=&#x2009;</italic>0.145) were significant predictors.</p>
</sec>
</sec>
<sec id="sec19">
<label>4.3</label>
<title>Analysis of artwork ratings&#x2014;part 2</title>
<p>Descriptive statistics of the ratings, differentiated by <italic>stimulus content</italic> and <italic>accessibility</italic>, are reported in <xref ref-type="table" rid="tab4">Table 4</xref>. <xref rid="SM1" ref-type="supplementary-material">Supplementary Figure S1</xref> shows a correlation heatmap of all ratings.</p>
<table-wrap position="float" id="tab4">
<label>Table 4</label>
<caption><p>Part 2, means and standard deviation of ratings (7-point Likert-type scale, ranging from 1&#x2009;=&#x2009;not at all or negative valence to 7&#x2009;=&#x2009;very much or positive valence) between the different stimuli presentations and degree of fluency.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th align="left" valign="top">Stimulus-condition</th>
<th align="left" valign="top">Accessibility</th>
<th align="center" valign="top">Complexity</th>
<th align="center" valign="top">Comprehension</th>
<th align="center" valign="top">Valence</th>
<th align="center" valign="top">Arousal</th>
<th align="center" valign="top">Clearness</th>
<th align="center" valign="top">Liking</th>
</tr>
<tr>
<th/>
<th/>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
<th align="center" valign="middle"><italic>M</italic> (SD)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top" rowspan="3">Faces</td>
<td align="left" valign="middle">High</td>
<td align="center" valign="middle">4.08 (0.95)</td>
<td align="center" valign="middle">4.58 (0.85)</td>
<td align="center" valign="middle">3.41 (0.76)</td>
<td align="center" valign="middle">4.45 (0.75)</td>
<td align="center" valign="middle">6.65 (0.47)</td>
<td align="center" valign="middle">3.88 (1.01)</td>
</tr>
<tr>
<td align="left" valign="middle">Medium</td>
<td align="center" valign="middle">4.55 (0.84)</td>
<td align="center" valign="middle">3.46 (0.98)</td>
<td align="center" valign="middle">3.82 (0.55)</td>
<td align="center" valign="middle">4.47 (0.72)</td>
<td align="center" valign="middle">4.70 (0.82)</td>
<td align="center" valign="middle">3.80 (0.88)</td>
</tr>
<tr>
<td align="left" valign="middle">Low</td>
<td align="center" valign="middle">5.16 (0.72)</td>
<td align="center" valign="middle">2.73 (0.93)</td>
<td align="center" valign="middle">3.93 (0.61)</td>
<td align="center" valign="middle">4.37 (0.78)</td>
<td align="center" valign="middle">3.55 (1.08)</td>
<td align="center" valign="middle">3.71 (0.90)</td>
</tr>
<tr>
<td align="left" valign="top" rowspan="3">Landscapes</td>
<td align="left" valign="middle">High</td>
<td align="center" valign="middle">3.89 (0.81)</td>
<td align="center" valign="middle">4.30 (1.00)</td>
<td align="center" valign="middle">4.59 (0.79)</td>
<td align="center" valign="middle">3.15 (0.83)</td>
<td align="center" valign="middle">1.43 (0.54)</td>
<td align="center" valign="middle">4.44 (0.87)</td>
</tr>
<tr>
<td align="left" valign="middle">Medium</td>
<td align="center" valign="middle">4.08 (0.78)</td>
<td align="center" valign="middle">3.94 (1.04)</td>
<td align="center" valign="middle">4.57 (0.71)</td>
<td align="center" valign="middle">3.40 (0.83)</td>
<td align="center" valign="middle">1.25 (0.38)</td>
<td align="center" valign="middle">4.37 (0.87)</td>
</tr>
<tr>
<td align="left" valign="middle">Low</td>
<td align="center" valign="middle">5.01 (0.75)</td>
<td align="center" valign="middle">3.15 (0.93)</td>
<td align="center" valign="middle">3.86 (0.83)</td>
<td align="center" valign="middle">4.18 (0.72)</td>
<td align="center" valign="middle">1.50 (0.57)</td>
<td align="center" valign="middle">4.09 (0.99)</td>
</tr>
</tbody>
</table>
</table-wrap>
<p>We used a series of LMMs for each rating scale with <italic>stimulus content</italic> (set &#x2018;face(s)&#x2019; as the baseline) and <italic>accessibility</italic> (set &#x2018;low-fluent&#x2019; as the baseline) as the independent variables, and we estimated the intercept as a random coefficient (the intercept could vary between participants). The structure of the fixed effects and the random effect were identical across all rating scales (<xref ref-type="fig" rid="fig2">Figure 2</xref>).</p>
<fig position="float" id="fig2">
<label>Figure 2</label>
<caption><p>Linear mixed models for each rating scale as dependent variable. Stimulus content (set &#x2018;face(s)&#x2019; as the baseline) and accessibility (set &#x2018;low-fluent&#x2019; as the baseline) were set as the independent variables and estimated the intercept as a random coefficient (the intercept could vary between participants).</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g002.tif"/>
</fig>
<sec id="sec20">
<label>4.3.1</label>
<title>Pupil responses and art ratings&#x2014;part 2</title>
<p>In part 2, we expected that the ratings could be related to the average pupil dilation. First, we looked at the Pearson correlation between each of the ratings and dilation across trials for each participant separately. Next, we tested the <italic>r</italic> values against zero across participants in a one-sample <italic>t</italic>-test for each rating type. We found that clearness had a strong positive (<italic>t</italic> (69)&#x2009;=&#x2009;5.89, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), while valence had a strong negative association (<italic>t</italic> (69)&#x2009;=&#x2009;4.09, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001) considering pupil size, meaning the more negatively the image was rated, the more the pupil dilated. Additionally, comprehension had a smaller but significant (<italic>t</italic> (69)&#x2009;=&#x2009;3.52, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001) positive association with pupil size. The other three ratings did not show a significant relationship with pupil size [arousal: <italic>t</italic> (69)&#x2009;=&#x2009;1.05, <italic>p</italic>&#x2009;=&#x2009;0.299; complexity: <italic>t</italic> (69)&#x2009;=&#x2009;2.44, <italic>p</italic>&#x2009;=&#x2009;0.017; liking: <italic>t</italic> (69)&#x2009;=&#x2009;0.56, <italic>p</italic>&#x2009;=&#x2009;0.574], after correcting for multiple comparisons. <xref ref-type="fig" rid="fig3">Figure 3</xref> shows the average correlation with pupil size for each of the ratings.</p>
<fig position="float" id="fig3">
<label>Figure 3</label>
<caption><p>Average correlation with pupil size for each of the ratings.</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g003.tif"/>
</fig>
<p>Next, we re-analyzed the same data with LMMs, including all six art ratings as predictors, and baseline-corrected pupil dilation as the outcome (<xref ref-type="table" rid="tab5">Table 5</xref>). Additionally, we included the trial number as a control variable and a random intercept for each participant. We confirmed the strong association of negative valence (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001) and clearness (<italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), while comprehension (<italic>p</italic>&#x2009;=&#x2009;0.484) and arousal (<italic>p</italic>&#x2009;=&#x2009;0.312) were not significant. Liking (<italic>p</italic>&#x2009;=&#x2009;0.048) and complexity (<italic>p</italic>&#x2009;=&#x2009;0.034) were associated with pupil size only before the Bonferroni correction for multiple comparisons. The results differ from those using the Pearson correlation in the combined model as some ratings are slightly correlated (see <xref rid="SM1" ref-type="supplementary-material">Supplementary Figure S1</xref>).</p>
<table-wrap position="float" id="tab5">
<label>Table 5</label>
<caption><p>Part 2, Linear mixed models for artwork ratings and baseline-corrected average pupil size.</p></caption>
<table frame="hsides" rules="groups">
<thead>
<tr>
<th/>
<th/>
<th/>
<th align="center" valign="top" colspan="2">95% CI</th>
<th/>
<th/>
</tr>
<tr>
<th align="left" valign="top">Fixed effects</th>
<th align="center" valign="top">Estimate</th>
<th align="center" valign="top">SE</th>
<th align="center" valign="top">Lower</th>
<th align="center" valign="top">Upper</th>
<th align="center" valign="top"><italic>z</italic>-value</th>
<th align="center" valign="top">Pr(&#x003E;|z|)</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left" valign="top">Intercept</td>
<td align="center" valign="top">78.53</td>
<td align="center" valign="top">30.12</td>
<td align="center" valign="top">19.50</td>
<td align="center" valign="top">137.57</td>
<td align="center" valign="top">2.61</td>
<td align="center" valign="top">0.009</td>
</tr>
<tr>
<td align="left" valign="top">Arousal</td>
<td align="center" valign="top">&#x2212;2.87</td>
<td align="center" valign="top">2.84</td>
<td align="center" valign="top">&#x2212;8.44</td>
<td align="center" valign="top">2.69</td>
<td align="center" valign="top">&#x2212;1.01</td>
<td align="center" valign="top">0.312</td>
</tr>
<tr>
<td align="left" valign="top">Clearness</td>
<td align="center" valign="top">6.76</td>
<td align="center" valign="top">1.63</td>
<td align="center" valign="top">3.56</td>
<td align="center" valign="top">9.95</td>
<td align="center" valign="top">4.14</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
<tr>
<td align="left" valign="top">Complexity</td>
<td align="center" valign="top">&#x2212;6.16</td>
<td align="center" valign="top">2.90</td>
<td align="center" valign="top">&#x2212;11.84</td>
<td align="center" valign="top">&#x2212;0.48</td>
<td align="center" valign="top">&#x2212;2.12</td>
<td align="center" valign="top">0.034</td>
</tr>
<tr>
<td align="left" valign="top">Comprehension</td>
<td align="center" valign="top">1.97</td>
<td align="center" valign="top">2.82</td>
<td align="center" valign="top">&#x2212;3.55</td>
<td align="center" valign="top">7.49</td>
<td align="center" valign="top">0.70</td>
<td align="center" valign="top">0.484</td>
</tr>
<tr>
<td align="left" valign="top">Liking</td>
<td align="center" valign="top">6.02</td>
<td align="center" valign="top">3.04</td>
<td align="center" valign="top">0.06</td>
<td align="center" valign="top">11.98</td>
<td align="center" valign="top">1.98</td>
<td align="center" valign="top">0.048</td>
</tr>
<tr>
<td align="left" valign="top">Valence</td>
<td align="center" valign="top">&#x2212;14.86</td>
<td align="center" valign="top">3.45</td>
<td align="center" valign="top">&#x2212;21.62</td>
<td align="center" valign="top">&#x2212;8.09</td>
<td align="center" valign="top">&#x2212;4.31</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
<tr>
<td align="left" valign="top">Trial</td>
<td align="center" valign="top">&#x2212;1.51</td>
<td align="center" valign="top">0.42</td>
<td align="center" valign="top">&#x2212;2.33</td>
<td align="center" valign="top">&#x2212;0.69</td>
<td align="center" valign="top">&#x2212;3.60</td>
<td align="center" valign="top">&#x003C;0.0001</td>
</tr>
</tbody>
</table>
</table-wrap>
</sec>
</sec>
<sec id="sec21">
<label>4.4</label>
<title>Explorative results combining parts 1 and 2</title>
<sec id="sec22">
<label>4.4.1</label>
<title>Analysis of performance outcome in part 1 on ratings in part 2</title>
<p>To analyze the effects of performance outcome in part 1 on ratings in part 2, we took as independent variables the answer type correct/hit, false alarm, and miss, and we calculated LMMs for all ratings as dependent variables. Again, correct answers were taken as the baseline and the intercept as a random coefficient (the intercept could vary between participants).</p>
<p>We found that false alarms (meaning that participants responded that they saw faces although it was a landscape) were associated with lower ratings for clearness [&#x2212;1.56 points on average, <italic>t</italic>&#x2009;=&#x2009;6.89, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (&#x2212;2.00, &#x2212;1.10)], higher liking [0.42 points on average, <italic>t</italic>&#x2009;=&#x2009;2.89, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (0.14, 0.71)], and more positive valence [0.40 points on average, <italic>t</italic>&#x2009;=&#x2009;2.16, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (0.15, 0.65)], compared to correct answers. However, there were no significant differences in arousal, comprehension, or complexity when participants made the false-alarm errors. When participants misidentified an image with faces as a landscape (miss), results showed that misses were associated with higher ratings of arousal [0.44 points on average (oa), <italic>t</italic>&#x2009;=&#x2009;5.09, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (0.22, 0.61)] and complexity [0.51 points oa, <italic>t</italic>&#x2009;=&#x2009;6.01, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (0.34, 0.68)]. Finally, liking [&#x2212;0.54 points oa, <italic>t</italic>&#x2009;=&#x2009;5.81, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (&#x2212;0.73, &#x2212;0.36)], clearness [&#x2212;0.43 points oa, <italic>t</italic>&#x2009;=&#x2009;2.97, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (&#x2212;0.72, &#x2212;0.15)], and positive valence [&#x2212;0.27 points oa, <italic>t</italic>&#x2009;=&#x2009;3.39, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.01, 95% CI (&#x2212;0.43, &#x2212;0.12)] were all lower in turn. For the full report, see <xref rid="SM1" ref-type="supplementary-material">Supplementary Table S5</xref>.</p>
</sec>
<sec id="sec23">
<label>4.4.2</label>
<title>Combined analysis of average pupil dilation&#x2014;part 1 and part 2</title>
<p>In general, the average baseline-corrected pupil dilation was &#x2212;84.24&#x2009;&#x00B1;&#x2009;49.06 au. in part 1 and &#x2212;33.25&#x2009;&#x00B1;&#x2009;96.24 au. in part 2, showing that the average dilation was smaller in part 1 (<italic>t</italic><sub>69</sub>&#x2009;=&#x2009;4.3, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001, <italic>d</italic>&#x2009;=&#x2009;0.66, <xref rid="SM1" ref-type="supplementary-material">Supplementary Figure S2a</xref>, see also <xref rid="SM1" ref-type="supplementary-material">Supplementary Figure S2b</xref>, where the effect of the flashing stimulus presentation resulting in wave-like patterns is visible in the pupil data in part 1).</p>
<p>In the combined analysis of average pupil dilation for the two parts and <italic>accessibility</italic>, we used a two-way repeated-measures ANOVA (see <xref ref-type="fig" rid="fig4">Figure 4</xref>). We found that average baseline-corrected pupil dilation was significantly higher in part 2 (<italic>F</italic> (1,69)&#x2009;=&#x2009;17.18, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001). There was also a significant effect of <italic>accessibility</italic> (<italic>F</italic> (2,138)&#x2009;=&#x2009;10.04, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001); this effect of accessibility was more pronounced in part 2, resulting in an interaction between <italic>accessibility</italic> and the experimental parts (<italic>F</italic> (2,138)&#x2009;=&#x2009;10.93, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001). See results for both, <italic>accessibility</italic> and <italic>stimulus content</italic>, considering raw data in the <xref rid="SM1" ref-type="supplementary-material">Supplementary Figures S3a</xref>,<xref rid="SM1" ref-type="supplementary-material">b</xref>.</p>
<fig position="float" id="fig4">
<label>Figure 4</label>
<caption><p>Combined analysis of average pupil dilation for the two experimental parts considering accessibility (baseline-corrected and preprocessed pupil data).</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g004.tif"/>
</fig>
<p>The combined analysis of the experimental parts and <italic>stimulus content</italic> (see <xref ref-type="fig" rid="fig5">Figure 5</xref>) was also performed with a two-way repeated-measures ANOVA. We found that, in general, baseline subtracted pupil dilation for face stimuli was larger (<italic>F</italic> (1,69)&#x2009;=&#x2009;9.659, <italic>p</italic>&#x2009;=&#x2009;0.003), and there was also a significant effect of the experimental parts (<italic>F</italic> (1,69)&#x2009;=&#x2009;16.35, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), with a far larger pupil dilation in part 2. There was no significant interaction between the factors (<italic>F</italic> (1,69)&#x2009;=&#x2009;3.01, <italic>p</italic>&#x2009;=&#x2009;0.087).</p>
<fig position="float" id="fig5">
<label>Figure 5</label>
<caption><p>Combined analysis of average pupil dilation for the two experimental parts considering stimulus content (baseline-corrected and preprocessed pupil data).</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g005.tif"/>
</fig>
</sec>
<sec id="sec24">
<label>4.4.3</label>
<title>Combined analysis of time of maximum pupil dilation&#x2014;part 1 and part 2</title>
<p>Considering the time of the maximum pupil dilation, results show that the maximum pupil dilation was later in part 1 (<italic>t</italic><sub>69</sub>&#x2009;=&#x2009;6.86, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001, <italic>d</italic>&#x2009;=&#x2009;1.04; 0.33&#x2009;&#x00B1;&#x2009;0.16, units in proportion of trial), compared to part 2 (0.11&#x2009;&#x00B1;&#x2009;0.08, units in proportion of trial, see <xref ref-type="fig" rid="fig6">Figure 6</xref>). Finally, when <italic>accessibility</italic> was included as a factor in a repeated measures ANOVA, we found that this measure was also influenced by <italic>accessibility</italic> (<italic>F</italic> (2,138)&#x2009;=&#x2009;7.39, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001), with a strong effect of the experimental parts (<italic>F</italic> (1,69)&#x2009;=&#x2009;17.16, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001) and an interaction between the experimental parts and <italic>accessibility</italic> (<italic>F</italic> (2,138)&#x2009;=&#x2009;10.27, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.001). Notably, the effect of <italic>accessibility</italic> on dilation was significant, but negative in part 1 (<xref ref-type="table" rid="tab3">Table 3</xref>).</p>
<fig position="float" id="fig6">
<label>Figure 6</label>
<caption><p>Time of the maximum pupil dilation, with later dilations in part 1 (<italic>t</italic><sub>69</sub>&#x2009;=&#x2009;6.86, <italic>p</italic>&#x2009;&#x003C;&#x2009;0.0001, <italic>d</italic>&#x2009;=&#x2009;1.04; 0.33&#x2009;&#x00B1;&#x2009;0.16, units in proportion of trial), compared to part 2 (0.11&#x2009;&#x00B1;&#x2009;0.08, units in proportion of trial).</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g006.tif"/>
</fig>
<p>Most importantly, in contrast to prior studies (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), we analyzed the whole trial period for each trial and connected this to pupil dilations in both parts. <xref ref-type="fig" rid="fig7">Figure 7</xref> shows the distribution of trials with respect to the relative time of maximum pupil dilation. Here, it shows that even though we found an association with our main predictors, <italic>stimulus content</italic> and <italic>accessibility</italic>, the maximum pupil dilation did not always coincide with the behavioral response. In part 1, we found an approximately bimodal distribution, with the maximum dilation mostly at the beginning or end of the trial. In part 2, the maximum dilation was mostly at the beginning.</p>
<fig position="float" id="fig7">
<label>Figure 7</label>
<caption><p>Distribution of trials with respect to the relative time of maximum pupil dilation. The <italic>x</italic>-axis shows the proportion of trial time, where the maximum pupil dilation was. The <italic>y</italic>-axis is the number of trials.</p></caption>
<graphic xlink:href="fpsyg-15-1192565-g007.tif"/>
</fig>
</sec>
</sec>
</sec>
<sec sec-type="discussion" id="sec25">
<label>5</label>
<title>Discussion</title>
<p>In studying the phenomenon of Gestalt perception, we enter a fascinating domain where our minds seamlessly form holistic concepts from noisy/fragmented sensory input (<xref ref-type="bibr" rid="ref68">Wertheimer, 1923</xref>; <xref ref-type="bibr" rid="ref66">Wagemans, 2013</xref>). This phenomenon is particularly captivating when this process involves an Aha moment, a sudden insight where abstract and first disconnected elements within a visual artwork converge into a meaningful, recognizable whole (<xref ref-type="bibr" rid="ref2">Arnheim, 1954</xref>; <xref ref-type="bibr" rid="ref44">Muth et al., 2013</xref>). This moment of recognition can vary from being immediate to delayed, where both encounters impact our experience with, our aesthetic judgment of, and our emotional response to art (<xref ref-type="bibr" rid="ref60">Van de Cruys and Wagemans, 2011a</xref>,<xref ref-type="bibr" rid="ref62">b</xref>; <xref ref-type="bibr" rid="ref48">Reber, 2012</xref>; <xref ref-type="bibr" rid="ref42">Muth and Carbon, 2013</xref>). While the recognition of Gestalt patterns is pivotal in experiencing these Aha moments, particularly in the context of cubist art, it forms only a part of the broader evaluative process. In cubist art, the recognition of Gestalt forms a foundation for evaluation, but this evaluation is more comprehensive, entailing the integration of these identified patterns with the entire artwork to elicit an overall emotional and, sometimes even an aesthetic, response. In our design, we emphasized the art-task instructions to focus participants on experiencing an Aha moment/recognition (part 1) or free-viewing and evaluating (part 2). 
This process aligns with the multi-stage models of art perception and evaluation (see, e.g., <xref ref-type="bibr" rid="ref35">Leder et al., 2004</xref>; <xref ref-type="bibr" rid="ref37">Leder and Nadal, 2014</xref>; for further reading, <xref ref-type="bibr" rid="ref46">Pelowski et al., 2016</xref>, <xref ref-type="bibr" rid="ref45">2017</xref>), which highlight not only the cognitive recognition of art elements but also the emotional engagement with and evaluation of the artwork.</p>
<sec id="sec26">
<label>5.1</label>
<title>Behavioral results</title>
<p>In part 1, we analyzed response times based on the following three main factors: stimulus content, accessibility, and performance motivation manipulated by a private versus public design. Our results showed longer response times for highly fluent artworks and face stimuli, supporting our hypotheses. However, no significant findings were identified for the performance outcome. These findings contradict the hedonic fluency model (<xref ref-type="bibr" rid="ref6">Belke et al., 2010</xref>; <xref ref-type="bibr" rid="ref48">Reber, 2012</xref>; <xref ref-type="bibr" rid="ref24">Jakesch et al., 2013</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), instead favoring theories that suggest a delayed, meaningful, and accurate recognition process (<xref ref-type="bibr" rid="ref60">Van de Cruys and Wagemans, 2011a</xref>,<xref ref-type="bibr" rid="ref62">b</xref>; <xref ref-type="bibr" rid="ref59">Van de Cruys, 2017</xref>).</p>
<p>The intriguing result of longer response times for recognizing faces challenges the conventional understanding that configural processing during face recognition is highly efficient, resulting in quick detection (<xref ref-type="bibr" rid="ref36">Leder and Bruce, 2000</xref>; <xref ref-type="bibr" rid="ref50">Sandford and Bindemann, 2020</xref>). It suggests that the task&#x2019;s nature and our attitude when encountering art can influence the visual art interaction mechanism, requiring more cognitive effort and time to decipher highly fluent artworks and faces, when attention is paid to this effort (<xref ref-type="bibr" rid="ref3">Aston-Jones and Cohen, 2005</xref>). Further analysis showed similar response times for landscapes and medium- to low-fluent face stimuli. This could indicate that participants either took their chances or adopted a superficially fast attitude that did not require deep attention.</p>
<p>We further would redefine the Aha moment from an instantaneous revelation to a potentially more &#x201C;calculated slow&#x201D; than &#x201C;reflective fast&#x201D; cognitive process. Our findings align with our hypothesis of prioritizing accurate interpretation and well-founded predictions. The observed delay in cognitive shift further suggests top-down control at a behavioral level, demanding further exploration in neuroscientific research (<xref ref-type="bibr" rid="ref61">Van de Cruys et al., 2017</xref>; <xref ref-type="bibr" rid="ref63">Van Geert and Wagemans, 2020</xref>).</p>
<p>Considering ratings, our results indicate a nuanced interplay between fluency and the emotion ratings valence and arousal. Valence ratings were more positive for low-fluent face stimuli but more negative for low-fluent landscapes. Landscapes showed more positive valence scores for high- and medium-fluent artworks, where face stimuli gained higher arousal ratings in general. Liking was indifferent to both stimulus content and accessibility in all categories. Clearness was particularly different for faces, with decreasing clearness ratings along with decreasing accessibility levels. Landscapes were rated as quite unclear in general. As these results do not show a clear association between preference, emotional valence, and comprehension considering content and accessibility, they hint at a potential influence of task and performance outcome&#x2014;an analysis that we conducted exploratively.</p>
<p>Our analyses reveal intriguing effects of performance outcomes of part 1 on artwork ratings in part 2. When participants falsely recognized faces in landscapes (false-alarm), they tended to rate the artwork as less clear, but they liked them more with more positive valence ratings than correct responses. No significant differences were observed in arousal, comprehension, or complexity ratings in these instances. Conversely, when participants failed to identify faces in the artwork (miss), the artwork was perceived as more arousing and complex; such errors were further associated with negative valence and lower ratings of liking, clearness, and comprehension.</p>
<p>One possible interpretation considering the art field is that false alarms are experienced as non-threatening, pleasurable, and positive as they reflect the artists&#x2019; skill in leaving ambiguity to a level where much can be seen, even though it is not there. On the contrary, not detecting a face, which is actually there, can be experienced as a failure in detecting the right meaning. These insights into how recognition errors influence judgment and affective responses to artwork could offer valuable contributions to the understanding of the cognitive and emotional landscapes of art appreciation. In addition, our results suggest that art appreciation is based on a more intricate interplay of cognitive state, task (or rather attitude), and affect, and not strictly on perceptual clarity or fluency (<xref ref-type="bibr" rid="ref53">Seth, 2013</xref>, <xref ref-type="bibr" rid="ref54">2019</xref>). Future research could explore state attitudes of people interacting with art and measure their varying experiences. This is relevant considering factors such as social context (<xref ref-type="bibr" rid="ref13">Fingerhut, 2020</xref>) in a crowded museum or perceived ability to decipher abstract patterns, which could influence the level of in-depth processing and attention.</p>
</sec>
<sec id="sec27">
<label>5.2</label>
<title>Pupillometry</title>
<p>Our study design aimed to integrate an implicit measure for notifying cognitive and affective shifts in perception and to clarify ambiguities around the contradictory findings of previous pupillometry studies regarding fluency effects and response times in art research (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>). These studies focused solely on maximum dilation and end-of-trial pupil responses, overlooking other potential indicators of cognitive and affective processes throughout the trial period. We expanded the analysis and considered (1) the timing of maximum and (2) the average dilation throughout the entire trial period. This comprehensive approach allowed us to investigate whether some noteworthy pupil responses might have been missed in previous studies and whether late physiological responses correlate with behavioral responses (<xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>).</p>
<p>For the recognition task (part 1), neither stimulus content nor accessibility significantly impacted average pupil size. In contrast, the time of maximum pupil dilation presented a different pattern: lower fluency predicted earlier maximum dilation, while face stimuli and correct response trials corresponded with later maximum dilation. During free viewing (part 2), average pupil dilation significantly responded to our main factors, whereas the time of maximum dilation did not. High-fluent artworks and face stimuli led to larger pupil dilation.</p>
<p>When comparing parts 1 and 2, part 2 showed significantly higher average pupil dilation, and the impact of accessibility and stimulus content was more pronounced in part 2. Although these results hint that average pupil dilations might be a marker for artwork evaluations, or emotional response, the found results on ratings are still inconclusive: average pupil dilation correlated especially with high clearness and negative valence. Liking showed a slight positive association, while complexity had a negative association with dilations. These results add to the controversial findings of prior studies (e.g., <xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>), still leaving many open questions as to whether pupil dilations can truly represent directionality of emotional response, or just an emotional response <italic>per se</italic>&#x2014;might this be a positive, arousing, or negative one.</p>
<p>In addition, our findings contrast with previous studies that focused solely on pupil size at the end of trials (<xref ref-type="bibr" rid="ref31">Kuchinke et al., 2009</xref>; <xref ref-type="bibr" rid="ref12">Elschner et al., 2018</xref>). By examining the entire trial period, we identified that maximum pupil dilation occurred at both the beginning and end of trials in part 1 (see <xref ref-type="fig" rid="fig7">Figure 7</xref>). This pattern indicates two distinct phases of participants&#x2019; physiological responses. The initial dilation at the start of the trial likely signifies the engagement of a search pattern, as the brain allocates resources to scan and process new information. This phase is closely tied to the adaptive gain theory, which posits that the locus coeruleus&#x2013;norepinephrine (LC-NE) system in the brain modulates cognitive functions to optimize performance (for further reading adaptive gain theory considering locus coeruleus activity, <xref ref-type="bibr" rid="ref17">Gilzenrat et al., 2010</xref>; <xref ref-type="bibr" rid="ref26">Jepma and Nieuwenhuis, 2011</xref>). According to this theory, the LC-NE system dynamically adjusts between the exploitation of recognizing familiar information and the exploration of new stimuli. The initial pupil dilation may thus reflect an increased LC activity, gearing the cognitive system toward exploration and heightened attention. Conversely, the dilation observed at the end of the trial could indicate recognition processes, where the LC-NE system shifts toward exploiting known information. This cycle of dilation aligns with the adaptive gain theory&#x2019;s framework, suggesting that the LC-NE system&#x2019;s activity is crucial in modulating attention and cognitive effort in response to changing task demands.</p>
<p>In sum, although we did find clear differences in pupillary measures considering recognition, i.e., time of maximum dilation, and as a potential affective response, the average pupil dilation, further research is necessary to deepen the understanding of our research. Future research could consider locus coeruleus activity (<xref ref-type="bibr" rid="ref3">Aston-Jones and Cohen, 2005</xref>; <xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref34">Larsen and Waters, 2018</xref>; <xref ref-type="bibr" rid="ref55">Spee et al., 2018</xref>) during Gestalt recognition tasks and Aha moments to further explore the interplay of bottom-up and top-down influences as well as search patterns (seeking for meaning) relevant for art appreciation. Considering emotional response, we would suggest that pupillary measures should be connected with other physiological and/or neuroscientific approaches, as interpreting our results in relation to prior research results does not show a clear picture.</p>
</sec>
<sec id="sec28">
<label>5.3</label>
<title>Limitations and considerations</title>
<p>Several limitations and considerations warrant further discussion in our study. Primarily, our attempt to manipulate effort motivation through a private versus public condition failed to produce the anticipated impact. It could be that our manipulation was inadequate, or alternatively, it suggests that the act of identifying Gestalt in art, a socially and culturally trained behavior (<xref ref-type="bibr" rid="ref13">Fingerhut, 2020</xref>, <xref ref-type="bibr" rid="ref14">2021</xref>; <xref ref-type="bibr" rid="ref56">Spee et al., 2022</xref>), does not require further extrinsic motivation. Furthermore, the public condition might have been less compelling as art interaction often hinges on individual interpretation and personal meaning, rendering the opinions of others less influential.</p>
<p>Our results point toward a delayed response in achieving the Aha moment. While it might be questioned whether part 1 truly reflected an art interaction or merely a recognition challenge, we contend that art inherently poses a challenge&#x2014;discerning patterns in ambiguity, interpreting the artist&#x2019;s intent, and crafting our personal interpretations. Thus, our design might not diverge significantly from a realistic encounter with art, considering that art engagement often entails giving a thorough interpretation, an act that may enhance one&#x2019;s societal status (<xref ref-type="bibr" rid="ref56">Spee et al., 2022</xref>) through perceived intelligence.</p>
<p>We acknowledge that our analysis connecting part 1 and part 2, specifically that performance outcome influenced the ratings made in part 2, could be questioned. However, maintaining research in cognitive theories (<xref ref-type="bibr" rid="ref51">Schwartenbeck et al., 2013</xref>; <xref ref-type="bibr" rid="ref28">Kesner, 2014</xref>; <xref ref-type="bibr" rid="ref54">Seth, 2019</xref>) and our own experiences interacting with art suggest that success or failure in comprehending art, along with the context, influence our judgments. Given the short time span of part 1 (a few minutes), we support our exploratory findings, suggesting that the two parts influenced each other. Furthermore, the sequential nature of the experiment itself might have created a carry-over effect, where the cognitive processes and performance choices made in part 1 could have shaped the subjective ratings in part 2.</p>
<p>Certainly, we are aware that the rapid serial presentation of the artworks in a dark laboratory room influenced pupillary measurements. This is an inherent limitation of the study design. However, the approach was successfully employed in previous research (<xref ref-type="bibr" rid="ref32">Laeng et al., 2011</xref>, <xref ref-type="bibr" rid="ref33">2012</xref>) and, indeed, our findings still yielded significant results. However, acknowledging this constraint is essential and points toward the potential for alternative methodologies that may yield more naturalistic and comprehensive data. The advent of new technologies, such as movable eye trackers or brain pattern measures that allow participants more freedom of movement, could provide a more immersive and realistic environment for observing visual art interaction. This, in turn, could help to enhance the ecological validity of future studies, making findings more applicable to real-world art contexts.</p>
<p>Finally, as a significant limitation, it is important to note that our study, like previous research, was unable to fully disentangle whether pupil dilations were indicative of recognition or merely affect, as both are potential triggers for such physiological responses. We did find that different pupillary measures appear relevant for the diverse attentional states. Nonetheless, future research may endeavor to find more precise ways to separate these two effects to gain a clearer understanding of the cognitive and emotional processes involved in art appreciation.</p>
</sec>
<sec id="sec29">
<label>5.4</label>
<title>Summary and research prospects</title>
<p>Our study revealed that accurate or inaccurate predictions of Gestalt significantly influenced the time of behavioral response, that is of stating to have gained an Aha moment, suggesting that commonly observed behaviors, such as fluency effects, can be manipulated&#x2014;or even emphasized&#x2014;depending on the art task presented or participants&#x2019; state attitude. This suggests that the act of evaluating an artwork, the subsequent experience, the exploration process, and the desired outcomes are all products of a dynamic, reciprocal process (<xref ref-type="bibr" rid="ref62">Van de Cruys and Wagemans, 2011b</xref>; <xref ref-type="bibr" rid="ref63">Van Geert and Wagemans, 2020</xref>). Our results illuminate the complex ways in which we interact with visual art, showing how the delicate interplay of Gestalt recognition, insight, and exploration guides Aha and, potentially, our art judgments and emotional response to art.</p>
<p>Crucially, our findings demonstrate that Gestalt recognition as a nuanced process can be measured both behaviorally and physiologically, exemplified through our use of pupillometry as a measure of cognitive shifts. Given that pupillometry correlates with brain state (<xref ref-type="bibr" rid="ref3">Aston-Jones and Cohen, 2005</xref>; <xref ref-type="bibr" rid="ref17">Gilzenrat et al., 2010</xref>; <xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>; <xref ref-type="bibr" rid="ref34">Larsen and Waters, 2018</xref>; <xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>; <xref ref-type="bibr" rid="ref25">Jefferies and Di Lollo, 2019</xref>), we propose future research in art appreciation and neuroaesthetics to consider task-evoked differences in locus coeruleus activity during interaction with visual art (<xref ref-type="bibr" rid="ref55">Spee et al., 2018</xref>). We also underscore the importance of considering Gestalt recognition in the process of art evaluation, whether it concerns familiar Gestalt patterns from past experiences or novel figure-pattern constellations (<xref ref-type="bibr" rid="ref28">Kesner, 2014</xref>; <xref ref-type="bibr" rid="ref61">Van de Cruys et al., 2017</xref>; <xref ref-type="bibr" rid="ref13">Fingerhut, 2020</xref>).</p>
<p>Building on seminal theories positing that piecing together disparate features into a unified Gestalt and assigning meaning to the recognized pattern is an inherent aspect of art viewing (<xref ref-type="bibr" rid="ref68">Wertheimer, 1923</xref>; <xref ref-type="bibr" rid="ref2">Arnheim, 1954</xref>), we argue that the ability to predict Gestalt is an integral part of the art experience. It not only guides cognitive and affective processing but also profoundly shapes the overall quality of art experiences. We propose that the rewards derived from viewing art are not solely contingent on personal taste or stimulus valence but are intimately tied to the capacity for accurate Gestalt prediction. In this light, both the quality of the Gestalt and the precision of the predictions play a crucial role in determining the pleasure derived from art appreciation. This pivotal finding suggests a new dimension for future investigations into the cognitive and emotional landscapes of art interactions. Accordingly, both &#x2018;good Gestalt&#x2019; and &#x2018;good predictions&#x2019; would determine how rewarding the act of viewing an artwork can be.</p>
</sec>
</sec>
<sec sec-type="data-availability" id="sec30">
<title>Data availability statement</title>
<p>The original contributions presented in the study are included in the article/<xref ref-type="sec" rid="sec34">Supplementary material</xref>; further inquiries can be directed to the corresponding author.</p>
</sec>
<sec sec-type="ethics-statement" id="sec31">
<title>Ethics statement</title>
<p>The studies involving humans were approved by Ethics committee of the University of Vienna. The studies were conducted in accordance with the local legislation and institutional requirements. The participants provided their written informed consent to participate in this study.</p>
</sec>
<sec sec-type="author-contributions" id="sec32">
<title>Author contributions</title>
<p>BS: development of study design, main lead in the design of analysis, collected data, contributed data and analysis tools, performed analysis, and wrote the main manuscript. JA: analysis, especially eye-tracking data. JM: analysis, especially behavioral data. UT: conceived and designed the analysis and supervised the data analysis. MP: contributed to the analysis. HL: supervised development of study design. All authors contributed to the article and approved the submitted version.</p>
</sec>
</body>
<back>
<sec sec-type="COI-statement" id="sec33">
<title>Conflict of interest</title>
<p>The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
</sec>
<sec id="sec100" sec-type="disclaimer">
<title>Publisher&#x2019;s note</title>
<p>All claims expressed in this article are solely those of the authors and do not necessarily represent those of their affiliated organizations, or those of the publisher, the editors and the reviewers. Any product that may be evaluated in this article, or claim that may be made by its manufacturer, is not guaranteed or endorsed by the publisher.</p>
</sec>
<sec sec-type="supplementary-material" id="sec34">
<title>Supplementary material</title>
<p>The Supplementary material for this article can be found online at: <ext-link xlink:href="https://www.frontiersin.org/articles/10.3389/fpsyg.2024.1192565/full#supplementary-material" ext-link-type="uri">https://www.frontiersin.org/articles/10.3389/fpsyg.2024.1192565/full#supplementary-material</ext-link></p>
<supplementary-material xlink:href="Data_Sheet_1.pdf" id="SM1" mimetype="application/pdf" xmlns:xlink="http://www.w3.org/1999/xlink"/>
</sec>
<fn-group>
<fn id="fn0001"><p><sup>1</sup>The pupil is controlled by the iris sphincter muscle, which constricts the pupil, and the dilator muscle, which promotes dilation (<xref ref-type="bibr" rid="ref140">Borgdorff, 1975</xref>). Pupils respond to brightness or changes in depth of the focal field (mediated by cholinergic activity). Reduced illumination can provoke dilation of up to 120% in size compared to standard light conditions (<xref ref-type="bibr" rid="ref141">Wyatt, 1995</xref>). Pupils can also constrict or dilate due to arousing stimuli or mental effort (<xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>). Task-evoked, psychologically triggered pupil reactions of affective and cognitive quality show modest effects; on average, the pupil diameter changes about 0.5&#x2009;mm (&#x2248; 20%, <xref ref-type="bibr" rid="ref5">Beatty and Lucero-Wagoner, 2000</xref>; <xref ref-type="bibr" rid="ref33">Laeng et al., 2012</xref>). Pupil responses, therefore, are also a measure of higher cognitive function, though they are mostly reflexive and involuntary (<xref ref-type="bibr" rid="ref40">Math&#x00F4;t, 2018</xref>).</p></fn>
</fn-group>
<ref-list>
<title>References</title>
<ref id="ref1"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aboyoun</surname> <given-names>D. C.</given-names></name> <name><surname>Dabbs</surname> <given-names>J. M.</given-names></name></person-group> (<year>1998</year>). <article-title>The Hess pupil dilation findings: sex or novelty?</article-title> <source>Soc. Behav. Personal. Int. J.</source> <volume>26</volume>, <fpage>415</fpage>&#x2013;<lpage>419</lpage>. doi: <pub-id pub-id-type="doi">10.2224/sbp.1998.26.4.415</pub-id></citation></ref>
<ref id="ref2"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Arnheim</surname> <given-names>R.</given-names></name></person-group> (<year>1954</year>). <source>Art and visual perception: a psychology of the creative eye</source> <publisher-name>University of California Press</publisher-name>.</citation></ref>
<ref id="ref3"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Aston-Jones</surname> <given-names>G.</given-names></name> <name><surname>Cohen</surname> <given-names>J. D.</given-names></name></person-group> (<year>2005</year>). <article-title>An integrative theory of locus coeruleus-norepinephrine function: adaptive gain and optimal performance</article-title>. <source>Annu. Rev. Neurosci.</source> <volume>28</volume>, <fpage>403</fpage>&#x2013;<lpage>450</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev.neuro.28.061604.135709</pub-id>, PMID: <pub-id pub-id-type="pmid">16022602</pub-id></citation></ref>
<ref id="ref4"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Baayen</surname> <given-names>R. H.</given-names></name> <name><surname>Davidson</surname> <given-names>D. J.</given-names></name> <name><surname>Bates</surname> <given-names>D. M.</given-names></name></person-group> (<year>2008</year>). <article-title>Mixed-effects modeling with crossed random effects for subjects and items</article-title>. <source>J. Mem. Lang.</source> <volume>59</volume>, <fpage>390</fpage>&#x2013;<lpage>412</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.jml.2007.12.005</pub-id></citation></ref>
<ref id="ref5"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Beatty</surname> <given-names>J.</given-names></name> <name><surname>Lucero-Wagoner</surname> <given-names>B.</given-names></name></person-group> (<year>2000</year>). &#x201C;<article-title>The pupillary system</article-title>&#x201D; in <source>Handbook of psychophysiology</source>. eds. <person-group person-group-type="editor"><name><surname>Cacioppo</surname> <given-names>J. T.</given-names></name> <name><surname>Taqssinary</surname> <given-names>L. G.</given-names></name> <name><surname>Bernston</surname> <given-names>G. G.</given-names></name></person-group>. <edition>2nd</edition> ed (<publisher-name>Cambridge, UK: Cambridge University Press</publisher-name>), <fpage>142</fpage>&#x2013;<lpage>162</lpage>.</citation></ref>
<ref id="ref6"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Belke</surname> <given-names>B.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Strobach</surname> <given-names>T.</given-names></name> <name><surname>Carbon</surname> <given-names>C. C.</given-names></name></person-group> (<year>2010</year>). <article-title>Cognitive fluency: high-level processing dynamics in art appreciation</article-title>. <source>Psychol. Aesthet. Creat. Arts</source> <volume>4</volume>, <fpage>214</fpage>&#x2013;<lpage>222</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0019648</pub-id></citation></ref>
<ref id="ref7"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Bernick</surname> <given-names>N.</given-names></name> <name><surname>Kling</surname> <given-names>A.</given-names></name> <name><surname>Borowitz</surname> <given-names>G.</given-names></name></person-group> (<year>1971</year>). <article-title>Physiologic differentiation of sexual arousal and anxiety</article-title>. <source>Psychosom. Med.</source> <volume>33</volume>, <fpage>341</fpage>&#x2013;<lpage>352</lpage>. doi: <pub-id pub-id-type="doi">10.1097/00006842-197107000-00004</pub-id>, PMID: <pub-id pub-id-type="pmid">5112331</pub-id></citation></ref>
<ref id="ref140"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Borgdorff</surname> <given-names>P.</given-names></name></person-group> (<year>1975</year>). <article-title>Respiratory fluctuations in pupil size</article-title>. <source>Am. J. Physiology-Legacy Content.</source> <volume>228</volume>, <fpage>1094</fpage>&#x2013;<lpage>1102</lpage>. doi: <pub-id pub-id-type="doi">10.1152/ajplegacy.1975.228.4.1094</pub-id>, PMID: <pub-id pub-id-type="pmid">34113285</pub-id><!-- NOTE(review): this PMID duplicates ref14 (Fingerhut, 2021) and looks incorrect for a 1975 article; verify against PubMed --></citation></ref>
<ref id="ref8"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Bosker</surname> <given-names>R.</given-names></name> <name><surname>Snijders</surname> <given-names>T. A.</given-names></name></person-group> (<year>2011</year>). <source>Multilevel analysis: An introduction to basic and advanced multilevel modeling</source>. London: Sage.</citation></ref>
<ref id="ref9"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Brieber</surname> <given-names>D.</given-names></name> <name><surname>Nadal</surname> <given-names>M.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Rosenberg</surname> <given-names>R.</given-names></name></person-group> (<year>2014</year>). <article-title>Art in time and space: context modulates the relation between art experience and viewing time</article-title>. <source>PLoS One</source> <volume>9</volume>:<fpage>e99019</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0099019</pub-id>, PMID: <pub-id pub-id-type="pmid">24892829</pub-id></citation></ref>
<ref id="ref10"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Danek</surname> <given-names>A. H.</given-names></name> <name><surname>Wiley</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>What about false insights? Deconstructing the Aha! Experience along its multiple dimensions for correct and incorrect solutions separately</article-title>. <source>Front. Psychol.</source> <volume>7</volume>:<fpage>2077</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2016.02077</pub-id>, PMID: <pub-id pub-id-type="pmid">28163687</pub-id></citation></ref>
<ref id="ref11"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Einhauser</surname> <given-names>W.</given-names></name> <name><surname>Stout</surname> <given-names>J.</given-names></name> <name><surname>Koch</surname> <given-names>C.</given-names></name> <name><surname>Carter</surname> <given-names>O.</given-names></name></person-group> (<year>2008</year>). <article-title>Pupil dilation reflects perceptual selection and predicts subsequent stability in perceptual rivalry</article-title>. <source>Proc. Natl. Acad. Sci.</source> <volume>105</volume>, <fpage>1704</fpage>&#x2013;<lpage>1709</lpage>. doi: <pub-id pub-id-type="doi">10.1073/pnas.0707727105</pub-id>, PMID: <pub-id pub-id-type="pmid">18250340</pub-id></citation></ref>
<ref id="ref12"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Elschner</surname> <given-names>S. G.</given-names></name> <name><surname>H&#x00FC;bner</surname> <given-names>R.</given-names></name> <name><surname>Dambacher</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Do fluency-induced pupillary responses reflect aesthetic affect?</article-title> <source>Psychol. Aesthet. Creat. Arts</source> <volume>12</volume>, <fpage>294</fpage>&#x2013;<lpage>303</lpage>. doi: <pub-id pub-id-type="doi">10.1037/aca0000139</pub-id></citation></ref>
<ref id="ref13"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Fingerhut</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). &#x201C;<article-title>Habits and the enculturated mind: pervasive artifacts, predictive processing, and expansive habits</article-title>&#x201D; in <source>Habits: Pragmatist approaches from cognitive science, neuroscience, and social theory</source>. Eds. <person-group person-group-type="editor"><name><surname>Caruana</surname> <given-names>F.</given-names></name> <name><surname>Testa</surname> <given-names>I.</given-names></name></person-group> (<publisher-loc>Cambridge UK</publisher-loc>: <publisher-name>Cambridge University Press</publisher-name>), <fpage>352</fpage>&#x2013;<lpage>375</lpage>.</citation></ref>
<ref id="ref14"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Fingerhut</surname> <given-names>J.</given-names></name></person-group> (<year>2021</year>). <article-title>Enacting media. An embodied account of enculturation between Neuromediality and new cognitive media theory</article-title>. <source>Front. Psychol.</source> <volume>12</volume>:<fpage>635993</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2021.635993</pub-id>, PMID: <pub-id pub-id-type="pmid">34113285</pub-id></citation></ref>
<ref id="ref15"><citation citation-type="other"><person-group person-group-type="author"><name><surname>Ga&#x0142;ecki</surname> <given-names>A.</given-names></name> <name><surname>Burzykowski</surname> <given-names>T.</given-names></name></person-group> (<year>2013</year>). <source>Linear mixed-effects model</source>. New York: Springer.</citation></ref>
<ref id="ref16"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Gick</surname> <given-names>M. L.</given-names></name> <name><surname>Lockhart</surname> <given-names>R. S.</given-names></name></person-group> (<year>1995</year>). &#x201C;<article-title>Cognitive and affective components of insight</article-title>&#x201D; in <source>The nature of insight</source>. eds. <person-group person-group-type="editor"><name><surname>Sternberg</surname> <given-names>R. J.</given-names></name> <name><surname>Davidson</surname> <given-names>J. E.</given-names></name></person-group> (<publisher-name>Massachusetts, United States: The MIT Press</publisher-name>), <fpage>197</fpage>&#x2013;<lpage>228</lpage>.</citation></ref>
<ref id="ref17"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gilzenrat</surname> <given-names>M. S.</given-names></name> <name><surname>Nieuwenhuis</surname> <given-names>S.</given-names></name> <name><surname>Jepma</surname> <given-names>M.</given-names></name> <name><surname>Cohen</surname> <given-names>J. D.</given-names></name></person-group> (<year>2010</year>). <article-title>Pupil diameter tracks changes in control state predicted by the adaptive gain theory of locus coeruleus function</article-title>. <source>Cogn. Affect. Behav. Neurosci.</source> <volume>10</volume>, <fpage>252</fpage>&#x2013;<lpage>269</lpage>. doi: <pub-id pub-id-type="doi">10.3758/CABN.10.2.252</pub-id>, PMID: <pub-id pub-id-type="pmid">20498349</pub-id></citation></ref>
<ref id="ref18"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Gr&#x00FC;ner</surname> <given-names>S.</given-names></name> <name><surname>Specker</surname> <given-names>E.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name></person-group> (<year>2019</year>). <article-title>Effects of context and genuineness in the experience of art</article-title>. <source>Empir. Stud. Arts</source> <volume>37</volume>, <fpage>138</fpage>&#x2013;<lpage>152</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0276237418822896</pub-id></citation></ref>
<ref id="ref19"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Herv&#x00E9;-Minvielle</surname> <given-names>A.</given-names></name> <name><surname>Sara</surname> <given-names>S. J.</given-names></name></person-group> (<year>1995</year>). <article-title>Rapid habituation of auditory responses of locus coeruleus cells in anaesthetized and awake rats</article-title>. <source>Neuroreport</source> <volume>6</volume>, <fpage>1363</fpage>&#x2013;<lpage>1368</lpage>. doi: <pub-id pub-id-type="doi">10.1097/00001756-199507100-00001</pub-id></citation></ref>
<ref id="ref20"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hess</surname> <given-names>E. H.</given-names></name></person-group> (<year>1965</year>). <article-title>Attitude and pupil size</article-title>. <source>Sci. Am.</source> <volume>212</volume>, <fpage>46</fpage>&#x2013;<lpage>54</lpage>. doi: <pub-id pub-id-type="doi">10.1038/scientificamerican0465-46</pub-id></citation></ref>
<ref id="ref21"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Hess</surname> <given-names>E. H.</given-names></name> <name><surname>Polt</surname> <given-names>J. M.</given-names></name></person-group> (<year>1960</year>). <article-title>Pupil size as related to interest value of visual stimuli</article-title>. <source>Science</source> <volume>132</volume>, <fpage>349</fpage>&#x2013;<lpage>350</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.132.3423.349</pub-id>, PMID: <pub-id pub-id-type="pmid">14401489</pub-id></citation></ref>
<ref id="ref22"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Hox</surname> <given-names>J. J.</given-names></name> <name><surname>Moerbeek</surname> <given-names>M.</given-names></name> <name><surname>Van de Schoot</surname> <given-names>R.</given-names></name></person-group> (<year>2010</year>). <source>Multilevel analysis: Techniques and applications</source>. <edition>3rd</edition> Edn. <publisher-name>England, UK: Routledge</publisher-name>.</citation></ref>
<ref id="ref23"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Ionescu</surname> <given-names>M. R.</given-names></name></person-group> (<year>2016</year>). <article-title>Subliminal perception of complex visual stimuli</article-title>. <source>Rom. J. Ophthalmol.</source> <volume>60</volume>, <fpage>226</fpage>&#x2013;<lpage>230</lpage>. <comment>Available at:</comment> <ext-link xlink:href="http://www.random.org" ext-link-type="uri">http://www.random.org</ext-link>. PMID: <pub-id pub-id-type="pmid">29450354</pub-id></citation></ref>
<ref id="ref24"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jakesch</surname> <given-names>M.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Forster</surname> <given-names>M.</given-names></name></person-group> (<year>2013</year>). <article-title>Image ambiguity and fluency</article-title>. <source>PLoS One</source> <volume>8</volume>:<fpage>e74084</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0074084</pub-id>, PMID: <pub-id pub-id-type="pmid">24040172</pub-id></citation></ref>
<ref id="ref25"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jefferies</surname> <given-names>L. N.</given-names></name> <name><surname>Di Lollo</surname> <given-names>V.</given-names></name></person-group> (<year>2019</year>). <article-title>Sudden events change old visual objects into new ones: a possible role for phasic activation of locus Coeruleus</article-title>. <source>Psychol. Sci.</source> <volume>30</volume>, <fpage>55</fpage>&#x2013;<lpage>64</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0956797618807190</pub-id>, PMID: <pub-id pub-id-type="pmid">30426842</pub-id></citation></ref>
<ref id="ref26"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Jepma</surname> <given-names>M.</given-names></name> <name><surname>Nieuwenhuis</surname> <given-names>S.</given-names></name></person-group> (<year>2011</year>). <article-title>Pupil diameter predicts changes in the exploration-exploitation trade-off: evidence for the adaptive gain theory</article-title>. <source>J. Cogn. Neurosci.</source> <volume>23</volume>, <fpage>1587</fpage>&#x2013;<lpage>1596</lpage>. doi: <pub-id pub-id-type="doi">10.1162/jocn.2010.21548</pub-id>, PMID: <pub-id pub-id-type="pmid">20666595</pub-id></citation></ref>
<ref id="ref27"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kahneman</surname> <given-names>D.</given-names></name> <name><surname>Beatty</surname> <given-names>J.</given-names></name></person-group> (<year>1966</year>). <article-title>Pupil diameter and load on memory</article-title>. <source>Science</source> <volume>154</volume>, <fpage>1583</fpage>&#x2013;<lpage>1585</lpage>. doi: <pub-id pub-id-type="doi">10.1126/science.154.3756.1583</pub-id></citation></ref>
<ref id="ref28"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kesner</surname> <given-names>L.</given-names></name></person-group> (<year>2014</year>). <article-title>The predictive mind and the experience of visual art work</article-title>. <source>Front. Psychol.</source> <volume>5</volume>, <fpage>1</fpage>&#x2013;<lpage>13</lpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2014.01417</pub-id>, PMID: <pub-id pub-id-type="pmid">25566111</pub-id></citation></ref>
<ref id="ref29"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kounios</surname> <given-names>J.</given-names></name> <name><surname>Beeman</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>The cognitive neuroscience of insight</article-title>. <source>Annu. Rev. Psychol.</source> <volume>65</volume>, <fpage>71</fpage>&#x2013;<lpage>93</lpage>. doi: <pub-id pub-id-type="doi">10.1146/annurev-psych-010213-115154</pub-id></citation></ref>
<ref id="ref30"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kret</surname> <given-names>M. E.</given-names></name> <name><surname>Sjak-Shie</surname> <given-names>E. E.</given-names></name></person-group> (<year>2019</year>). <article-title>Preprocessing pupil size data: Guidelines and code</article-title>. <source>Behav. Res. Methods</source> <volume>51</volume>, <fpage>1336</fpage>&#x2013;<lpage>1342</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-018-1075-y</pub-id>, PMID: <pub-id pub-id-type="pmid">29992408</pub-id></citation></ref>
<ref id="ref31"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Kuchinke</surname> <given-names>L.</given-names></name> <name><surname>Trapp</surname> <given-names>S.</given-names></name> <name><surname>Jacobs</surname> <given-names>A. M.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name></person-group> (<year>2009</year>). <article-title>Pupillary responses in art appreciation: effects of aesthetic emotions</article-title>. <source>Psychol. Aesthet. Creat. Arts</source> <volume>3</volume>, <fpage>156</fpage>&#x2013;<lpage>163</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0014464</pub-id></citation></ref>
<ref id="ref32"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laeng</surname> <given-names>B.</given-names></name> <name><surname>&#x00D8;rbo</surname> <given-names>M.</given-names></name> <name><surname>Holmlund</surname> <given-names>T.</given-names></name> <name><surname>Miozzo</surname> <given-names>M.</given-names></name></person-group> (<year>2011</year>). <article-title>Pupillary Stroop effects</article-title>. <source>Cogn. Process.</source> <volume>12</volume>, <fpage>13</fpage>&#x2013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.1007/s10339-010-0370-z</pub-id>, PMID: <pub-id pub-id-type="pmid">20865297</pub-id></citation></ref>
<ref id="ref33"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Laeng</surname> <given-names>B.</given-names></name> <name><surname>Sirois</surname> <given-names>S.</given-names></name> <name><surname>Gredeb&#x00E4;ck</surname> <given-names>G.</given-names></name></person-group> (<year>2012</year>). <article-title>Pupillometry: a window to the preconscious?</article-title> <source>Perspect. Psychol. Sci.</source> <volume>7</volume>, <fpage>18</fpage>&#x2013;<lpage>27</lpage>. doi: <pub-id pub-id-type="doi">10.1177/1745691611427305</pub-id></citation></ref>
<ref id="ref34"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Larsen</surname> <given-names>R. S.</given-names></name> <name><surname>Waters</surname> <given-names>J.</given-names></name></person-group> (<year>2018</year>). <article-title>Neuromodulatory correlates of pupil dilation</article-title>. <source>Front. Neural Circuits</source> <volume>12</volume>:<fpage>21</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fncir.2018.00021</pub-id>, PMID: <pub-id pub-id-type="pmid">29593504</pub-id></citation></ref>
<ref id="ref35"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Belke</surname> <given-names>B.</given-names></name> <name><surname>Oeberst</surname> <given-names>A.</given-names></name> <name><surname>Augustin</surname> <given-names>D.</given-names></name></person-group> (<year>2004</year>). <article-title>A model of aesthetic appreciation and aesthetic judgments</article-title>. <source>Br. J. Psychol.</source> <volume>95</volume>, <fpage>489</fpage>&#x2013;<lpage>508</lpage>. doi: <pub-id pub-id-type="doi">10.1348/0007126042369811</pub-id></citation></ref>
<ref id="ref36"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Bruce</surname> <given-names>V.</given-names></name></person-group> (<year>2000</year>). <article-title>When inverted faces are recognized: the role of Configural information in face recognition</article-title>. <source>Q. J. Exp. Psychol. Section A</source> <volume>53</volume>, <fpage>513</fpage>&#x2013;<lpage>536</lpage>. doi: <pub-id pub-id-type="doi">10.1080/713755889</pub-id></citation></ref>
<ref id="ref37"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Nadal</surname> <given-names>M.</given-names></name></person-group> (<year>2014</year>). <article-title>Ten years of a model of aesthetic appreciation and aesthetic judgments: the aesthetic episode - developments and challenges in empirical aesthetics</article-title>. <source>Br. J. Psychol.</source> <volume>105</volume>, <fpage>443</fpage>&#x2013;<lpage>464</lpage>. doi: <pub-id pub-id-type="doi">10.1111/bjop.12084</pub-id>, PMID: <pub-id pub-id-type="pmid">25280118</pub-id></citation></ref>
<ref id="ref38"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Libby</surname> <given-names>W. L.</given-names></name> <name><surname>Lacey</surname> <given-names>B. C.</given-names></name> <name><surname>Lacey</surname> <given-names>J. I.</given-names></name></person-group> (<year>1973</year>). <article-title>Pupillary and cardiac activity during visual attention</article-title>. <source>Psychophysiology</source> <volume>10</volume>, <fpage>270</fpage>&#x2013;<lpage>294</lpage>. doi: <pub-id pub-id-type="doi">10.1111/j.1469-8986.1973.tb00526.x</pub-id>, PMID: <pub-id pub-id-type="pmid">4702521</pub-id></citation></ref>
<ref id="ref39"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Loewenfeld</surname> <given-names>I. E.</given-names></name></person-group> (<year>1999</year>). <source>The pupil: Anatomy, physiology and clinical applications.</source> <publisher-name>Boston: Butterworth and Heinemann</publisher-name>.</citation></ref>
<ref id="ref40"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Math&#x00F4;t</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Pupillometry: psychology, physiology, and function</article-title>. <source>J. Cogn.</source> <volume>1</volume>:<fpage>16</fpage>. doi: <pub-id pub-id-type="doi">10.5334/joc.18</pub-id>, PMID: <pub-id pub-id-type="pmid">31517190</pub-id></citation></ref>
<ref id="ref41"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Math&#x00F4;t</surname> <given-names>S.</given-names></name> <name><surname>Fabius</surname> <given-names>J.</given-names></name> <name><surname>Van Heusden</surname> <given-names>E.</given-names></name> <name><surname>Van der Stigchel</surname> <given-names>S.</given-names></name></person-group> (<year>2018</year>). <article-title>Safe and sensible preprocessing and baseline correction of pupil-size data</article-title>. <source>Behav. Res. Methods</source> <volume>50</volume>, <fpage>94</fpage>&#x2013;<lpage>106</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13428-017-1007-2</pub-id>, PMID: <pub-id pub-id-type="pmid">29330763</pub-id></citation></ref>
<ref id="ref42"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muth</surname> <given-names>C.</given-names></name> <name><surname>Carbon</surname> <given-names>C. C.</given-names></name></person-group> (<year>2013</year>). <article-title>The aesthetic Aha: on the pleasure of having insights into gestalt</article-title>. <source>Acta Psychol.</source> <volume>144</volume>, <fpage>25</fpage>&#x2013;<lpage>30</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.actpsy.2013.05.001</pub-id>, PMID: <pub-id pub-id-type="pmid">23743342</pub-id></citation></ref>
<ref id="ref43"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muth</surname> <given-names>C.</given-names></name> <name><surname>Hesslinger</surname> <given-names>V. M.</given-names></name> <name><surname>Carbon</surname> <given-names>C. C.</given-names></name></person-group> (<year>2015</year>). <article-title>The appeal of challenge in the perception of art: how ambiguity, solvability of ambiguity, and the opportunity for insight affect appreciation</article-title>. <source>Psychol. Aesthet. Creat. Arts</source> <volume>9</volume>, <fpage>206</fpage>&#x2013;<lpage>216</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0038814</pub-id></citation></ref>
<ref id="ref44"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Muth</surname> <given-names>C.</given-names></name> <name><surname>Pepperell</surname> <given-names>R.</given-names></name> <name><surname>Carbon</surname> <given-names>C. C.</given-names></name></person-group> (<year>2013</year>). <article-title>Give me gestalt! Preference for cubist artworks revealing high detectability of objects</article-title>. <source>Leonardo</source> <volume>46</volume>, <fpage>488</fpage>&#x2013;<lpage>489</lpage>. doi: <pub-id pub-id-type="doi">10.1162/LEON_a_00649</pub-id></citation></ref>
<ref id="ref45"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelowski</surname> <given-names>M.</given-names></name> <name><surname>Markey</surname> <given-names>P. S.</given-names></name> <name><surname>Forster</surname> <given-names>M.</given-names></name> <name><surname>Gerger</surname> <given-names>G.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name></person-group> (<year>2017</year>). <article-title>Move me, astonish me&#x2026; delight my eyes and brain: the Vienna integrated model of top-down and bottom-up processes in art perception (VIMAP) and corresponding affective, evaluative, and neurophysiological correlates</article-title>. <source>Phys. Life Rev.</source> <volume>21</volume>, <fpage>80</fpage>&#x2013;<lpage>125</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.plrev.2017.02.003</pub-id></citation></ref>
<ref id="ref46"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Pelowski</surname> <given-names>M.</given-names></name> <name><surname>Markey</surname> <given-names>P. S.</given-names></name> <name><surname>Lauring</surname> <given-names>J. O.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name></person-group> (<year>2016</year>). <article-title>Visualizing the impact of art: an update and comparison of current psychological models of art experience</article-title>. <source>Front. Hum. Neurosci.</source> <volume>10</volume>, <fpage>1</fpage>&#x2013;<lpage>21</lpage>. doi: <pub-id pub-id-type="doi">10.3389/fnhum.2016.00160</pub-id>, PMID: <pub-id pub-id-type="pmid">27199697</pub-id></citation></ref>
<ref id="ref47"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Privitera</surname> <given-names>C. M.</given-names></name> <name><surname>Renninger</surname> <given-names>L. W.</given-names></name> <name><surname>Carney</surname> <given-names>T.</given-names></name> <name><surname>Klein</surname> <given-names>S.</given-names></name> <name><surname>Aguilar</surname> <given-names>M.</given-names></name></person-group> (<year>2010</year>). <article-title>Pupil dilation during visual target detection</article-title>. <source>J. Vis.</source> <volume>10</volume>:<fpage>3</fpage>. doi: <pub-id pub-id-type="doi">10.1167/10.10.3</pub-id>, PMID: <pub-id pub-id-type="pmid">20884468</pub-id></citation></ref>
<ref id="ref48"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Reber</surname> <given-names>R.</given-names></name></person-group> (<year>2012</year>). &#x201C;<article-title>Processing fluency, aesthetic pleasure, and culturally shared taste</article-title>&#x201D; in <source>Aesthetic science: Connecting minds, brains, and experience</source>. eds. <person-group person-group-type="editor"><name><surname>Shimamura</surname> <given-names>A. P.</given-names></name> <name><surname>Palmer</surname> <given-names>S. E.</given-names></name></person-group> (<publisher-name>Oxford, England, UK: Oxford University Press</publisher-name>), <fpage>223</fpage>&#x2013;<lpage>249</lpage>.</citation></ref>
<ref id="ref49"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Reber</surname> <given-names>R.</given-names></name> <name><surname>Schwarz</surname> <given-names>N.</given-names></name> <name><surname>Winkielman</surname> <given-names>P.</given-names></name></person-group> (<year>2004</year>). <article-title>Processing fluency and aesthetic pleasure: is beauty in the Perceiver&#x2019;s processing experience?</article-title> <source>Personal. Soc. Psychol. Rev.</source> <volume>8</volume>, <fpage>364</fpage>&#x2013;<lpage>382</lpage>. doi: <pub-id pub-id-type="doi">10.1207/s15327957pspr0804_3</pub-id>, PMID: <pub-id pub-id-type="pmid">15582859</pub-id></citation></ref>
<ref id="ref50"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Sandford</surname> <given-names>A.</given-names></name> <name><surname>Bindemann</surname> <given-names>M.</given-names></name></person-group> (<year>2020</year>). <article-title>Discrimination and recognition of faces with changed configuration</article-title>. <source>Mem. Cogn.</source> <volume>48</volume>, <fpage>287</fpage>&#x2013;<lpage>298</lpage>. doi: <pub-id pub-id-type="doi">10.3758/s13421-019-01010-7</pub-id>, PMID: <pub-id pub-id-type="pmid">31939041</pub-id></citation></ref>
<ref id="ref51"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Schwartenbeck</surname> <given-names>P.</given-names></name> <name><surname>FitzGerald</surname> <given-names>T.</given-names></name> <name><surname>Dolan</surname> <given-names>R. J.</given-names></name> <name><surname>Friston</surname> <given-names>K.</given-names></name></person-group> (<year>2013</year>). <article-title>Exploration, novelty, surprise, and free energy minimization</article-title>. <source>Front. Psychol.</source> <volume>4</volume>:<fpage>710</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2013.00710</pub-id>, PMID: <pub-id pub-id-type="pmid">24109469</pub-id></citation></ref>
<ref id="ref52"><citation citation-type="confproc"><person-group person-group-type="author"><name><surname>Seabold</surname> <given-names>S.</given-names></name> <name><surname>Perktold</surname> <given-names>J.</given-names></name></person-group> (<year>2010</year>). <article-title>Statsmodels: econometric and statistical modeling with python</article-title>. <conf-name>Proceedings of the 9th Python in Science Conference</conf-name>, <fpage>92</fpage>&#x2013;<lpage>96</lpage>. doi: <pub-id pub-id-type="doi">10.25080/Majora-92bf1922-011</pub-id></citation></ref>
<ref id="ref53"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seth</surname> <given-names>A. K.</given-names></name></person-group> (<year>2013</year>). <article-title>Interoceptive inference, emotion, and the embodied self</article-title>. <source>Trends Cogn. Sci.</source> <volume>17</volume>, <fpage>565</fpage>&#x2013;<lpage>573</lpage>. doi: <pub-id pub-id-type="doi">10.1016/j.tics.2013.09.007</pub-id>, PMID: <pub-id pub-id-type="pmid">24126130</pub-id></citation></ref>
<ref id="ref54"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Seth</surname> <given-names>A. K.</given-names></name></person-group> (<year>2019</year>). <article-title>From unconscious inference to the beholder&#x2019;s share: predictive perception and human experience</article-title>. <source>Eur. Rev.</source> <volume>27</volume>, <fpage>378</fpage>&#x2013;<lpage>410</lpage>. doi: <pub-id pub-id-type="doi">10.1017/S1062798719000061</pub-id></citation></ref>
<ref id="ref55"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spee</surname> <given-names>B.</given-names></name> <name><surname>Ishizu</surname> <given-names>T.</given-names></name> <name><surname>Leder</surname> <given-names>H.</given-names></name> <name><surname>Mikuni</surname> <given-names>J.</given-names></name> <name><surname>Kawabata</surname> <given-names>H.</given-names></name> <name><surname>Pelowski</surname> <given-names>M.</given-names></name></person-group> (<year>2018</year>). <article-title>Neuropsychopharmacological aesthetics: a theoretical consideration of pharmacological approaches to causative brain study in aesthetics and art</article-title>. <source>Prog. Brain Res.</source> <volume>237</volume>, <fpage>343</fpage>&#x2013;<lpage>372</lpage>. doi: <pub-id pub-id-type="doi">10.1016/bs.pbr.2018.03.021</pub-id></citation></ref>
<ref id="ref56"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Spee</surname> <given-names>B. T. M.</given-names></name> <name><surname>Pelowski</surname> <given-names>M.</given-names></name> <name><surname>Arato</surname> <given-names>J.</given-names></name> <name><surname>Mikuni</surname> <given-names>J.</given-names></name> <name><surname>Tran</surname> <given-names>U. S.</given-names></name> <name><surname>Eisenegger</surname> <given-names>C.</given-names></name> <etal/></person-group>. (<year>2022</year>). <article-title>Social reputation influences on liking and willingness-to-pay for artworks: a multimethod design investigating choice behavior along with physiological measures and motivational factors</article-title>. <source>PLoS One</source> <volume>17</volume>:<fpage>e0266020</fpage>. doi: <pub-id pub-id-type="doi">10.1371/journal.pone.0266020</pub-id>, PMID: <pub-id pub-id-type="pmid">35442966</pub-id></citation></ref>
<ref id="ref57"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Topolinski</surname> <given-names>S.</given-names></name> <name><surname>Reber</surname> <given-names>R.</given-names></name></person-group> (<year>2010</year>). <article-title>Gaining insight into the &#x201C;Aha&#x201D; experience</article-title>. <source>Curr. Dir. Psychol. Sci.</source> <volume>19</volume>, <fpage>402</fpage>&#x2013;<lpage>405</lpage>. doi: <pub-id pub-id-type="doi">10.1177/0963721410388803</pub-id></citation></ref>
<ref id="ref58"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Vallat</surname> <given-names>R.</given-names></name></person-group> (<year>2018</year>). <article-title>Pingouin: statistics in Python</article-title>. <source>J. Open Source Softw.</source> <volume>3</volume>:<fpage>1026</fpage>. doi: <pub-id pub-id-type="doi">10.21105/joss.01026</pub-id></citation></ref>
<ref id="ref59"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Van de Cruys</surname> <given-names>S.</given-names></name></person-group> (<year>2017</year>). &#x201C;<article-title>Affective value in the predictive mind</article-title>&#x201D; in <source>Philosophy and predictive processing</source>. eds. <person-group person-group-type="editor"><name><surname>Metzinger</surname> <given-names>T.</given-names></name> <name><surname>Wiese</surname> <given-names>W.</given-names></name></person-group> (<publisher-loc>Frankfurt am Main, Germany</publisher-loc>: <publisher-name>MIND Group</publisher-name>).</citation></ref>
<ref id="ref60"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van de Cruys</surname> <given-names>S.</given-names></name> <name><surname>Wagemans</surname> <given-names>J.</given-names></name></person-group> (<year>2011a</year>). <article-title>Gestalts as predictions: some reflections and an application to art</article-title>. <source>Gestalt Theory</source> <volume>33</volume>, <fpage>325</fpage>&#x2013;<lpage>344</lpage>.</citation></ref>
<ref id="ref61"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van de Cruys</surname> <given-names>S.</given-names></name> <name><surname>Chamberlain</surname> <given-names>R.</given-names></name> <name><surname>Wagemans</surname> <given-names>J.</given-names></name></person-group> (<year>2017</year>). <article-title>Tuning in to art: a predictive processing account of negative emotion in art</article-title>. <source>Behav. Brain Sci.</source> <volume>40</volume>:<fpage>e377</fpage>. doi: <pub-id pub-id-type="doi">10.1017/S0140525X17001868</pub-id>, PMID: <pub-id pub-id-type="pmid">29342804</pub-id></citation></ref>
<ref id="ref62"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van de Cruys</surname> <given-names>S.</given-names></name> <name><surname>Wagemans</surname> <given-names>J.</given-names></name></person-group> (<year>2011b</year>). <article-title>Putting reward in art: a tentative prediction error account of visual art</article-title>. <source>I-Perception</source> <volume>2</volume>, <fpage>1035</fpage>&#x2013;<lpage>1062</lpage>. doi: <pub-id pub-id-type="doi">10.1068/i0466aap</pub-id>, PMID: <pub-id pub-id-type="pmid">23145260</pub-id></citation></ref>
<ref id="ref63"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Geert</surname> <given-names>E.</given-names></name> <name><surname>Wagemans</surname> <given-names>J.</given-names></name></person-group> (<year>2020</year>). <article-title>Order, complexity, and aesthetic appreciation</article-title>. <source>Psychol. Aesthet. Creat. Arts</source> <volume>14</volume>, <fpage>135</fpage>&#x2013;<lpage>154</lpage>. doi: <pub-id pub-id-type="doi">10.1037/aca0000224</pub-id></citation></ref>
<ref id="ref64"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Van Honk</surname> <given-names>J.</given-names></name> <name><surname>Will</surname> <given-names>G. J.</given-names></name> <name><surname>Terburg</surname> <given-names>D.</given-names></name> <name><surname>Raub</surname> <given-names>W.</given-names></name> <name><surname>Eisenegger</surname> <given-names>C.</given-names></name> <name><surname>Buskens</surname> <given-names>V.</given-names></name></person-group> (<year>2016</year>). <article-title>Effects of testosterone administration on strategic gambling in poker play</article-title>. <source>Sci. Rep.</source> <volume>6</volume>:<fpage>18096</fpage>. doi: <pub-id pub-id-type="doi">10.1038/srep18096</pub-id>, PMID: <pub-id pub-id-type="pmid">26727636</pub-id></citation></ref>
<ref id="ref65"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Villani</surname> <given-names>D.</given-names></name> <name><surname>Morganti</surname> <given-names>F.</given-names></name> <name><surname>Cipresso</surname> <given-names>P.</given-names></name> <name><surname>Ruggi</surname> <given-names>S.</given-names></name> <name><surname>Riva</surname> <given-names>G.</given-names></name> <name><surname>Gilli</surname> <given-names>G.</given-names></name></person-group> (<year>2015</year>). <article-title>Visual exploration patterns of human figures in action: an eye tracker study with art paintings</article-title>. <source>Front. Psychol.</source> <volume>6</volume>:<fpage>1636</fpage>. doi: <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01636</pub-id>, PMID: <pub-id pub-id-type="pmid">26579021</pub-id></citation></ref>
<ref id="ref66"><citation citation-type="book"><person-group person-group-type="author"><name><surname>Wagemans</surname> <given-names>J.</given-names></name></person-group> (ed.). (<year>2013</year>). &#x201C;<article-title>How much of gestalt theory has survived a century of neuroscience?</article-title>&#x201D; in <source>Perception beyond gestalt</source> (<publisher-name>Psychology Press</publisher-name>), <fpage>23</fpage>&#x2013;<lpage>35</lpage>.</citation></ref>
<ref id="ref67"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wagemans</surname> <given-names>J.</given-names></name> <name><surname>Elder</surname> <given-names>J. H.</given-names></name> <name><surname>Kubovy</surname> <given-names>M.</given-names></name> <name><surname>Palmer</surname> <given-names>S. E.</given-names></name> <name><surname>Peterson</surname> <given-names>M. A.</given-names></name> <name><surname>Singh</surname> <given-names>M.</given-names></name> <etal/></person-group>. (<year>2012</year>). <article-title>A century of gestalt psychology in visual perception: I. Perceptual grouping and figure-ground organization</article-title>. <source>Psychol. Bull.</source> <volume>138</volume>, <fpage>1172</fpage>&#x2013;<lpage>1217</lpage>. doi: <pub-id pub-id-type="doi">10.1037/a0029333</pub-id>, PMID: <pub-id pub-id-type="pmid">22845751</pub-id></citation></ref>
<ref id="ref68"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wertheimer</surname> <given-names>M.</given-names></name></person-group> (<year>1923</year>). <article-title>Untersuchungen zur Lehre von der Gestalt</article-title>. <source>Psychol. Forsch.</source> <volume>4</volume>, <fpage>301</fpage>&#x2013;<lpage>350</lpage>. doi: <pub-id pub-id-type="doi">10.1007/BF00410640</pub-id></citation></ref>
<ref id="ref141"><citation citation-type="journal"><person-group person-group-type="author"><name><surname>Wyatt</surname> <given-names>H. J.</given-names></name></person-group> (<year>1995</year>). <article-title>The form of the human pupil</article-title>. <source>Vision Res.</source> <volume>35</volume>, <fpage>2021</fpage>&#x2013;<lpage>2036</lpage>. doi: <pub-id pub-id-type="doi">10.1016/0042-6989(94)00268-Q</pub-id></citation></ref>
</ref-list>
</back>
</article>
