<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v24i8e37611</article-id>
      <article-id pub-id-type="pmid">35994331</article-id>
      <article-id pub-id-type="doi">10.2196/37611</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>The Adoption of Artificial Intelligence in Health Care and Social Services in Australia: Findings From a Methodologically Innovative National Survey of Values and Attitudes (the AVA-AI Study)</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Weinert</surname>
            <given-names>Lina</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Calisto</surname>
            <given-names>Francisco Maria</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Isbanner</surname>
            <given-names>Sebastian</given-names>
          </name>
          <degrees>BBE, MBA, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0001-5842-2407">https://orcid.org/0000-0001-5842-2407</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>O’Shaughnessy</surname>
            <given-names>Pauline</given-names>
          </name>
          <degrees>BActSt, MActSt, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-4741-3326">https://orcid.org/0000-0002-4741-3326</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Steel</surname>
            <given-names>David</given-names>
          </name>
          <degrees>AStat, BSc, MSc, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-3137-9952">https://orcid.org/0000-0002-3137-9952</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Wilcock</surname>
            <given-names>Scarlet</given-names>
          </name>
          <degrees>BA, LLB, GDLP, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0002-0011-1363">https://orcid.org/0000-0002-0011-1363</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Carter</surname>
            <given-names>Stacy</given-names>
          </name>
          <degrees>BAppSci, MPH, PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <address>
            <institution>Australian Centre for Health Engagement Evidence and Values</institution>
            <institution>Faculty of the Arts, Social Sciences and Humanities</institution>
            <institution>University of Wollongong</institution>
            <addr-line>Northfields Ave</addr-line>
            <addr-line>Wollongong, 2522</addr-line>
            <country>Australia</country>
            <phone>61 2 4221 3243</phone>
            <email>stacyc@uow.edu.au</email>
          </address>
          <ext-link ext-link-type="orcid" xlink:href="https://orcid.org/0000-0003-2617-8694">https://orcid.org/0000-0003-2617-8694</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Social Marketing @ Griffith</institution>
        <institution>Griffith Business School</institution>
        <institution>Griffith University</institution>
        <addr-line>Brisbane</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>School of Mathematics and Applied Statistics</institution>
        <institution>Faculty of Engineering and Information Sciences</institution>
        <institution>University of Wollongong</institution>
        <addr-line>Wollongong</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Australian Research Council Centre of Excellence for Automated Decision-Making and Society</institution>
        <institution>The University of Sydney Law School</institution>
        <institution>The University of Sydney</institution>
        <addr-line>Sydney</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Australian Centre for Health Engagement Evidence and Values</institution>
        <institution>Faculty of the Arts, Social Sciences and Humanities</institution>
        <institution>University of Wollongong</institution>
        <addr-line>Wollongong</addr-line>
        <country>Australia</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Stacy Carter <email>stacyc@uow.edu.au</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>22</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>24</volume>
      <issue>8</issue>
      <elocation-id>e37611</elocation-id>
      <history>
        <date date-type="received">
          <day>28</day>
          <month>2</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>8</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>25</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>19</day>
          <month>7</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Sebastian Isbanner, Pauline O’Shaughnessy, David Steel, Scarlet Wilcock, Stacy Carter. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 22.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research, is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2022/8/e37611" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence (AI) for use in health care and social services is rapidly developing, but this has significant ethical, legal, and social implications. Theoretical and conceptual research in AI ethics needs to be complemented with empirical research to understand the values and judgments of members of the public, who will be the ultimate recipients of AI-enabled services.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of the Australian Values and Attitudes on AI (AVA-AI) study was to assess and compare Australians’ general and particular judgments regarding the use of AI, compare Australians’ judgments regarding different health care and social service applications of AI, and determine the attributes of health care and social service AI systems that Australians consider most important.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We conducted a survey of the Australian population using an innovative sampling and weighting methodology involving 2 sample components: one from an omnibus survey using a sample selected using scientific probability sampling methods and one from a nonprobability-sampled web-based panel. The web-based panel sample was calibrated to the omnibus survey sample using behavioral, lifestyle, and sociodemographic variables. Univariate and bivariate analyses were performed.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We included weighted responses from 1950 Australians in the web-based panel along with a further 2498 responses from the omnibus survey for a subset of questions. Both weighted samples were sociodemographically well spread. An estimated 60% of Australians support the development of AI in general but, in specific health care scenarios, this diminishes to between 27% and 43% and, for social service scenarios, between 31% and 39%. Although all ethical and social dimensions of AI presented were rated as important, accuracy was consistently the most important and reducing costs the least important. Speed was also consistently lower in importance. In total, 4 in 5 Australians valued continued human contact and discretion in service provision more than any speed, accuracy, or convenience that AI systems might provide.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The ethical and social dimensions of AI systems matter to Australians. Most think AI systems should augment rather than replace humans in the provision of both health care and social services. Although expressing broad support for AI, people made finely tuned judgments about the acceptability of particular AI applications with different potential benefits and downsides. Further qualitative research is needed to understand the reasons underpinning these judgments. The participation of ethicists, social scientists, and the public can help guide AI development and implementation, particularly in sensitive and value-laden domains such as health care and social services.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>surveys and questionnaires</kwd>
        <kwd>consumer health informatics</kwd>
        <kwd>social welfare</kwd>
        <kwd>bioethics</kwd>
        <kwd>social values</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Artificial intelligence (AI) and automation are accelerating in many fields driven by an increase in the availability of massive linked data sets, cloud computing, more powerful processors, and the development of new types of algorithms, particularly in the field of machine learning. In this paper, AI will be broadly conceptualized, consistent with the Australian Council of Learned Academies definition, as “a collection of interrelated technologies used to solve problems and perform tasks that, when humans do them, requires thinking” [<xref ref-type="bibr" rid="ref1">1</xref>]. These technologies are being applied in social services, including to automate eligibility verification, target and personalize welfare services, and aid in the detection of fraud and debt liability [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. Health care, initially slow to adopt AI, is also seeing rapid development for applications including health service planning and resource allocation, triage, screening and diagnosis, prognostication, robotics in applications such as aged care, and health advice chatbots [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref6">6</xref>]. These areas of practice—social services and health care—have traditionally been provided via extensive human-to-human contact by staff with professional autonomy and the capacity to exercise discretion in handling the problems of service users or patients.</p>
      </sec>
      <sec>
        <title>Ethical, Legal, and Social Implications of AI</title>
        <p>A growing body of literature acknowledges the complex ethical, legal, and social implications (ELSI) of AI deployment [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. In the 2010s, many intergovernmental, academic, and industry groups examined the ELSI of AI in a general sense, producing lists of high-level principles for AI ethics [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>] often reminiscent of existing frameworks in bioethics [<xref ref-type="bibr" rid="ref11">11</xref>]. In parallel, a set of approaches that foreground the significance of power, oppression, discrimination, and injustice has been developed, contextualized in the sociotechnical systems in which AI is embedded [<xref ref-type="bibr" rid="ref12">12</xref>]. Other work critiques the corporate AI sector for establishing AI ethics boards and documents while persisting with unethical practices and points to the difficulties faced by AI ethics researchers when working inside corporations producing AI systems [<xref ref-type="bibr" rid="ref13">13</xref>]. The abstract principles and frameworks that have proliferated in AI ethics offer accessible ways in to ethical debates, but they cannot be sufficient to address ethical issues in practice [<xref ref-type="bibr" rid="ref14">14</xref>]. There are now calls to complement ethical frameworks with other forms of knowledge, including analysis of detailed use cases and investigation of what members of the public think and value regarding the use of AI [<xref ref-type="bibr" rid="ref15">15</xref>]. Two linked cases are the focus of this study: the use of AI in health services and in social services, which are an important social determinant of health especially for marginalized and disadvantaged populations.</p>
        <p>For the first case, health care AI, research on ELSI has been rapidly expanding since 2019. In a 2020 review, Morley et al [<xref ref-type="bibr" rid="ref16">16</xref>] highlighted 3 groups of ELSI issues for health care AI: epistemic concerns (that the evidence on which health care AI is based is inconclusive, inscrutable, or misguided), normative concerns (highlighting unfairness and the potential for transformative unintended consequences), and concerns about the ability to either identify algorithmic harm or ascribe responsibility for it. Another 2020 review focused on health care emphasized the potential to worsen outcomes or cost-effectiveness, the problem of transportability (that algorithms may not work equally well in different populations), automation bias (that humans tend to be too willing to accept that algorithmic systems are correct), the potential to intensify inequities, the risk of clinical deskilling, increased threats to data protection and privacy, lack of contestability of algorithmic decisions, the need to preserve clinician and patient autonomy, and the potential to undermine trust in health care systems [<xref ref-type="bibr" rid="ref17">17</xref>]. A 2021 scoping review on health care AI ELSI highlighted data privacy and security, trust in AI, accountability and responsibility, and bias as key ethical issues for health care AI [<xref ref-type="bibr" rid="ref18">18</xref>]. Also in 2021, Goirand et al [<xref ref-type="bibr" rid="ref19">19</xref>] identified 84 AI-specific ethics frameworks relevant to health and &#62;11 principles recurring across these while noting that few frameworks had been implemented in practice. In parallel, empirical evidence demonstrates a continuing need to address the ELSI of health care AI. 
A well-known example is an AI system used to allocate health care in many US health services that allocated more care to White patients than to Black patients, even when the Black patients had greater need, because the AI learned from historical underservicing that Black patients had lower care requirements [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
        <p>Regarding our second case, AI in the social services, ELSI research is also gaining momentum, particularly as part of broader inquiries into the digital welfare state or in relation to high-profile examples of technology failure [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. This research highlights the potential of AI to improve the consistency and accuracy of welfare decision-making and increase cost-efficiency. However, it also raises grave concerns regarding the social costs associated with implementing AI in the social services, particularly for vulnerable populations. For example, the pioneering ethnographic study by Eubanks [<xref ref-type="bibr" rid="ref21">21</xref>] of AI and automation technologies in the United States in 2018 illustrates how new technologies can disempower poor citizens, intensify existing patterns of discrimination, and <italic>automate inequality</italic>. Similar concerns have been raised in Australia in relation to the Online Compliance Intervention known as <italic>robodebt</italic>. The scheme automated the calculation of welfare debts based on an income-averaging algorithm. The legality of the algorithm was successfully challenged before a domestic court in 2019, culminating in an Aus $1.8 billion (US $1.25 billion) class action lawsuit against the Australian government and prompting significant public and scholarly criticism of the scheme [<xref ref-type="bibr" rid="ref23">23</xref>].</p>
        <p>AI applications in the welfare sector pose novel challenges to legal and regulatory compliance. Many AI systems, including robodebt, have been designed and implemented in the absence of proper legal frameworks or in contravention of prevailing laws and administrative principles [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. Other high-profile examples include the System Risk Indication system of the Dutch government, which was used to predict an individual’s risk of welfare fraud. System Risk Indication was successfully challenged based on the fact that the system breached the right to privacy contained in the European Convention on Human Rights [<xref ref-type="bibr" rid="ref2">2</xref>]. Such cases have prompted a growing body of literature concentrated on the legal and human rights implications of AI in the social services. The recent report by the United Nations Special Rapporteur on Extreme Poverty [<xref ref-type="bibr" rid="ref2">2</xref>] calls for a human rights–based approach to digital regulation in social protection systems, which has prompted further research on AI and human rights principles [<xref ref-type="bibr" rid="ref25">25</xref>].</p>
      </sec>
      <sec>
        <title>Existing Research on Perceptions of the ELSI of Using AI, Including in Health Care and Social Services</title>
        <p>An approach to thinking about the ELSI of AI is to examine public attitudes and judgments toward these technologies. In areas such as health care and social services, this includes the attitudes and judgments of patients and service users. A small body of literature exists on general attitudes toward AI. In 2018, Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>] surveyed 2000 American adults and found mixed support for developing AI and strong support for the idea that AI should be carefully managed. In April 2020, the Monash Data Futures Institute surveyed 2019 Australians on their attitudes toward AI, adapting some questions from Zhang and Dafoe [<xref ref-type="bibr" rid="ref27">27</xref>]. They found that Australians did not consider themselves knowledgeable about AI, but 62.4% expressed support for the development of AI. When asked whether they supported the use of AI in particular fields, respondents were most supportive of AI use in health (44.1% strong support) and medicine (43% strong support) and less supportive of AI use in <italic>equality and inclusion</italic> (21.5% strong support) and public and social sector management (20.2% strong support). Respondents tended to agree that AI would do more social good than harm overall [<xref ref-type="bibr" rid="ref27">27</xref>].</p>
        <p>Research on the attitudes of patients and service users is developing; most research to date—such as this study—has been speculative, asking informants about their views or intentions rather than their direct experience of AI. Studies asking patients to imagine the use of AI in their care generally report broad acceptance [<xref ref-type="bibr" rid="ref28">28</xref>-<xref ref-type="bibr" rid="ref30">30</xref>] in areas including skin cancer screening and neurosurgery. Promises of greater diagnostic accuracy are well received [<xref ref-type="bibr" rid="ref30">30</xref>], and sharing deidentified health data for the development of medical AI may be acceptable to most [<xref ref-type="bibr" rid="ref28">28</xref>]. A study reported experiences with a diabetic retinopathy screening AI—96% of patients were satisfied or very satisfied [<xref ref-type="bibr" rid="ref31">31</xref>]. However, respondents in most studies also express concerns. Regarding skin cancer screening, concerns included inaccurate or limited training sets; lack of context; lack of physical examination; operator dependence; data protection; and potential errors, including false negatives and false positives [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. In the context of neurosurgery, respondents wanted a human neurosurgeon to remain in control [<xref ref-type="bibr" rid="ref29">29</xref>]. Finally, a study of patients with cancer in China suggested that despite reporting that they <italic>believed in</italic> both diagnoses and therapeutic advice given by an AI (90% and 85%, respectively), when this differed from the advice given by a human clinician, most patients would prefer to take the human clinician’s recommendation (88% and 91%, respectively) [<xref ref-type="bibr" rid="ref32">32</xref>].</p>
        <p>Research examining public and professional attitudes toward AI in the welfare sector is very limited. To the authors’ knowledge, research is yet to explore citizens’ general attitudes toward AI in the domain of welfare provision. However, there is a small body of research documenting service users’ experiences of specific AI applications in the social services, particularly users’ negative experiences of exclusion and discrimination [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref33">33</xref>], providing context-specific insights into system users’ experiences of AI and illustrating the high-stakes nature of implementing AI in this domain. This work, together with some small-scale, mostly qualitative studies involving frontline social service staff [<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref38">38</xref>], illustrates the complex and dynamic relationship between AI and the routines of social welfare professionals and indicates mixed reactions to these systems among staff. For example, the study by Zejnilović et al [<xref ref-type="bibr" rid="ref36">36</xref>] of counselors in a Portuguese employment service in 2020 found high levels of distrust and generally negative perceptions of an AI system used to score clients’ risk of long-term unemployment. However, the survey data also indicated that workers would continue to rely on the system even if it became optional, suggesting that respondents harbor mixed feelings about the system.</p>
        <p>The Australian Values and Attitudes on Artificial Intelligence (AVA-AI) study set out to understand Australians’ values and attitudes regarding the use of AI in health care and social services. Australia has been relatively slow to approve and adopt medical AI compared, for example, with the United Kingdom and the United States. The adoption of AI and automation technologies in the social services is comparatively advanced in Australia, although its development has been uneven and marked by controversy, including the case of robodebt. Multiple stakeholders are now confronting the opportunities and risks of these technologies. Policy makers need high-quality evidence of what Australians consider acceptable or unacceptable to ensure that their decision-making is legitimate. This study used an innovative methodology to survey Australians regarding these questions. Our aims were to understand Australians’ front-of-mind normative judgments about the use of AI, especially in the underresearched fields of social services and health care, and what attributes of AIs they would consider to be most important if those AIs were to be deployed in health care and social services. Although parallel literature seeks to model the characteristics of AI that predict acceptance [<xref ref-type="bibr" rid="ref39">39</xref>], this work has the complementary aim of seeking to understand the prevalence and patterning of different normative judgments about AI.</p>
        <p>The research questions answered in this study are as follows: (1) How do Australians’ general judgments regarding the use of AI compare with their judgments regarding the particular uses of AI in health care and social services? (2) Do Australians make different judgments about different health care and social service applications of AI? (3) What attributes of health care and social service AI systems do Australians consider most important?</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Aims</title>
        <p>The <italic>AVA-AI study</italic> was conducted to (1) provide information on Australians’ attitudes and values regarding AI, especially in health care and social services, and (2) allow for analysis of how these vary across different subpopulations and are associated with people’s sociodemographic characteristics and familiarity with technology. This study focuses on attitudes and values, how they differ for different scenarios, and the relative importance of different attributes of health care and social service AI. A selection of concepts from AI ethics relevant to understanding this study is outlined in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>. Analyses across different subpopulations will be reported in future papers.</p>
        <boxed-text id="box1" position="float">
          <title>Concepts from artificial intelligence (AI) ethics used in the Australian Values and Attitudes on Artificial Intelligence (AVA-AI) study.</title>
          <p>
            <bold>Concept and meaning in the context of AI ethics</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Accuracy: the degree to which an AI can perform tasks without errors. In the context of screening or targeting, for example, this would include the ability of the AI to detect a condition or identify a person without false positives (where a case is identified as having a condition or being a target when they do not fit the criteria). It also includes the ability of the AI to avoid false negatives (where a case is identified as not having a condition or not being a target when they do fit the criteria).</p>
            </list-item>
            <list-item>
              <p>Algorithmic targeting: the use of AI to find people with a certain profile, often predictively (eg, to identify people likely to be unable to find work or people likely to commit a crime).</p>
            </list-item>
            <list-item>
              <p>Autonomous machine decision-making: situations in which an AI makes a decision that would previously have been made only by a person, for example, whether a person has a condition or whether a person is eligible for a social security payment.</p>
            </list-item>
            <list-item>
              <p>Contestability: whether machine decision-making can be effectively challenged. Contestability is to some extent dependent on explainability but is also dependent on policy settings.</p>
            </list-item>
            <list-item>
              <p>Explainability: whether it is possible to explain how an AI makes a decision. For some forms of AI, especially deep learning algorithms, humans do not explicitly instruct the AI on what basis it should make decisions. This makes explainability potentially more challenging, leading such algorithms to be labeled as <italic>black box</italic> algorithms.</p>
            </list-item>
            <list-item>
              <p>Deskilling: when tasks previously undertaken by humans are delegated to AI, humans lose their ability to complete those tasks; that is, they deskill in relation to those tasks.</p>
            </list-item>
            <list-item>
              <p>Fair treatment: AI systems tend to reflect human bias; this relates to the concept of justice, which is complex and multidimensional. Doing justice is unlikely to entail treating everyone identically as different people have different needs and opportunities. In the AVA-AI study, we asked respondents how important it was to “know that the system treats everyone fairly” to capture an intuitive judgment of a system’s capacity to deal justly or unjustly with different individuals and populations.</p>
            </list-item>
            <list-item>
              <p>Personal tailoring: the ability of an AI, by comparing the data of an individual with large, linked data sets, to recommend services or interventions that respond to the particularity of an individual’s situation.</p>
            </list-item>
            <list-item>
              <p>Privacy: freedom from intrusion into personal matters, including the ability to control personal information about oneself.</p>
            </list-item>
            <list-item>
              <p>Responsibility: a complex and multidimensional concept, which attributes moral or legal duties and moral or legal blame, including for errors or harms.</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Instrument Development</title>
        <p>When designing the study, there were no existing instruments we could adopt. We used a question from the 2018 survey by Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>] and developed other questions based on a review of the AI ethics literature. Before the study commenced, the instrument underwent multiple rounds of input from investigators and expert colleagues, as well as cognitive testing.</p>
      </sec>
      <sec>
        <title>Final Instrument Design</title>
        <p>In addition to sociodemographic variables, the survey asked about the use of AI in health care and welfare. Questions were of 2 types. The first type, in the form of <italic>How much</italic> <italic>do you support or oppose,</italic> presented a 5-point scale. Questions of this type asked about the development of AI in general (B01, taken from Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>], running from <italic>strongly support</italic> to <italic>strongly oppose</italic>) and the use of AI in 6 particular health care and welfare AI scenarios for which potential advantages and disadvantages were presented in a balanced way (C03-C05 and D03-D05, for which the 5-point scale ran from <italic>I support this use of AI</italic> to <italic>I oppose this use of AI</italic>; <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). A final question of this type (E01) asked respondents to indicate what they valued more on a 5-point scale: <italic>Quicker, more convenient, more accurate health and social services</italic> or <italic>More human contact and discretion in health and social services.</italic> This trade-off asked respondents to evaluate a bundle of benefits commonly attributed to AI-enabled services against a bundle of benefits commonly attributed to services provided by human professionals.</p>
        <p>The second type of question presented a scenario involving AI use and then asked respondents to consider 7 ELSI dimensions or values (eg, <italic>getting an answer quickly</italic> and <italic>getting an accurate answer</italic>) and rate how important each dimension was to them personally on a scale from <italic>extremely important</italic> to <italic>not at all important</italic>. There were 4 questions of this type: 2 with health care scenarios (C01-C02) and 2 with welfare scenarios (D01-D02). Module C presented health care questions and module D presented welfare questions; respondents were randomly allocated to receive module C or D first, and the order of presentation of the values was also randomized. <xref ref-type="table" rid="table1">Table 1</xref> summarizes the variables presented as well as the concepts each question was designed to assess. Note that the dimensions or values were identical for module C and D questions except that the health care questions had an item about responsibility, including mistakes (reflecting the status quo of medical professional autonomy), whereas the social service questions had an item about personal tailoring (reflecting a promised potential benefit of AI in social services).</p>
        <p>The final survey instrument is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Summary of the variables collected in the Australian Values and Attitudes on Artificial Intelligence (AI) study.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="240"/>
            <col width="370"/>
            <col width="390"/>
            <thead>
              <tr valign="top">
                <td>Type of variable</td>
                <td>Question number and variable</td>
                <td>Concepts tested</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>General support or opposition</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>B01—how much do you support or oppose the development of AI in general (with multiple examples given)?<sup>a</sup></p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Broad support for or opposition to AI</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Importance of different attributes of AI in health care scenarios</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>C01—machine reads medical test, diagnoses, and recommends treatment</p>
                    </list-item>
                    <list-item>
                      <p>C02—machine triages when you are unwell</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>In relation to:</p>
                      <list>
                        <list-item>
                          <p>
                    C01—delegation of clinical decisions to an autonomous machine
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    C02—automating decisions about need for health care services (time-sensitive)
                  </p>
                        </list-item>
                      </list>
                    </list-item>
                    <list-item>
                      <p>Importance of:</p>
                      <list>
                        <list-item>
                          <p>
                    Explanation
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Speed
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Accuracy
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Human contact
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Reducing system costs
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Fair treatment
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Responsibility
                  </p>
                        </list-item>
                      </list>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Importance of different attributes of AI in welfare scenarios</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>D01—machine processes application for unemployment benefits (data sharing required)</p>
                    </list-item>
                    <list-item>
                      <p>D02—chatbot advises about carer payments</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>In relation to:</p>
                      <list>
                        <list-item>
                          <p>
                    D01—forgoing privacy as a barrier to accessing services
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    D02—automation of information services
                  </p>
                        </list-item>
                      </list>
                    </list-item>
                    <list-item>
                      <p>Importance of:</p>
                      <list>
                        <list-item>
                          <p>
                    Explanation
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Speed
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Accuracy
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Human contact
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Reducing system costs
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Fair treatment
                  </p>
                        </list-item>
                        <list-item>
                          <p>
                    Personal tailoring
                  </p>
                        </list-item>
                      </list>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Support for or opposition to AI in specific health care scenarios</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>C03—nonexplainable hospital algorithms</p>
                    </list-item>
                    <list-item>
                      <p>C04—data sharing for quality care</p>
                    </list-item>
                    <list-item>
                      <p>C05—deskilling physicians</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>C03—importance of explainable machine recommendations</p>
                    </list-item>
                    <list-item>
                      <p>C04—importance of privacy (balanced against quality of care)</p>
                    </list-item>
                    <list-item>
                      <p>C05—importance of retaining human clinical skills</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Support for or opposition to AI in specific welfare scenarios</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>D03—targeted compliance checking</p>
                    </list-item>
                    <list-item>
                      <p>D04—nonexplainable job services</p>
                    </list-item>
                    <list-item>
                      <p>D05—automated assignment of parent support with limited contestability</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>D03—algorithmic targeting of punitive policy</p>
                    </list-item>
                    <list-item>
                      <p>D04—importance of explainable machine recommendations</p>
                    </list-item>
                    <list-item>
                      <p>D05—importance of contestability (balanced against accuracy)</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Speed—human contact</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>E01—trade-off between quicker, more convenient, more accurate health care and social services and more human contact and discretion in health care and social services</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>E01—speed and convenience and accuracy vs human contact and discretion</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Sociodemographic</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Age, gender, concession card type, and employment status; household income, education, household type, language other than English spoken at home, and general health</p>
                    </list-item>
                    <list-item>
                      <p>Centrelink payment, employment field, relevant experience, relevant degree, life satisfaction, and disability</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Descriptive variables collected using standard sociodemographic questions</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Geographic</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>State or territory, capital city or rest of state, and SEIFA<sup>b</sup> (geographic measure of disadvantage)</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Descriptive variables collected using standard questions about location of residence</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Lifestyle</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>How often they check the internet, how often they post comments or images to social media, how often they post on blogs, forums, or interest groups, early adopter by type, and television viewing by type of viewing</p>
                    </list-item>
                  </list>
                </td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>Variables collected for weighting purposes</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Variables in italics were collected from both the Life in Australia and web-based panel samples; all others were collected from the web-based panel alone.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>SEIFA: Socio-Economic Indexes for Areas.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Data Collection Processes and Weighting</title>
        <p>Data collection occurred between March 16, 2020, and March 29, 2020, with respondents mainly completing the questionnaire on the web.</p>
        <p>The AVA-AI study comprises 2 sample components: one obtained from the Life in Australia (LIA) survey [<xref ref-type="bibr" rid="ref40">40</xref>] with a responding sample size of 2448 and a web-based panel sample with a responding sample size of 2000. Thus, the combined responding sample size was 4448.</p>
        <p>The full set of questions was used for the web-based panel sample. For the LIA sample, a subset of sociodemographic variables and all the geographic and lifestyle questions were used. The LIA sample also answered the general support question (B01) and the importance of AI attributes for scenario C01. In <xref ref-type="table" rid="table1">Table 1</xref>, the variables in italics were collected from both the LIA and web-based panel samples, and all others were collected from the web-based panel alone.</p>
        <p>The LIA sample was selected using scientific probability sampling methods, whereas the web-based panel sample was a nonprobability sample. Weights for the LIA sample were calculated using standard methods for a probability sample using generalized regression estimation [<xref ref-type="bibr" rid="ref41">41</xref>] to adjust for differences in selection probabilities and nonresponse and calibrate to population benchmarks obtained from the population census, current demographic statistics, and the 2017 to 2018 National Health Survey obtained from the Australian Bureau of Statistics. The variables used in the calibration were age by highest education level, country of birth by state, smoking status by state, gender by state, household structure by state, part of state, and state or territory.</p>
        <p>A web-based panel allowed us to generate a relatively large sample, enabling a good level of disaggregation into subpopulations, comparisons between groups, and analysis of associations. Such panels can be subject to self-selection biases and coverage issues, reducing the accuracy of population prevalence estimates [<xref ref-type="bibr" rid="ref42">42</xref>], but may enable the examination of associations and, with adjustments to reduce biases, improve the estimation of population characteristics [<xref ref-type="bibr" rid="ref43">43</xref>]. The calibration to population benchmarks for major sociodemographic variables may not eliminate these issues. To enhance our adjustment of the web-based panel data in the AVA-AI study, we included 2 substantive questions, a set of behavioral and lifestyle questions, and major sociodemographic variables in both the web-based panel survey and the probability sample–based LIA survey, as indicated in <xref ref-type="table" rid="table1">Table 1</xref>. This approach was similar to that used in the study by Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>], although our approach for the AVA-AI study went further by adjusting for behavioral and lifestyle variables and 2 substantive variables. The use of behavioral and lifestyle variables in adjusting web surveys, also known as webographic variables, is discussed in the study by Schonlau et al [<xref ref-type="bibr" rid="ref44">44</xref>], for example.</p>
        <p>In the AVA-AI study, questions common to the LIA and web-based panel samples were used to calibrate the web-based panel to the LIA sample, producing weights designed to reduce potential biases owing to the web-based panel sample being nonrandom; the LIA served as a reference survey [<xref ref-type="bibr" rid="ref35">35</xref>]. The probability of inclusion for the web-based panel respondents was estimated using a propensity score model. This involved combining the LIA and web-based panel samples and fitting a logistic regression model, with the response variable being membership of the web-based panel. In fitting this model, the original LIA weights were used for respondents in that sample, and a weight of 1 was used for the web-based panel respondents. The variables used in the logistic regression were selected using Akaike Information Criterion–based stepwise regression and consisted of age by education, gender, household structure, language spoken at home, self-rated health, early adopter status, and television streaming watching. In a final calibration step, the weights were further adjusted to agree with the population benchmarks for these variables. This approach is described, for example, in the study by Valliant and Dever [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref46">46</xref>] and by Elliot and Valliant [<xref ref-type="bibr" rid="ref47">47</xref>]. The weighting led to a weighted sample of 1950 for the web-based panel and 2498 for the LIA sample.</p>
      </sec>
      <sec>
        <title>Statistical Analysis Methods</title>
        <sec>
          <title>Overview</title>
          <p>All estimates and analyses were based on a weighted analysis using the largest sample possible. Each respondent had a weight determined by the sample they came from. The weights were scaled so that the sum of the weights for the combined sample was 4448. Two substantive questions (B01 [general support or opposition] and C01 [support or opposition for autonomous machine decision-making in medical testing]) were asked to the combined LIA+web-based panel sample. The remainder of the attitude and value questions was asked only to the web-based panel sample. Any analysis involving questions included in the LIA and web-based panel sample was based on the combined sample and the associated weights. Any analysis involving questions that were only collected from the web-based panel sample was based on the web-based panel sample and the associated weights.</p>
          <p>The analyses focused on determining and comparing the distribution of responses to the attitude and value questions. The methods used accounted for the use of weights in calculating estimates and associated 95% CIs and allowed for the testing of statistical significance, assessed when the <italic>P</italic> value of the relevant statistical test was &#60;.05.</p>
        </sec>
        <sec>
          <title>Statistical Analysis of Each Question Using Univariate Analyses</title>
          <p>All variables concerning attitudes and values had 5 substantive response categories reflecting <italic>support</italic> or <italic>importance.</italic> Univariate analysis calculated the estimated percentage in each response category for each question, with 95% CIs for each estimated percentage. For questions asking for degree of support or opposition, we examined whether there was a majority support and compared across scenarios and between health care and welfare contexts; for questions asking for the importance attached to different attributes or values, we examined whether attributes or values mattered more in some contexts than others.</p>
          <p>Weights must be accounted for in the calculation of estimates and in the statistical inference, such as estimates of SEs and the associated CIs obtained from them and <italic>P</italic> values for any statistical tests used. The CIs and <italic>P</italic> values were obtained using <italic>Complex Samples</italic> in SPSS (version 26; IBM Corp), which accounts for the use of weights in producing the estimates. Although the use of weights can reduce bias, there is an associated increase in variances and SEs of the estimates. This is reflected in the design effect, the variance of an estimate accounting for the weights (and complex design if used), compared with the use of simple random sampling and no weighting. The effect is variable specific, but a broad indication can be obtained considering the design effect because of weighting or unequal weighting effect [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. This is 1+<italic>C</italic><sub><italic>w</italic></sub><sup>2</sup>, where <italic>C</italic><sub><italic>w</italic></sub> is the coefficient of variation of the weights, which is the SD of the weights divided by their mean. For the combined sample, the design effect because of weighting was 1.83; for the LIA, it was 1.99; and, for the web-based panel, it was 1.61. For any specific estimates or analysis in this study, the SEs estimated from the survey data accounting for the weights were used. The effect on the SE is the square root of the design effect (ie, the design factor [<xref ref-type="bibr" rid="ref50">50</xref>]) and is the factor by which the CIs are larger than if weights did not have to be used. A design effect of 1.83 implies a design factor of 1.35. In this analysis, the design effects were almost all between 1.50 and 2.00.</p>
          <p>For questions using ordinal scales from 1 to 5, we also calculated an overall mean response to each question and the associated 95% CI. These included variables assessing the degree of support (ie, B01, C03-C05, and D03-D05), importance attached to attributes of AI (ie, C01-C02 and D01-D02), and the final question (E01) on trading off machine versus human traits. Mean scores close to the midpoint of the scale (3.00) indicated an overall neutral or balanced response to the question, that is, an equal or symmetric distribution of respondents on the respective scale. For support-or-oppose questions, lower scores indicated support and higher scores indicated opposition; for importance questions, lower scores indicated greater importance and higher scores indicated less importance; for E01, lower scores favored machine traits and higher scores favored human traits. For all questions, we tested the null hypothesis that the mean was 3.00 (ie, a distribution centered at the midpoint of the scale, or a balanced distribution of responses) using a 2-tailed <italic>t</italic> test allowing for weighting.</p>
        </sec>
        <sec>
          <title>Statistical Analysis Comparing Responses to Questions Using Bivariate Analyses</title>
          <p>To assess differences in the responses to pairs of questions—for example, is the support for the use of AI different when respondents are presented with different scenarios?—we compared the distributions of the responses. This was not to assess whether the responses to the 2 questions were independent, which is unlikely, but whether the percentages in their marginal distributions were the same.</p>
          <p>Our goal was to determine what percentage of people changed their response between 2 questions and whether this change was net positive or negative. To examine this issue for any 2 questions, we created a <italic>shift variable</italic> to represent the difference between two variables (variables A and B): (1) if the response to variable A was in a category greater than the response to variable B, the <italic>shift variable</italic> was +1, which corresponded to a more positive attitude toward AI for variable B and, equivalently, a more negative attitude for variable A; (2) if the response to variable B was in a category greater than the response to variable A, the <italic>shift variable</italic> was −1, which corresponded to a more positive attitude toward AI for variable A and, equivalently, a more negative attitude for variable B; and (3) if the responses to variables A and B were identical, the <italic>shift variable</italic> was 0.</p>
          <p>We estimated the percentage of respondents where the <italic>shift variable</italic> was 0, indicating no change. For those that changed, we estimated the percentage with a shift variable of −1, corresponding to a more positive attitude for the first variable and a more negative attitude for the second variable, and tested for equal percentages of positive and negative changes. The adjusted Pearson chi-square test in SPSS <italic>Complex Samples</italic> was used, which is a variant of the second-order adjustment proposed by Rao and Scott [<xref ref-type="bibr" rid="ref51">51</xref>]. These tests allowed us to assess the statistical significance of the differences in responses under different scenarios.</p>
          <p>We also tested for equal marginal distributions using the ordinal scores. SPSS uses a paired <italic>t</italic> test using these scores, which is similar to the test for marginal homogeneity described in the study by Agresti [<xref ref-type="bibr" rid="ref52">52</xref>]. This test was implemented accounting for the weights using <italic>Complex Samples</italic> in SPSS by creating a variable for each person equal to the difference between the scores of the 2 questions and testing that the mean difference was 0. We tested answers to our research questions, that is, to determine whether respondents answered differently when questions tested the same ELSI concept in different settings or when questions tested different ELSI concepts in comparable settings. The estimated mean difference and associated 95% CI and the <italic>P</italic> value for the test that the mean difference was 0 were produced.</p>
        </sec>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was approved by the University of Wollongong Social Sciences Human Research Ethics Committee (protocol number 2019/458).</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Sample Composition</title>
        <p><xref ref-type="table" rid="table2">Table 2</xref> provides a summary of the weighted combined sample and web-based panel sample for the key variables. A full composition of the overall combined sample and the web-based panel, including unweighted and weighted frequencies and proportions for key sociodemographic variables, is provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. The use of weights improved the representation of the combined sample for capital cities, age groups &#60;35 years, men, employed status, nonuniversity as the highest level of education, language other than English spoken at home, those with excellent or very good health, and people who look for information over the internet several times a day. The sample was well spread and had respondents across many different sociodemographic groups.</p>
        <p>The web-based panel sample was also well spread across many different sociodemographic groups. The effect of weighting was similar to that in the overall sample, although there was very little effect for age and capital cities. Comparing the weighted percentages between the combined sample and the web-based panel sample, the only appreciable difference is for those employed (2709/4448, 60.9% vs 1061/1950, 54.41%, respectively).</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Sociodemographic composition of Australian artificial intelligence survey sample (weighted data only).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="410"/>
            <col width="280"/>
            <col width="280"/>
            <thead>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Combined sample (n=4448), n (%)</td>
                <td>Web-based panel (n=1950), n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Part of state</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Capital city</td>
                <td>2957 (66.48)</td>
                <td>1300 (66.67)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Rest of state</td>
                <td>1481 (33.3)</td>
                <td>640 (32.82)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>10 (0.22)</td>
                <td>10 (0.51)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Age group (years)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>18 to 34</td>
                <td>1386 (31.16)</td>
                <td>637 (32.67)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>35 to 54</td>
                <td>1472 (33.09)</td>
                <td>660 (33.85)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>55 to 74</td>
                <td>1166 (26.21)</td>
                <td>497 (25.49)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥75</td>
                <td>394 (8.86)</td>
                <td>156 (8)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>30 (0.67)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Gender</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Men</td>
                <td>2180 (49.01)</td>
                <td>939 (48.15)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Women</td>
                <td>2259 (50.79)</td>
                <td>1011 (51.85)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Other</td>
                <td>9 (0.2)</td>
                <td>1 (0.05)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>0 (0)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Employment status</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Employed</td>
                <td>2709 (60.9)</td>
                <td>1061 (54.41)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not employed</td>
                <td>1735 (39.01)</td>
                <td>890 (45.64)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>4 (0.09)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Highest education level</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Postgraduate qualification</td>
                <td>529 (11.89)</td>
                <td>246 (12.62)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Undergraduate or diploma</td>
                <td>1393 (31.32)</td>
                <td>676 (34.67)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Vocational qualification</td>
                <td>937 (21.07)</td>
                <td>398 (20.41)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>School qualification</td>
                <td>1492 (33.54)</td>
                <td>626 (32.1)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>96 (2.16)</td>
                <td>5 (0.26)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Gross weekly household income</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥Aus $3000 (US $2086.20)</td>
                <td>635 (14.28)</td>
                <td>211 (10.82)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Aus $1500 to Aus $2999 (US $1043.10 to US $2085.50)</td>
                <td>1281 (28.8)</td>
                <td>589 (30.21)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Aus $500 to Aus $1499 (US $347.70 to US $1042.40)</td>
                <td>1646 (37.01)</td>
                <td>793 (40.67)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#60;Aus $500 (US $347.70)</td>
                <td>550 (12.37)</td>
                <td>261 (13.38)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>None</td>
                <td>139 (3.13)</td>
                <td>70 (3.59)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Negative income</td>
                <td>34 (0.76)</td>
                <td>26 (1.33)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>162 (3.64)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Other language spoken at home</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yes</td>
                <td>1036 (23.29)</td>
                <td>438 (22.46)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>No</td>
                <td>3411 (76.69)</td>
                <td>1513 (77.59)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>1 (0.02)</td>
                <td>0 (0)</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>General health</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Excellent</td>
                <td>549 (12.34)</td>
                <td>236 (12.1)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Very good</td>
                <td>1887 (42.42)</td>
                <td>837 (42.92)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Good</td>
                <td>1302 (29.27)</td>
                <td>562 (28.82)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fair</td>
                <td>573 (12.88)</td>
                <td>255 (13.08)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Poor</td>
                <td>131 (2.95)</td>
                <td>59 (3.03)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Not stated or unknown</td>
                <td>6 (0.13)</td>
                <td>0 (0)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Support for AI in General and in Specific Scenarios</title>
        <sec>
          <title>Background</title>
          <p>We first discuss questions focused on support for or opposition to AI. The CIs for questions B01 and C01 tended to be narrower as they were based on the combined sample. However, for all questions, estimates of percentages had margins of error (ie, twice the SE) of &#60;3 percentage points, reflecting the relatively large sample size and the reliability of all estimates.</p>
        </sec>
        <sec>
          <title>Respondents Expressed General Support for AI</title>
          <p><xref rid="figure1" ref-type="fig">Figure 1</xref> and <xref ref-type="table" rid="table3">Table 3</xref> show the level of support for the development of AI in general—an estimated 60.3% in the <italic>strongly support</italic> or <italic>somewhat support</italic> categories.</p>
          <p>Although the estimate for the <italic>support</italic> categories was 60.3%, it was only 13.4% for the <italic>opposed</italic> categories and 26.3% for the <italic>neutral</italic> or <italic>don’t know</italic> responses. The on-balance support mean score of 2.35 was statistically significant when tested against the midpoint of 3.00 (<italic>P</italic>&#60;.001). The design effects are consistent with a design effect of 1.83 attributable to weighting.</p>
          <p><xref ref-type="table" rid="table4">Table 4</xref> shows the percentage that selected a <italic>support</italic> category after <italic>don’t know</italic> responses were excluded and also after <italic>don’t know</italic> and <italic>neutral</italic> responses were excluded. This allowed for direct comparison of support and opposition and examination of whether there was majority support. We tested whether the resulting percentages were &#62;50% using the adjusted Pearson F test for equal percentages in SPSS, where an estimate of 50% would indicate equal levels of support and opposition. <xref ref-type="table" rid="table4">Table 4</xref> clearly demonstrates majority support among those taking a positive or negative position—63.1% when <italic>don’t know</italic> responses were excluded and 81.8% when <italic>neutral</italic> and <italic>don’t know</italic> responses were excluded, with <italic>P</italic> values indicating that both estimates were statistically significantly different from 50%.</p>
          <p>For each question in the remaining analyses, the very small proportion of refused and <italic>don’t know</italic> responses were not included and were no more than 8 cases for any of these questions.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Responses to question B01: How much do you support or oppose the development of artificial intelligence?</p>
            </caption>
            <graphic xlink:href="jmir_v24i8e37611_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Estimated percentages, mean, and 95% CIs for responses to question B01: How much do you support or oppose the development of artificial intelligence?<sup>a,b</sup></p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="330"/>
              <col width="340"/>
              <col width="330"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Estimated percentage (95% CI)</td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Strongly support</td>
                  <td>19.5 (17.9-21.1)</td>
                  <td>1.87</td>
                </tr>
                <tr valign="top">
                  <td>Somewhat support</td>
                  <td>40.8 (38.9-42.8)</td>
                  <td>1.84</td>
                </tr>
                <tr valign="top">
                  <td>Neither support nor oppose</td>
                  <td>21.9 (20.3-23.5)</td>
                  <td>1.74</td>
                </tr>
                <tr valign="top">
                  <td>Somewhat oppose</td>
                  <td>9.2 (8.1-10.4)</td>
                  <td>1.87</td>
                </tr>
                <tr valign="top">
                  <td>Strongly oppose</td>
                  <td>4.2 (3.5-5.1)</td>
                  <td>1.76</td>
                </tr>
                <tr valign="top">
                  <td>I don’t know</td>
                  <td>4.4 (3.6-5.3)</td>
                  <td>1.96</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table3fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table3fn2">
                <p><sup>b</sup>The mean score was 2.35 (95% CI 2.31-2.39) with a design effect of 1.83.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>Percentage of those who strongly support or somewhat support the development of artificial intelligence, 95% CIs, and <italic>P</italic> values for testing against 50%<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="400"/>
              <col width="300"/>
              <col width="300"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td colspan="2">Categories deleted</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>“Don’t know”</td>
                  <td>“Don’t know and neutral”</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Estimated percentage support (95% CI)</td>
                  <td>63.1 (61.1-65)</td>
                  <td>81.8 (80-83.5)</td>
                </tr>
                <tr valign="top">
                  <td><italic>P</italic> value<sup>b</sup></td>
                  <td>&#60;.001</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>Design effect</td>
                  <td>1.80</td>
                  <td>1.83</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table4fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table4fn2">
                <p><sup>b</sup><italic>P</italic> value for adjusted Pearson <italic>F</italic> test for equal proportions in <italic>support</italic> and <italic>oppose</italic> categories.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Respondents Showed Less Support for Specific AI Use Scenarios and Supported Some Scenarios More Than Others</title>
          <p><xref rid="figure2" ref-type="fig">Figure 2</xref> shows the estimates of the level of support for AI in specific health care and welfare scenarios, with scenarios presented in increasing order of level of support. <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> shows the related estimates and 95% CIs. <xref ref-type="table" rid="table5">Table 5</xref> presents estimates of support in categories 1 and 2 combined for specific scenarios, associated 95% CIs, and <italic>P</italic> values for the test against 50%. For all these specific scenarios, less support was expressed than in the question about AI in general (<xref rid="figure1" ref-type="fig">Figure 1</xref>).</p>
          <p><xref rid="figure2" ref-type="fig">Figure 2</xref> shows that the strongest support was expressed for a learning health care system making diagnostic and treatment recommendations, where <italic>over time, patients get different care depending on whether they do, or do not, share their health record with the AI system</italic> (ie, people receive health benefits only at the expense of health data privacy). Overall, the support for this item was 42.3% (<xref ref-type="table" rid="table5">Table 5</xref>). Regarding social services, the highest level of support was for targeted compliance checking for welfare debt (38.9%). In this scenario, a government department used an algorithm to check groups deemed <italic>high-risk</italic> for welfare overpayment twice as often, which found more welfare debts, saved money, and reduced the number of checks on other people but meant people in high-risk groups were checked more even if they had not done anything wrong. The next highest support was for automated systems to identify parents who required assistance to return to work with limited contestability (34.9%) and employment support recommendation systems that were nonexplainable to employment service workers (31.2%). The least support overall was expressed for AI systems that led to physician deskilling (27% support and 48.3% opposition) and those that made diagnostic and treatment recommendations but were not explainable to physicians (29.1% support and 41.6% opposition).</p>
          <p>For the estimates in <xref ref-type="table" rid="table5">Table 5</xref>, the neutral middle category with a score of 3 was included in the denominator. To directly compare the level of support and opposition and assess whether there was majority support or opposition, we removed the neutral category and recalculated the estimates and tests (<xref ref-type="table" rid="table6">Table 6</xref>). With the neutral score included, the level of support never reached a majority and ranged from 27% (deskilling physicians) to 42.3% (data sharing for quality care). Once the middle category was excluded, <xref ref-type="table" rid="table6">Table 6</xref> shows that, for the nonneutral respondents, there were majorities supporting data sharing and targeted compliance checking; a balance on automated parent support without contestability; and a majority opposed to nonexplainable hospital algorithms, nonexplainable job services, and especially deskilling physicians.</p>
          <p><xref ref-type="table" rid="table7">Table 7</xref> uses mean scores to indicate on-balance opposition or support—a score &#62;3.00 indicates on-balance opposition, and a score &#60;3.00 indicates on-balance support, along with <italic>P</italic> values for testing that the mean score was 3 (neither supportive nor opposed on balance). The means of general support for the development of AI were included for comparison. Marginal on-balance support was demonstrated for data sharing for quality care only (this should not be overinterpreted as the mean score was so close to neutral). For targeted compliance checking and noncontestable automated parent support, views were balanced. For both explainability scenarios and clinical deskilling, respondents expressed on-balance opposition at a statistically significant level.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Responses to questions C03 to C05 and D03 to D05: support for or opposition to specific scenarios. AI: artificial intelligence.</p>
            </caption>
            <graphic xlink:href="jmir_v24i8e37611_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <table-wrap position="float" id="table5">
            <label>Table 5</label>
            <caption>
              <p>Percentage of those supporting artificial intelligence in specific scenarios, 95% CIs, and <italic>P</italic> values for testing against 50%<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="360"/>
              <col width="0"/>
              <col width="390"/>
              <col width="0"/>
              <col width="0"/>
              <col width="100"/>
              <col width="0"/>
              <col width="120"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Domain and scenario</td>
                  <td colspan="2">Estimated percentage in “support” or “strongly support” categories (95% CI)</td>
                  <td colspan="3"><italic>P</italic> value<sup>b</sup></td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="9">
                    <bold>Health</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Data sharing for quality care (C04<sup>c</sup>)</td>
                  <td colspan="2">42.3 (39.6-45.1)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.62</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable hospital algorithms (C03)</td>
                  <td colspan="2">29.1 (26.7-31.6)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.57</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Deskilling physicians (C05)</td>
                  <td colspan="2">27 (24.6-29.5)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.57</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Welfare</bold>
                  </td>
                  <td colspan="3">
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Targeted compliance checking (D03)</td>
                  <td colspan="2">38.9 (36.2-41.7)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.61</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Automated parent support (contestability; D05)</td>
                  <td colspan="2">34.9 (32.3-37.6)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.59</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable job services (D04)</td>
                  <td colspan="2">31.2 (28.7-33.8)</td>
                  <td colspan="3">&#60;.001</td>
                  <td colspan="2">1.56</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table5fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table5fn2">
                <p><sup>b</sup><italic>P</italic> value for adjusted Pearson <italic>F</italic> test for 50% proportions in categories 1 and 2 combined.</p>
              </fn>
              <fn id="table5fn3">
                <p><sup>c</sup>Code in parentheses (eg, C04) indicates question number in instrument.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <table-wrap position="float" id="table6">
            <label>Table 6</label>
            <caption>
              <p>Proportion of respondents supporting artificial intelligence in specific scenarios, associated 95% CIs, and <italic>P</italic> values for testing against 50%; neutral responses deleted<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="360"/>
              <col width="390"/>
              <col width="0"/>
              <col width="100"/>
              <col width="120"/>
              <thead>
                <tr valign="top">
                  <td colspan="2">Domain and scenario</td>
                  <td>Estimated percentage in “support” or “strongly support” categories (95% CI)</td>
                  <td colspan="2"><italic>P</italic> value<sup>b</sup></td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Health</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Data sharing for quality care (C04<sup>c</sup>)</td>
                  <td>57.8 (54.5-61.1)</td>
                  <td colspan="2">&#60;.001</td>
                  <td>1.63</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable hospital algorithms (C03)</td>
                  <td>41.1 (38-44.4)</td>
                  <td colspan="2">&#60;.001</td>
                  <td>1.58</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Deskilling physicians (C05)</td>
                  <td>35.8 (32.8-38.9)</td>
                  <td colspan="2">&#60;.001</td>
                  <td>1.58</td>
                </tr>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Welfare</bold>
                  </td>
                  <td colspan="2">
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Targeted compliance checking (D03)</td>
                  <td>54.1 (50.9-57.4)</td>
                  <td colspan="2">.01</td>
                  <td>1.58</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Automated parent support (contestability; D05)</td>
                  <td>50.4 (47-53.7)</td>
                  <td colspan="2">.82</td>
                  <td>1.62</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable job services (D04)</td>
                  <td>44.1 (40.8-47.4)</td>
                  <td colspan="2">&#60;.001</td>
                  <td>1.59</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table6fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table6fn2">
                <p><sup>b</sup><italic>P</italic> value for adjusted Pearson <italic>F</italic> test for 50% proportions in categories 1 and 2 combined.</p>
              </fn>
              <fn id="table6fn3">
                <p><sup>c</sup>Code in parentheses (eg, C04) indicates question number in instrument.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <table-wrap position="float" id="table7">
            <label>Table 7</label>
            <caption>
              <p>Analysis of mean support for use of artificial intelligence (AI) in specific scenarios, 95% CIs, and <italic>P</italic> values for testing against a mean of 3. A score &#60;3 represents support, and a score &#62;3 represents opposition<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="450"/>
              <col width="0"/>
              <col width="250"/>
              <col width="0"/>
              <col width="130"/>
              <col width="0"/>
              <col width="140"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Domain and scenario</td>
                  <td colspan="2">Estimated mean (95% CI)</td>
                  <td colspan="2"><italic>P</italic> value<sup>b</sup></td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="3">General—support for the development of AI (B01<sup>c</sup>)</td>
                  <td colspan="2">2.35 (2.31-2.39)</td>
                  <td colspan="2">&#60;.001</td>
                  <td>1.83</td>
                </tr>
                <tr valign="top">
                  <td colspan="8">
                    <bold>Health</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Data sharing for quality care (C04)</td>
                  <td colspan="2">2.90 (2.83-2.98)</td>
                  <td colspan="2">.01</td>
                  <td colspan="2">1.65</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable hospital algorithms (C03)</td>
                  <td colspan="2">3.25 (3.18-3.32)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.57</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Deskilling physicians (C05)</td>
                  <td colspan="2">3.39 (3.31-3.46)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.62</td>
                </tr>
                <tr valign="top">
                  <td colspan="8">
                    <bold>Welfare</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Targeted compliance checking (D03)</td>
                  <td colspan="2">2.98 (2.91-3.06)</td>
                  <td colspan="2">.64</td>
                  <td colspan="2">1.62</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Automated parent support (contestability; D05)</td>
                  <td colspan="2">3.06 (2.99-3.13)</td>
                  <td colspan="2">.10</td>
                  <td colspan="2">1.60</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable job services (D04)</td>
                  <td colspan="2">3.19 (3.12-3.26)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.59</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table7fn1">
                <p><sup>a</sup>Means and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table7fn2">
                <p><sup>b</sup><italic>P</italic> value for <italic>t</italic> test that the mean score was 3.0 using complex samples.</p>
              </fn>
              <fn id="table7fn3">
                <p><sup>c</sup>Code in parentheses (eg, B01) indicates question number in instrument.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Statistical Significance of Differences Between Support in General and in Specific Scenarios</title>
          <p>To further investigate these results, we statistically tested changes in responses between the general question (B01) and the more specific scenario questions (C03-C05 and D03-D05). <xref ref-type="table" rid="table8">Table 8</xref> shows the percentage of those who changed between question B01 and each of the more specific scenarios and, of those who changed, what percentage changed to a more negative attitude. The change was tested against 50%, which corresponded to an equal change in a positive and negative direction.</p>
          <p><xref ref-type="table" rid="table8">Table 8</xref> shows that the estimated percentage that answered differently between the general and the more specific questions was between 60.2% and 70.6%. Of those who changed, between 70.8% and 83% changed to a more negative response, and all of these changes were statistically significant. There was also a slight increase of 3% to 9% in neutral responses across specific scenarios compared with the general question.</p>
          <table-wrap position="float" id="table8">
            <label>Table 8</label>
            <caption>
              <p>Estimated percentage of those who changed their response between the general question on the development of artificial intelligence and the specific scenarios and, of those who changed, the percentage that had a more negative attitude in the specific scenarios, with 95% CIs and the <italic>P</italic> value for the test of equal change in each direction<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="340"/>
              <col width="0"/>
              <col width="180"/>
              <col width="0"/>
              <col width="230"/>
              <col width="0"/>
              <col width="100"/>
              <col width="0"/>
              <col width="120"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Domain and scenario</td>
                  <td colspan="2">Percentage of those who changed</td>
                  <td colspan="2">Percentage of those who changed becoming more negative (95% CI)</td>
                  <td colspan="2"><italic>P</italic> value<sup>b</sup></td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="10">
                    <bold>Health</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Data sharing for quality care (C04<sup>c</sup>)</td>
                  <td colspan="2">60.2</td>
                  <td colspan="2">70.8 (67.3-74)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.59</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable hospital algorithms (C03)</td>
                  <td colspan="2">65.6</td>
                  <td colspan="2">81.4 (78.6-83.9)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.53</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Deskilling physicians (C05)</td>
                  <td colspan="2">70.6</td>
                  <td colspan="2">83 (80.3-85.3)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.56</td>
                </tr>
                <tr valign="top">
                  <td colspan="10">
                    <bold>Welfare</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Targeted compliance checking (D03)</td>
                  <td colspan="2">63.8</td>
                  <td colspan="2">71.9 (68.5-75)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.65</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Automated parent support (contestability; D05)</td>
                  <td colspan="2">65</td>
                  <td colspan="2">76.1 (73-78.9)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.56</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Nonexplainable job services (D04)</td>
                  <td colspan="2">66.6</td>
                  <td colspan="2">80.3 (77.5-82.9)</td>
                  <td colspan="2">&#60;.001</td>
                  <td colspan="2">1.50</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table8fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table8fn2">
                <p><sup>b</sup>Adjusted Pearson <italic>F</italic> test for equal proportions changing in each direction.</p>
              </fn>
              <fn id="table8fn3">
                <p><sup>c</sup>Code in parentheses (eg, C04) indicates question number in instrument.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Statistical Significance of Differences in Support Between Scenarios</title>
          <p>To assess the statistical significance of differences in support for different detailed scenarios, <xref ref-type="table" rid="table9">Table 9</xref> shows estimates of the percentage of those who changed in response to pairs of questions and, of those who changed, the percentage expressing a more negative attitude on the second question and the associated test against 50%. Although most comparisons were within the health care or welfare domain, we asked about explainability in both the health care and welfare contexts, allowing us to make direct comparisons between this pair of questions.</p>
          <p>As noted, the health care and welfare question blocks were randomized per participant, and the questions were randomized within blocks. As shown in <xref ref-type="table" rid="table9">Table 9</xref>, respondents did make different judgments in specific scenarios—there were statistically significant changes within all pairs except between the questions regarding explainability in health care and in welfare. Despite 45.7% of people changing their responses between these 2 questions, people changed their minds in both directions in approximately equal proportions. This suggests divided views on the importance of explainability in different scenarios. The differences between all health care scenarios were statistically significant. Answers on nonexplainability and deskilling were significantly different, and most were more negative than those on data sharing; answers on deskilling were significantly different, and most were more negative than those on nonexplainability. In addition, most changed their responses between these questions in the same direction. A similar pattern was seen in the welfare scenarios—a significant proportion of respondents changed their response among targeted compliance checking, automated parent support without contestability, and nonexplainable job services, in all cases to a more negative response. Again, most tended to change their responses among these questions in the same direction.</p>
          <p>Comparisons of the general support and support in specific scenarios and between the scenarios were also analyzed using differences in the means, with similar conclusions.</p>
          <table-wrap position="float" id="table9">
            <label>Table 9</label>
            <caption>
              <p>Estimated proportion of those who changed their response between 2 scenarios and, of those who changed, the percentage that expressed a more negative attitude in the second question, with 95% CIs and the <italic>P</italic> value for the test of equal change in each direction<sup>a</sup>.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="380"/>
              <col width="190"/>
              <col width="190"/>
              <col width="90"/>
              <col width="120"/>
              <thead>
                <tr valign="top">
                  <td colspan="2">Domain and scenarios compared</td>
                  <td>Percentage of those who changed</td>
                  <td>Percentage of those who changed becoming more negative (95% CI)</td>
                  <td><italic>P</italic> value<sup>b</sup></td>
                  <td>Design effect</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Health</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>C03<sup>c</sup> (explainability) vs C04<sup>d</sup> (data sharing)</td>
                  <td>38.1</td>
                  <td>26.7 (22.7-31.1)</td>
                  <td>&#60;.001</td>
                  <td>1.77</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>C03 (explainability) vs C05<sup>e</sup> (deskilling)</td>
                  <td>43.6</td>
                  <td>59.2 (55-63.3)</td>
                  <td>&#60;.001</td>
                  <td>1.62</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>C04 (data sharing) vs C05 (deskilling)</td>
                  <td>45.7</td>
                  <td>77.9 (74.2-81.2)</td>
                  <td>&#60;.001</td>
                  <td>1.69</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Welfare</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>D03<sup>f</sup> (compliance checking) vs D04<sup>g</sup> (explainability)</td>
                  <td>41.7</td>
                  <td>64.2 (60-68.2)</td>
                  <td>&#60;.001</td>
                  <td>1.60</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>D03 (compliance checking) vs D05<sup>h</sup> (contestability)</td>
                  <td>45.1</td>
                  <td>55.6 (51.4-59.6)</td>
                  <td>.008</td>
                  <td>1.59</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>D04 (explainability) vs D05 (contestability)</td>
                  <td>42.3</td>
                  <td>41.7 (37.6-45.9)</td>
                  <td>&#60;.001</td>
                  <td>1.59</td>
                </tr>
                <tr valign="top">
                  <td colspan="2">Explainability in health vs in welfare—C03 vs D04</td>
                  <td>45.7</td>
                  <td>46.1 (42-50.2)</td>
                  <td>.06</td>
                  <td>1.64</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table9fn1">
                <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
              </fn>
              <fn id="table9fn2">
                <p><sup>b</sup>Adjusted Pearson <italic>F</italic> test for equal proportions changing in each direction.</p>
              </fn>
              <fn id="table9fn3">
                <p><sup>c</sup>C03: nonexplainable hospital algorithms.</p>
              </fn>
              <fn id="table9fn4">
                <p><sup>d</sup>C04: data sharing for quality care.</p>
              </fn>
              <fn id="table9fn5">
                <p><sup>e</sup>C05: deskilling physicians.</p>
              </fn>
              <fn id="table9fn6">
                <p><sup>f</sup>D03: targeted compliance checking.</p>
              </fn>
              <fn id="table9fn7">
                <p><sup>g</sup>D04: nonexplainable job services.</p>
              </fn>
              <fn id="table9fn8">
                <p><sup>h</sup>D05: automated parent support (contestability).</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>Which Attributes of Health Care and Social Service AIs Were Most Important?</title>
        <p>We provided 2 health care scenarios (C01 [machine diagnosis and treatment recommendations] and C02 [machine triage]) and 2 social service scenarios (D01 [automation of unemployment benefit decision-making] and D02 [chatbot advice about carer payments]). We asked respondents to rate the importance of different attributes of the AI system in each one, where the attributes reflected a key ethical, legal, or social dimension of the AI or its use. For health care scenarios, these attributes included responsibility for decision-making as this is central to medicolegal frameworks and professional autonomy. For welfare scenarios, they included personal tailoring as this is a key promise of automation and machine decision-making in welfare contexts.</p>
        <p><xref rid="figure3" ref-type="fig">Figure 3</xref> shows these responses to the health care and welfare scenarios to allow comparisons to be made between the distributions of the responses to any 2 questions assessing the same ethical or social dimension of AI. <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref> provides the detailed estimates of the proportions and the associated estimates of 95% CIs on estimated proportions for <xref rid="figure3" ref-type="fig">Figure 3</xref>.</p>
        <p><xref ref-type="table" rid="table10">Table 10</xref> provides a summary of the importance that respondents ascribed to different attributes using mean scores, 95% CIs, and design effects. The response categories were scored from 1 for <italic>extremely important</italic> to 5 for <italic>not at all important</italic>; thus, lower scores indicate more importance. All means were &#60;3, the midpoint of the scale; <italic>t</italic> tests against a mean of 3 were statistically significant with <italic>P</italic>&#60;.001, indicating that more of the distribution of responses was in the extremely or very important categories. The attributes in <xref ref-type="table" rid="table10">Table 10</xref> are in ascending order of means, that is, from most to least important (where the most important value is presented first).</p>
        <p>As shown in <xref rid="figure3" ref-type="fig">Figure 3</xref> and <xref ref-type="table" rid="table10">Table 10</xref>, there were distinctions between attributes. In all 4 scenarios, accuracy was rated as most important on average (1.49-1.61), and the ability of an AI system to reduce system costs was rated as least important (2.30-2.60), especially in the welfare scenarios. After accuracy, fairness was the second most important attribute in both social service scenarios (1.80 and 1.81) but, in the health care scenarios, it placed lower relative to other attributes (1.87 and 1.94). After accuracy, responsibility and human contact were the next most important in both health care scenarios. Speed was slightly more important in a health care triage scenario (1.90) than in a medical testing scenario (2.08).</p>
        <p><xref ref-type="table" rid="table11">Table 11</xref> compares the mean responses to the attribute questions for the 2 health care scenarios (C01 vs C02) and the 2 welfare scenarios (D01 vs D02) to assess whether there were differences in importance in specific scenarios. In these comparisons, a negative estimate of the difference implies more importance for the first listed question, and a positive difference implies more importance for the second listed question. <xref ref-type="table" rid="table12">Table 12</xref> provides further analysis, including statistical significance testing, of shifts in responses to the questions. Taken together, these tables show that, among the health care scenarios, the only statistically significant differences were in relation to speed (more important in triage) and reducing costs (more important in decision support). In the social service scenarios, more statistically significant differences were found, with explanation and cost reduction being more important in automating unemployment benefits and human contact, speed, and personal tailoring being more important in receiving automated carer support advice.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Responses to questions C01 to C02 versus D01 to D02: summary and comparison of health (C) and welfare (D) scenarios. Numerical estimates &#60;10% are not given.</p>
          </caption>
          <graphic xlink:href="jmir_v24i8e37611_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table10">
          <label>Table 10</label>
          <caption>
            <p>Means, 95% CIs, and design effects for importance of values.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="390"/>
            <col width="290"/>
            <col width="290"/>
            <col width="0"/>
            <thead>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Estimate of the mean<sup>a</sup> (95% CI)</td>
                <td colspan="2">Design effect</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>C01<sup>b</sup>—machine reads medical test, diagnoses, and recommends treatment</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td>1.49 (1.46-1.53)</td>
                <td colspan="2">1.98</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td>1.78 (1.74-1.81)</td>
                <td colspan="2">1.95</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Responsibility</td>
                <td>1.78 (1.75-1.82)</td>
                <td colspan="2">1.98</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td>1.86 (1.82-1.90)</td>
                <td colspan="2">1.96</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td>1.87 (1.83-1.91)</td>
                <td colspan="2">1.91</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td>2.08 (2.04-2.12)</td>
                <td colspan="2">1.88</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td>2.30 (2.25-2.34)</td>
                <td colspan="2">1.92</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>C02—machine triages when you are unwell</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td>1.56 (1.51-1.61)</td>
                <td colspan="2">1.73</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Responsibility</td>
                <td>1.76 (1.71-1.81)</td>
                <td colspan="2">1.75</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td>1.81 (1.75-1.86)</td>
                <td colspan="2">1.72</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td>1.87 (1.82-1.93)</td>
                <td colspan="2">1.76</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td>1.90 (1.85-1.95)</td>
                <td colspan="2">1.64</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td>1.94 (1.88-2.00)</td>
                <td colspan="2">1.81</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td>2.43 (2.36-2.50)</td>
                <td colspan="2">1.74</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>D01—machine processes application for unemployment benefits (data sharing required)</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td>1.61 (1.56-1.65)</td>
                <td colspan="2">1.53</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td>1.80 (1.75-1.85)</td>
                <td colspan="2">1.56</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td>1.86 (1.80-1.91)</td>
                <td colspan="2">1.61</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personal tailoring</td>
                <td>1.87 (1.82-1.92)</td>
                <td colspan="2">1.58</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td>1.88 (1.82-1.93)</td>
                <td colspan="2">1.54</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td>1.99 (1.93-2.04)</td>
                <td colspan="2">1.58</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td>2.51 (2.45-2.58)</td>
                <td colspan="2">1.59</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>D02—chatbot advises about carer payments</bold>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td>1.60 (1.55-1.64)</td>
                <td colspan="2">1.6</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td>1.81 (1.76-1.87)</td>
                <td colspan="2">1.68</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personal tailoring</td>
                <td>1.82 (1.77-1.87)</td>
                <td colspan="2">1.67</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td>1.83 (1.77-1.88)</td>
                <td colspan="2">1.63</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td>1.91 (1.86-1.97)</td>
                <td colspan="2">1.71</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td>2.02 (1.96-2.08)</td>
                <td colspan="2">1.72</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td>2.60 (2.54-2.67)</td>
                <td colspan="2">1.71</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table10fn1">
              <p><sup>a</sup>Means and CIs adjusted for weighting.</p>
            </fn>
            <fn id="table10fn2">
              <p><sup>b</sup>Code (eg, C01) indicates question number in instrument.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table11">
          <label>Table 11</label>
          <caption>
            <p>Differences in mean responses on importance of attributes between 2 scenarios<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="280"/>
            <col width="0"/>
            <col width="300"/>
            <col width="0"/>
            <col width="150"/>
            <col width="0"/>
            <col width="240"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Domain and attribute</td>
                <td colspan="2">Mean difference (95% CI)</td>
                <td colspan="2"><italic>P</italic> value<sup>b</sup></td>
                <td>Design effect</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="8">
                  <bold>Health—C01<sup>c</sup> vs C02<sup>d</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td colspan="2">−0.001 (−0.048 to 0.046)</td>
                <td colspan="2">.96</td>
                <td colspan="2">1.89</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td colspan="2">0.082 (0.040 to 0.123)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.51</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td colspan="2">−0.009 (−0.052 to 0.033)</td>
                <td colspan="2">.67</td>
                <td colspan="2">1.91</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td colspan="2">−0.012 (−0.060 to 0.036)</td>
                <td colspan="2">.63</td>
                <td colspan="2">2.12</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Responsibility</td>
                <td colspan="2">0.007 (−0.035 to 0.050)</td>
                <td colspan="2">.73</td>
                <td colspan="2">1.88</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td colspan="2">−0.111 (−0.162 to −0.060)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.99</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td colspan="2">−0.035 (−0.081 to 0.011)</td>
                <td colspan="2">.13</td>
                <td colspan="2">1.93</td>
              </tr>
              <tr valign="top">
                <td colspan="8">
                  <bold>Welfare—D01<sup>e</sup> vs D02<sup>f</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td colspan="2">−0.164 (−0.215 to −0.113)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.64</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td colspan="2">0.070 (0.029 to 0.111)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.59</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td colspan="2">0.012 (−0.023 to 0.048)</td>
                <td colspan="2">.50</td>
                <td colspan="2">1.42</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td colspan="2">0.049 (0.009 to 0.089)</td>
                <td colspan="2">.02</td>
                <td colspan="2">1.48</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personal tailoring</td>
                <td colspan="2">0.048 (0.006 to 0.090)</td>
                <td colspan="2">.02</td>
                <td colspan="2">1.58</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td colspan="2">−0.091 (−0.136 to −0.046)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.54</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td colspan="2">−0.018 (−0.059 to 0.029)</td>
                <td colspan="2">.38</td>
                <td colspan="2">1.72</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table11fn1">
              <p><sup>a</sup>Means and CIs adjusted for weighting.</p>
            </fn>
            <fn id="table11fn2">
              <p><sup>b</sup><italic>P</italic> value for <italic>t</italic> test that the mean difference was 0 using complex samples.</p>
            </fn>
            <fn id="table11fn3">
              <p><sup>c</sup>C01: machine reads medical test, diagnoses, and recommends treatment.</p>
            </fn>
            <fn id="table11fn4">
              <p><sup>d</sup>C02: machine triages when you are unwell.</p>
            </fn>
            <fn id="table11fn5">
              <p><sup>e</sup>D01: machine processes application for unemployment benefits (data sharing required).</p>
            </fn>
            <fn id="table11fn6">
              <p><sup>f</sup>D02: chatbot advises about carer payments.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table12">
          <label>Table 12</label>
          <caption>
            <p>Estimated percentages of those who changed their responses on importance of values between 2 scenarios and, of those, the percentage that ranked the value as more important in the first question than in the second question (C01 vs C02 or D01 vs D02), with associated 95% CIs and the <italic>P</italic> value for the test of equal cell proportions<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="200"/>
            <col width="0"/>
            <col width="190"/>
            <col width="0"/>
            <col width="370"/>
            <col width="0"/>
            <col width="100"/>
            <col width="0"/>
            <col width="110"/>
            <thead>
              <tr valign="top">
                <td colspan="3">Domain and values</td>
                <td colspan="2">Percentage of those who changed</td>
                <td colspan="2">Percentage ranking the value as more important in C01 (vs C02) or D01 (vs D02) (95% CI)</td>
                <td colspan="2"><italic>P</italic> value<sup>b</sup></td>
                <td>Design effect</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="10">
                  <bold>Health—C01<sup>c</sup> vs C02<sup>d</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td colspan="2">34.3</td>
                <td colspan="2">47.6 (42.8-52.4)</td>
                <td colspan="2">.33</td>
                <td colspan="2">1.68</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td colspan="2">34.9</td>
                <td colspan="2">39.5 (35.2-44.1)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.52</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td colspan="2">25.1</td>
                <td colspan="2">49.5 (43.8-55.2)</td>
                <td colspan="2">.86</td>
                <td colspan="2">1.68</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td colspan="2">29.9</td>
                <td colspan="2">50.3 (45-55.5)</td>
                <td colspan="2">.92</td>
                <td colspan="2">1.70</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Responsibility</td>
                <td colspan="2">28.3</td>
                <td colspan="2">47.7 (42.5-53)</td>
                <td colspan="2">.40</td>
                <td colspan="2">1.69</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td colspan="2">33</td>
                <td colspan="2">59.2 (54.3-63.9)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.66</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td colspan="2">29.3</td>
                <td colspan="2">53.7 (48.5-58.8)</td>
                <td colspan="2">.16</td>
                <td colspan="2">1.66</td>
              </tr>
              <tr valign="top">
                <td colspan="10">
                  <bold>Welfare—D01<sup>e</sup> vs D02<sup>f</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Explanation</td>
                <td colspan="2">39.6</td>
                <td colspan="2">63.7 (59.4-67.7)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.55</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Speed</td>
                <td colspan="2">32.7</td>
                <td colspan="2">41.8 (37-46.6)</td>
                <td colspan="2">.001</td>
                <td colspan="2">1.66</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Accuracy</td>
                <td colspan="2">26.4</td>
                <td colspan="2">48.4 (43.2-53.7)</td>
                <td colspan="2">.56</td>
                <td colspan="2">1.57</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Human contact</td>
                <td colspan="2">30.7</td>
                <td colspan="2">43.9 (39.1-48.8)</td>
                <td colspan="2">.02</td>
                <td colspan="2">1.64</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Personal tailoring</td>
                <td colspan="2">33.1</td>
                <td colspan="2">43.9 (39.1-48.8)</td>
                <td colspan="2">.01</td>
                <td colspan="2">1.69</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Reducing costs</td>
                <td colspan="2">35.1</td>
                <td colspan="2">58.8 (54.3-63.1)</td>
                <td colspan="2">&#60;.001</td>
                <td colspan="2">1.58</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fairness</td>
                <td colspan="2">27.1</td>
                <td colspan="2">51.7 (46.3-57.1)</td>
                <td colspan="2">.53</td>
                <td colspan="2">1.70</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table12fn1">
              <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
            </fn>
            <fn id="table12fn2">
              <p><sup>b</sup>Adjusted Pearson <italic>F</italic> test for equal proportions.</p>
            </fn>
            <fn id="table12fn3">
              <p><sup>c</sup>C01: machine reads medical test, diagnoses, and recommends treatment.</p>
            </fn>
            <fn id="table12fn4">
              <p><sup>d</sup>C02: machine triages when you are unwell.</p>
            </fn>
            <fn id="table12fn5">
              <p><sup>e</sup>D01: machine processes application for unemployment benefits (data sharing required).</p>
            </fn>
            <fn id="table12fn6">
              <p><sup>f</sup>D02: chatbot advises about carer payments.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Final Bundled Attribute Trade-off of AI and Human Attributes</title>
        <p><xref rid="figure4" ref-type="fig">Figure 4</xref> shows the estimated percentages for the final bundled trade-off question (E01), where respondents were asked to weigh speed, convenience, and accuracy against human contact and discretion. <xref ref-type="table" rid="table13">Table 13</xref> provides the estimated percentages, mean scores, and 95% CIs. These results show that human attributes were generally valued more, as indicated by a mean score &#62;3. The estimated proportion of those who preferred the machine attributes (categories 1 or 2) was 20.3%, whereas, for human attributes (categories 4 or 5), it was 52%; 27.7% selected a middle position.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Responses to question E01: speed, accuracy, and convenience versus human contact and discretion.</p>
          </caption>
          <graphic xlink:href="jmir_v24i8e37611_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table13">
          <label>Table 13</label>
          <caption>
            <p>Speed, accuracy, and convenience versus human contact and discretion; estimated percentages; and 95% CIs for responses to question E01<sup>a</sup>.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Estimate (95% CI)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>1: speed, convenience, and accuracy</td>
                <td>7.6 (6.2-9.1)</td>
              </tr>
              <tr valign="top">
                <td>2</td>
                <td>12.7 (11-14.7)</td>
              </tr>
              <tr valign="top">
                <td>3</td>
                <td>27.7 (25.3-30.3)</td>
              </tr>
              <tr valign="top">
                <td>4</td>
                <td>28.5 (26.1-31.1)</td>
              </tr>
              <tr valign="top">
                <td>5: human contact and discretion</td>
                <td>23.5 (21.2-26)</td>
              </tr>
              <tr valign="top">
                <td>Mean score<sup>b</sup></td>
                <td>3.48 (3.41-3.54)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table13fn1">
              <p><sup>a</sup>Percentages and CIs adjusted for weighting.</p>
            </fn>
            <fn id="table13fn2">
              <p><sup>b</sup><italic>P</italic>&#60;.001 for testing that the mean score was 3; design effect=1.602.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <sec>
          <title>Overview</title>
          <p>The AVA-AI study has created one of the first large, robust data sets reflecting public views on the potential use of AI in health care and social services, with particular attention to the ELSI of those technologies. Future studies will provide a greater breakdown of the variation in responses among different population subgroups. This analysis focused on answering 3 key questions: how judgments in general compare with judgments in particular, how judgments about use in health care compare with judgments about use in social services, and whether judgments differ when ELSI differ.</p>
        </sec>
        <sec>
          <title>General Versus Particular Judgments About AI</title>
          <p>Our first general question about support for or opposition to AI was taken from the 2018 survey of the American public by Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>], which included 2000 respondents and used a similar weighting methodology; the Monash Data Futures survey [<xref ref-type="bibr" rid="ref27">27</xref>] also included this question and surveyed 2019 respondents. Owing to our methodology, we asked this question to 4448 respondents. <xref ref-type="table" rid="table14">Table 14</xref> compares these results—as the Monash survey reports combined <italic>all support</italic> and <italic>all oppose</italic> categories only, we have done the same. Both the AVA-AI study and the Monash survey suggest more positive general views in Australia than in the United States, although the results of the AVA-AI study are less positive than those of the Monash survey. Speculative reasons for this difference could include more prominent public discourse regarding harms from AI deployment in the US context (eg, in policing, justice, warfare, and the retail sector) or, more tentatively, that, in the 2 years between the surveys (mid-2018 for the study by Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>] vs March 2020-April 2020 for both the AVA-AI study and the Monash survey), Australians may have had additional positive experiences of the everyday AI described in that question (eg, language translation, spam filters, and streaming content suggestions).</p>
          <p>As a minority of AVA-AI study respondents began the survey with negative general views on AI and &#62;60% expressed support, any negative judgments expressed seem likely to be a response to the details of the scenarios presented rather than reflect prejudice against or fear of AI in general. When asked about specific scenarios for AI use, respondents were consistently more negative—the reduction in support between the general question and all 6 specific scenarios was statistically significant, and support expressed in the specific scenarios dropped between 17 and 33 percentage points. The simple opening <italic>support-or-oppose</italic> question presented familiar, helpful everyday examples of AI in use and did not demonstrate any downsides of AI. In contrast, the detailed scenario questions were designed for balance. Each question emphasized that AI could both improve services (eg, make them quicker, more convenient, and more accurate) and have downsides (eg, reduced explainability, contestability, and privacy; unfair burdens on minorities; or human deskilling). On the basis of our findings, we hypothesize that members of the general public may remain broadly unaware of the potential downsides of AI in use and that some of these downsides (eg, deskilling) matter more to them than others (eg, privacy). We did not test the level of awareness of ELSI problems with AI—this is a potential direction for future research. Participants’ more negative judgments in the case-specific questions also empirically reinforce what has already been argued in the literature: that the ELSI of AI applications need to be considered in the context of detailed cases.</p>
          <table-wrap position="float" id="table14">
            <label>Table 14</label>
            <caption>
              <p>Comparison of findings from the studies by Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>] and the Monash Data Futures Institute [<xref ref-type="bibr" rid="ref27">27</xref>] and from the Australian Values and Attitudes on Artificial Intelligence (AVA-AI): How much do you support or oppose the development of artificial intelligence?</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="240"/>
              <col width="240"/>
              <col width="310"/>
              <col width="210"/>
              <thead>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Zhang and Dafoe [<xref ref-type="bibr" rid="ref26">26</xref>] (2018), weighted %</td>
                  <td>Monash Data Futures Institute [<xref ref-type="bibr" rid="ref27">27</xref>] (2020), weighted % by age only</td>
                  <td>AVA-AI (2020), weighted %</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Strongly or somewhat support</td>
                  <td>40.94</td>
                  <td>62.4</td>
                  <td>60.3</td>
                </tr>
                <tr valign="top">
                  <td>Neither support nor oppose</td>
                  <td>27.84</td>
                  <td>23</td>
                  <td>21.9</td>
                </tr>
                <tr valign="top">
                  <td>Strongly or somewhat oppose</td>
                  <td>21.69</td>
                  <td>10.5</td>
                  <td>13.4</td>
                </tr>
                <tr valign="top">
                  <td>I don’t know</td>
                  <td>9.54</td>
                  <td>4.1</td>
                  <td>4.4</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Judgments About Health Care Versus Judgments About Social Services</title>
          <p>Respondents had slightly stronger, more diverse, and more negative views on using AI in health care as opposed to in social services. This may be because they themselves have more direct experience of using health care or consider health care more relevant to them; alternatively, respondents may consider health care to be a higher-stakes service for which they are less tolerant of social or ethical wrongs or harms. Again, respondents in the AVA-AI study were less strongly supportive than respondents in the Monash survey, expressing 27% to 43% support for health care scenarios and 31% to 39% support for social service scenarios. In the Monash survey, respondents were asked to rate their support or opposition to <italic>the application of artificial intelligence to social, humanitarian and environmental challenges</italic>. The areas that received the most support—&#62;75% of respondents—were <italic>health</italic> and <italic>medicine</italic>, whereas the areas that received the least support (although still &#62;60%) included <italic>equality and inclusion</italic> and <italic>public and social sector management</italic>.</p>
          <p>The differing responses to the 2 surveys may arise from the framing of the questions. The Monash questions were framed optimistically and presented no downsides; the AVA-AI questions presented both benefits and downsides or burdens. In health care, we held effectiveness and health benefits against requirements to share data, nonexplainability, and clinical deskilling. In social services, we held the accuracy and consistency of predictions and decisions against the potential for overtargeting, poor contestability, and nonexplainability. The differences in responses between the 2 surveys may show that the ethical and social risks of AI matter to people and will make a difference in their evaluations.</p>
        </sec>
        <sec>
          <title>Do Judgments Differ When ELSI Differ?</title>
          <p>The respondents clearly made judgments about the ELSI of AI. Although all ELSI were considered important, this was by degree. Respondents made quite finely graded judgments that intuitively aligned with the characteristics of the scenarios, suggesting both that they took the questions seriously and that different attributes will be differently important in different cases. For example, speed was more important in triage, where time is critical, than in diagnosis. Explanation was more important in automating unemployment benefits than in an information chatbot, which would be consistent with the view that people deserve to know why they do or do not receive payments. Human contact, personal tailoring, and speed were more important for the chatbot than for the benefits system, possibly reflecting that chatbot interactions are short and information-heavy and that people want a human to talk to if the automated system fails.</p>
          <p>Two things were consistent: accuracy was always the most highly valued, and reducing costs was always the least highly valued across health care and social services. The lack of any significant difference in the importance of accuracy across scenarios suggests that this is an entry-level requirement for the use of AI (although defining <italic>accuracy</italic> in different contexts is not straightforward). The lower importance given to cost reduction may reflect a general rejection of instrumental decision-making in policy and of cost-based arguments in public services. Contextual factors include Australia’s publicly funded health care system being <italic>popular and entrenched</italic> [<xref ref-type="bibr" rid="ref53">53</xref>] and that, despite holding negative views on welfare recipients, the Australian public remains similarly supportive of the welfare system as a whole [<xref ref-type="bibr" rid="ref54">54</xref>].</p>
          <p>Fairness was more important in social services than in health care. This may reflect the centrality of the concept of procedural fairness—that is, the fairness of the decision-making process—in social service administration, particularly within Australia’s bureaucratic and rule-bound welfare system [<xref ref-type="bibr" rid="ref55">55</xref>]. It may also reflect heightened concern for issues of fairness in light of the public controversy surrounding the robodebt program, which centered on the legality, accuracy, and fairness of the program’s debt calculations [<xref ref-type="bibr" rid="ref23">23</xref>]. Perhaps the most deliverable promise of AI is increased speed, but this was not highly valued by respondents in any of the scenarios presented.</p>
          <p>Knowing who is responsible for decisions, especially any mistakes made, was consistently important in health care, suggesting that the regulatory and ethical governance challenges in health care AI will matter to the public. Human contact was also important in health care. Prominent health care AI advocates have suggested that the core benefit of health care AI is its ability to release clinicians from mundane duties, freeing them to engage more deeply in care work [<xref ref-type="bibr" rid="ref56">56</xref>]. However, the digitization of health care in some contexts has had the opposite effect, overburdening clinicians with data management and system requirements that alienate them from patient care [<xref ref-type="bibr" rid="ref57">57</xref>]. This will be a key challenge to manage if health care AI is to deliver on its promises. Relatedly, respondents rejected medical deskilling most strongly among our 3 health care scenarios. This resonates with empirical research suggesting that people strongly value the preservation of human oversight for AI decision-making but also suggests the need for more work on what kinds of deskilling matter most as deskilling is highly likely to occur as automation increases. As in other research, participants were weakly supportive of sharing their health data with a learning health system if it delivered better quality care [<xref ref-type="bibr" rid="ref58">58</xref>], although qualitative and deliberative research suggests that this support is likely to be conditional [<xref ref-type="bibr" rid="ref59">59</xref>]. Respondents were also weakly supportive of algorithmic targeting of welfare compliance checking to high-risk groups if this saved money and reduced the number of checks on other people, which may reflect an on-balance judgment about proportionality or may simply reflect the aforementioned negative views on welfare recipients.</p>
          <p>We asked about explainability in both health care and welfare scenarios and contestability in welfare scenarios. Respondents expressed an on-balance opposition to both health care and welfare AIs that were not explainable to relevant professionals. However, different respondents valued explainability differently in health care and welfare scenarios, suggesting that there may be some divergence in people’s views on the domains in which explanation is more important. There was also an on-balance opposition to noncontestability in welfare scenarios, which reinforces support for processes of review and appeal when welfare decision-making is automated.</p>
          <p>When asked to make an on-balance judgment about the <italic>bundle</italic> of attributes most commonly associated with machines versus with humans, respondents strongly preferred human attributes. Although they considered attributes such as accuracy to be important if an AI system was to be implemented, they still highly valued human support and connection and were not prepared to give them up in exchange for accuracy (despite the accuracy of AI being highly valued in itself). This suggests the importance of pursuing an augmentation rather than a replacement role for AI in both health care and social services. For all of these findings, further qualitative research is needed to better understand the reasons underpinning people’s judgments.</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>To the best of the authors’ knowledge, this study is one of the largest and most robust surveys of public attitudes toward health care and welfare AI to date. The methodological approach taken allowed for the collection of detailed information on attitudes for a substantial sample using a relatively low-cost web-based panel while compensating for the potential biases in the creation of such a panel. Although the results suggest that respondents were able to engage with the details of the questions, the relatively low level of knowledge of AI in the community and the speculative nature of the questions mean that people’s responses to a direct experience of AI may differ from their responses in this survey. A strength of our design was the use of questions that were deliberately structured to present both the potential benefits and the potential burdens or harms of AI while attempting to maintain neutral sentiment and avoid normative valence in the language used. The survey was conducted before the onset of the COVID-19 pandemic, which initiated the rapid digitization of many health care and social services; it is possible that responses would be different if the survey were repeated today.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Australians support the idea of AI in a general sense, but their support diminishes when considering the details of particular scenarios and the potential harms or burdens that may accompany any promised benefits. Respondents consistently rated the accuracy of performance as the most important attribute in an AI system, but only 1 in 5 valued the speed, accuracy, and convenience of AI systems more than continued human contact and discretion in service provision. Overall, this study suggests that the ethical and social dimensions of AI systems matter to Australians and that Australians want AI systems to augment rather than replace humans in the provision of both health care and social services and to reflect human values. Meaningful engagement and participation of ethicists, social scientists, and the public can highlight what harms and wrongs are most important to avoid in all stages of the development and implementation of AI, including in sensitive and value-laden domains such as health care and social services.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Survey.</p>
        <media xlink:href="jmir_v24i8e37611_app1.docx" xlink:title="DOCX File , 3112 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Extended sample composition.</p>
        <media xlink:href="jmir_v24i8e37611_app2.docx" xlink:title="DOCX File , 28 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Support or opposition in specific artificial intelligence scenarios.</p>
        <media xlink:href="jmir_v24i8e37611_app3.docx" xlink:title="DOCX File , 19 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Importance of health (C) and welfare (D) scenarios.</p>
        <media xlink:href="jmir_v24i8e37611_app4.docx" xlink:title="DOCX File , 26 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AVA-AI</term>
          <def>
            <p>Australian Values and Attitudes on Artificial Intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ELSI</term>
          <def>
            <p>ethical, legal, and social implications</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">LIA</term>
          <def>
            <p>Life in Australia</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was funded by the University of Wollongong Global Challenges Program. The authors acknowledge Andrew Ward at the Social Research Centre for weighting the data and Kathleen Prokopovich, who assisted with project management and cognitive testing.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Walsh</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Levy</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bell</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Elliott</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Maclaurin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mareels</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Wood</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>The effective and ethical development of artificial intelligence: an opportunity to improve our wellbeing</article-title>
          <source>Australian Council of Learned Academies</source>
          <year>2019</year>
          <month>7</month>
          <day>30</day>
          <access-date>2022-08-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://acola.org/hs4-artificial-intelligence-australia/">https://acola.org/hs4-artificial-intelligence-australia/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alston</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Report of the special rapporteur on extreme poverty and human rights</article-title>
          <source>United Nations Digital Library</source>
          <year>2015</year>
          <access-date>2022-08-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://digitallibrary.un.org/record/798707?ln=en">https://digitallibrary.un.org/record/798707?ln=en</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carney</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in welfare: striking the vulnerability balance?</article-title>
          <source>Monash University Law Rev</source>
          <year>2021</year>
          <month>3</month>
          <day>15</day>
          <volume>46</volume>
          <issue>2</issue>
          <pub-id pub-id-type="doi">10.2139/ssrn.3805329</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tudor Car</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Dhinagaran</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Kyaw</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Kowatsch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Joty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Theng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Atun</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Conversational agents in health care: scoping review and conceptual analysis</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>08</month>
          <day>07</day>
          <volume>22</volume>
          <issue>8</issue>
          <fpage>e17158</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/8/e17158/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17158</pub-id>
          <pub-id pub-id-type="medline">32763886</pub-id>
          <pub-id pub-id-type="pii">v22i8e17158</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442948</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shahid</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Rappon</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Berta</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Applications of artificial neural networks in health care organizational decision-making: a scoping review</article-title>
          <source>PLoS One</source>
          <year>2019</year>
          <volume>14</volume>
          <issue>2</issue>
          <fpage>e0212356</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0212356"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0212356</pub-id>
          <pub-id pub-id-type="medline">30779785</pub-id>
          <pub-id pub-id-type="pii">PONE-D-18-28913</pub-id>
          <pub-id pub-id-type="pmcid">PMC6380578</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nadarzynski</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Cowie</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ridge</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Acceptability of artificial intelligence (AI)-led chatbot services in healthcare: a mixed-methods study</article-title>
          <source>Digit Health</source>
          <year>2019</year>
          <volume>5</volume>
          <fpage>1</fpage>
          <lpage>12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/2055207619871808?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2055207619871808</pub-id>
          <pub-id pub-id-type="medline">31467682</pub-id>
          <pub-id pub-id-type="pii">10.1177_2055207619871808</pub-id>
          <pub-id pub-id-type="pmcid">PMC6704417</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="web">
          <article-title>Montreal Declaration for a Responsible AI</article-title>
          <source>Université de Montréal</source>
          <access-date>2022-08-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://recherche.umontreal.ca/english/strategic-initiatives/montreal-declaration-for-a-responsible-ai/">https://recherche.umontreal.ca/english/strategic-initiatives/montreal-declaration-for-a-responsible-ai/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cath</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Governing artificial intelligence: ethical, legal and technical opportunities and challenges</article-title>
          <source>Philos Trans A Math Phys Eng Sci</source>
          <year>2018</year>
          <month>10</month>
          <day>15</day>
          <volume>376</volume>
          <issue>2133</issue>
          <fpage>20180080</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/30322996"/>
          </comment>
          <pub-id pub-id-type="doi">10.1098/rsta.2018.0080</pub-id>
          <pub-id pub-id-type="medline">30322996</pub-id>
          <pub-id pub-id-type="pii">rsta.2018.0080</pub-id>
          <pub-id pub-id-type="pmcid">PMC6191666</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fjeld</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Achten</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hilligoss</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Nagy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Srikumar</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Principled artificial intelligence: mapping consensus in ethical and rights-based approaches to principles for AI</article-title>
          <source>SSRN J</source>
          <year>2020</year>
          <month>1</month>
          <day>15</day>
          <pub-id pub-id-type="doi">10.2139/ssrn.3518482</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="web">
          <article-title>Independent high-level expert group on artificial intelligence</article-title>
          <source>European Commission</source>
          <year>2019</year>
          <month>4</month>
          <day>8</day>
          <access-date>2022-07-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.aepd.es/sites/default/files/2019-12/ai-definition.pdf">https://www.aepd.es/sites/default/files/2019-12/ai-definition.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cowls</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A unified framework of five principles for AI in society</article-title>
          <source>Harvard Data Sci Rev</source>
          <year>2019</year>
          <month>06</month>
          <day>23</day>
          <volume>1</volume>
          <issue>1</issue>
          <pub-id pub-id-type="doi">10.1162/99608f92.8cd550d1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Crawford</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <source>Atlas of AI</source>
          <year>2022</year>
          <publisher-loc>New Haven, Connecticut</publisher-loc>
          <publisher-name>Yale University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vincent</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Google is poisoning its reputation with AI researchers</article-title>
          <source>The Verge</source>
          <year>2021</year>
          <month>4</month>
          <day>13</day>
          <access-date>2021-10-10</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.theverge.com/2021/4/13/22370158/google-ai-ethics-timnit-gebru-margaret-mitchell-firing-reputation">https://www.theverge.com/2021/4/13/22370158/google-ai-ethics-timnit-gebru-margaret-mitchell-firing-reputation</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>RB</given-names>
            </name>
          </person-group>
          <article-title>The principlism debate: a critical overview</article-title>
          <source>J Med Philos</source>
          <year>1995</year>
          <month>02</month>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>85</fpage>
          <lpage>105</lpage>
          <pub-id pub-id-type="doi">10.1093/jmp/20.1.85</pub-id>
          <pub-id pub-id-type="medline">7738461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Whittlestone</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Nyrup</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alexandrova</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dihal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cave</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Ethical and Societal Implications of Algorithms, Data, and Artificial Intelligence: A Roadmap for Research</source>
          <year>2019</year>
          <publisher-loc>London</publisher-loc>
          <publisher-name>Nuffield Foundation</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Machado</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Burr</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cowls</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Joshi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Taddeo</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The ethics of AI in health care: a mapping review</article-title>
          <source>Soc Sci Med</source>
          <year>2020</year>
          <month>09</month>
          <volume>260</volume>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.1016/j.socscimed.2020.113172</pub-id>
          <pub-id pub-id-type="medline">32702587</pub-id>
          <pub-id pub-id-type="pii">S0277-9536(20)30391-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Win</surname>
              <given-names>KT</given-names>
            </name>
            <name name-style="western">
              <surname>Frazer</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Richards</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Houssami</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>The ethical, legal and social implications of using artificial intelligence systems in breast cancer care</article-title>
          <source>Breast</source>
          <year>2020</year>
          <month>02</month>
          <volume>49</volume>
          <fpage>25</fpage>
          <lpage>32</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0960-9776(19)30564-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.breast.2019.10.001</pub-id>
          <pub-id pub-id-type="medline">31677530</pub-id>
          <pub-id pub-id-type="pii">S0960-9776(19)30564-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC7375671</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Di Ruggiero</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Upshur</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Willison</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Malhotra</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Malhotra</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lui</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Gibson</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence for good health: a scoping review of the ethics literature</article-title>
          <source>BMC Med Ethics</source>
          <year>2021</year>
          <month>02</month>
          <day>15</day>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedethics.biomedcentral.com/articles/10.1186/s12910-021-00577-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12910-021-00577-8</pub-id>
          <pub-id pub-id-type="medline">33588803</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12910-021-00577-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC7885243</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goirand</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Austin</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Clay-Williams</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Implementing ethics in healthcare AI-based applications: a scoping review</article-title>
          <source>Sci Eng Ethics</source>
          <year>2021</year>
          <month>09</month>
          <day>03</day>
          <volume>27</volume>
          <issue>5</issue>
          <fpage>1</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1007/s11948-021-00336-3</pub-id>
          <pub-id pub-id-type="medline">34480239</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11948-021-00336-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Obermeyer</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Powers</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Vogeli</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mullainathan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title>
          <source>Science</source>
          <year>2019</year>
          <month>10</month>
          <day>25</day>
          <volume>366</volume>
          <issue>6464</issue>
          <fpage>447</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id>
          <pub-id pub-id-type="medline">31649194</pub-id>
          <pub-id pub-id-type="pii">366/6464/447</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eubanks</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <source>Automating Inequality: How High-Tech Tools Profile, Police, and Punish the Poor</source>
          <year>2018</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Picador, St Martin’s Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dencik</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Kaun</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Datafication and the welfare state</article-title>
          <source>Global Perspect</source>
          <year>2020</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>12912</fpage>
          <pub-id pub-id-type="doi">10.1525/gp.2020.12912</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Carney</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The new digital future for welfare: debts without legal proofs or moral authority</article-title>
          <source>UNSW Law J Forum</source>
          <year>2018</year>
          <month>4</month>
          <day>4</day>
          <fpage>1</fpage>
          <lpage>16</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>Y-F</given-names>
            </name>
          </person-group>
          <article-title>Institutional adaptation and the administrative state</article-title>
          <source>Melbourne University Law Rev</source>
          <year>2021</year>
          <volume>44</volume>
          <issue>3</issue>
          <fpage>889</fpage>
          <lpage>927</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Langford</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Taming the digital leviathan: automated decision-making and international human rights</article-title>
          <source>AJIL Unbound</source>
          <year>2020</year>
          <month>04</month>
          <day>27</day>
          <volume>114</volume>
          <fpage>141</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1017/aju.2020.31</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dafoe</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence: American attitudes and trends</article-title>
          <source>SSRN J</source>
          <year>2019</year>
          <pub-id pub-id-type="doi">10.2139/ssrn.3312874</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Selwyn</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cordoba</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Andrejevic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>AI for social good? Australian public attitudes toward AI and society</article-title>
          <source>Monash University</source>
          <year>2020</year>
          <month>8</month>
          <access-date>2022-07-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.monash.edu/data-futures-institute/news/ai-for-social-good-australian-public-attitudes-toward-ai-and-society">https://www.monash.edu/data-futures-institute/news/ai-for-social-good-australian-public-attitudes-toward-ai-and-society</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jutzi</surname>
              <given-names>TB</given-names>
            </name>
            <name name-style="western">
              <surname>Krieghoff-Henning</surname>
              <given-names>EI</given-names>
            </name>
            <name name-style="western">
              <surname>Holland-Letz</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Utikal</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Hauschild</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schadendorf</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sondermann</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Fröhling</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hekler</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Schmitt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Maron</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Brinker</surname>
              <given-names>TJ</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in skin cancer diagnostics: the patients' perspective</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2020</year>
          <volume>7</volume>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fmed.2020.00233"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2020.00233</pub-id>
          <pub-id pub-id-type="medline">32671078</pub-id>
          <pub-id pub-id-type="pmcid">PMC7326111</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palmisciano</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Jamjoom</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Stoyanov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Marcus</surname>
              <given-names>HJ</given-names>
            </name>
          </person-group>
          <article-title>Attitudes of patients and their relatives toward artificial intelligence in neurosurgery</article-title>
          <source>World Neurosurg</source>
          <year>2020</year>
          <month>06</month>
          <volume>138</volume>
          <fpage>627</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1016/j.wneu.2020.03.029</pub-id>
          <pub-id pub-id-type="medline">32179185</pub-id>
          <pub-id pub-id-type="pii">S1878-8750(20)30497-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Pérez-Chada</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Creadore</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Lo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Manjaly</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Pournamdari</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Tkachenko</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Barbieri</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Menon</surname>
              <given-names>AV</given-names>
            </name>
            <name name-style="western">
              <surname>Hartman</surname>
              <given-names>RI</given-names>
            </name>
            <name name-style="western">
              <surname>Mostaghimi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Patient perspectives on the use of artificial intelligence for skin cancer screening: a qualitative study</article-title>
          <source>JAMA Dermatol</source>
          <year>2020</year>
          <month>05</month>
          <day>01</day>
          <volume>156</volume>
          <issue>5</issue>
          <fpage>501</fpage>
          <lpage>12</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/32159733"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamadermatol.2019.5014</pub-id>
          <pub-id pub-id-type="medline">32159733</pub-id>
          <pub-id pub-id-type="pii">2762711</pub-id>
          <pub-id pub-id-type="pmcid">PMC7066525</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keel</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>PY</given-names>
            </name>
            <name name-style="western">
              <surname>Scheetz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Kotowicz</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>MacIsaac</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Feasibility and patient acceptability of a novel artificial intelligence-based screening model for diabetic retinopathy at endocrinology outpatient services: a pilot study</article-title>
          <source>Sci Rep</source>
          <year>2018</year>
          <month>03</month>
          <day>12</day>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-018-22612-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-018-22612-2</pub-id>
          <pub-id pub-id-type="medline">29531299</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-018-22612-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC5847544</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Attitudes of Chinese cancer patients toward the clinical use of artificial intelligence</article-title>
          <source>Patient Preference and Adherence</source>
          <year>2019</year>
          <month>11</month>
          <day>1</day>
          <volume>13</volume>
          <fpage>1867</fpage>
          <lpage>75</lpage>
          <pub-id pub-id-type="doi">10.2147/ppa.s225952</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schou</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pors</surname>
              <given-names>AS</given-names>
            </name>
          </person-group>
          <article-title>Digital by default? A qualitative study of exclusion in digitalised welfare</article-title>
          <source>Soc Policy Admin</source>
          <year>2018</year>
          <month>11</month>
          <day>22</day>
          <volume>53</volume>
          <issue>3</issue>
          <fpage>464</fpage>
          <lpage>77</lpage>
          <pub-id pub-id-type="doi">10.1111/spol.12470</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Petersen</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>LR</given-names>
            </name>
            <name name-style="western">
              <surname>Harper</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hildebrandt</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>"We would never write that down": classifications of unemployed and data challenges for AI</article-title>
          <source>Proc ACM Human Comput Interact</source>
          <year>2021</year>
          <month>04</month>
          <day>13</day>
          <volume>5</volume>
          <issue>CSCW1</issue>
          <fpage>1</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1145/3449176</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Raso</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Displacement as regulation: new regulatory technologies and front-line decision-making in Ontario works</article-title>
          <source>Can J Law Soc</source>
          <year>2017</year>
          <month>6</month>
          <day>27</day>
          <volume>32</volume>
          <issue>01</issue>
          <fpage>75</fpage>
          <lpage>95</lpage>
          <pub-id pub-id-type="doi">10.1017/cls.2017.6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zejnilovic</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Lavado</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>de Troya</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bell</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Algorithmic long-term unemployment risk assessment in use: counselors’ perceptions and use practices</article-title>
          <source>Global Perspectives</source>
          <year>2020</year>
          <month>6</month>
          <day>22</day>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.1525/gp.2020.12908</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karusala</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Vayanos</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rice</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Street-level realities of data practices in homeless services provision</article-title>
          <source>Proc ACM Human Comput Interact</source>
          <year>2019</year>
          <month>11</month>
          <day>07</day>
          <volume>3</volume>
          <issue>CSCW</issue>
          <fpage>1</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1145/3359286</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ammitzbøll Flügge</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hildebrandt</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Møller</surname>
              <given-names>NH</given-names>
            </name>
          </person-group>
          <article-title>Street-level algorithms and AI in bureaucratic decision-making</article-title>
          <source>Proc ACM Human Comput Interact</source>
          <year>2021</year>
          <month>04</month>
          <day>13</day>
          <volume>5</volume>
          <issue>CSCW1</issue>
          <fpage>1</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.1145/3449114</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Tao</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The roles of trust, personalization, loss of privacy, and anthropomorphism in public acceptance of smart healthcare services</article-title>
          <source>Comput Human Behav</source>
          <year>2022</year>
          <month>02</month>
          <volume>127</volume>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1016/j.chb.2021.107026</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaczmirek</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Pennay</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Lavrakas</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Neiger</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Building a probability-based online panel: life in Australia</article-title>
          <source>CSRM Methods Series</source>
          <year>2019</year>
          <issue>2/2019</issue>
          <fpage>1</fpage>
          <lpage>47</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deville</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Särndal</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Calibration estimators in survey sampling</article-title>
          <source>J Am Stat Assoc</source>
          <year>1992</year>
          <month>06</month>
          <volume>87</volume>
          <issue>418</issue>
          <fpage>376</fpage>
          <lpage>82</lpage>
          <pub-id pub-id-type="doi">10.1080/01621459.1992.10475217</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Blumberg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brick</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Couper</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Courtright</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dennis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dillman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Frankel</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Garland</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Groves</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Kennedy</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Krosnick</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lavrakas</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Piekarski</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rivers</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zahs</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Report on Online Panels</article-title>
          <source>The American Association for Public Opinion Research</source>
          <year>2010</year>
          <month>6</month>
          <access-date>2022-07-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.aapor.org/Education-Resources/Reports/Report-on-Online-Panels.aspx">https://www.aapor.org/Education-Resources/Reports/Report-on-Online-Panels.aspx</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DiSogra</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Cobb</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Dennis</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Calibrating non-probability internet samples with probability samples using early adopter characteristics</article-title>
          <source>Proceedings of the American Statistical Association, Section on Survey Research Joint Statistical Meetings (JSM)</source>
          <year>2011</year>
          <conf-name>American Statistical Association, Section on Survey Research Joint Statistical Meetings (JSM)</conf-name>
          <conf-date>Jul 30 - Aug 4, 2011</conf-date>
          <conf-loc>Miami Beach, Florida</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schonlau</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>van Soest</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Kapteyn</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Are 'webographic' or attitudinal questions useful for adjusting estimates from web surveys using propensity scoring?</article-title>
          <source>SSRN J</source>
          <year>2007</year>
          <month>6</month>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.2139/ssrn.1006108</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Valliant</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dever</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>Survey Weights: A Step-by-Step Guide to Calculation</source>
          <year>2018</year>
          <publisher-loc>College Station, Texas</publisher-loc>
          <publisher-name>Stata Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Valliant</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dever</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Estimating propensity adjustments for volunteer web surveys</article-title>
          <source>Sociol Method Res</source>
          <year>2011</year>
          <month>01</month>
          <day>11</day>
          <volume>40</volume>
          <issue>1</issue>
          <fpage>105</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1177/0049124110392533</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Elliott</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Valliant</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Inference for nonprobability samples</article-title>
          <source>Statist Sci</source>
          <year>2017</year>
          <month>5</month>
          <day>1</day>
          <volume>32</volume>
          <issue>2</issue>
          <fpage>249</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1214/16-STS598</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kish</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Weighting for unequal Pi</article-title>
          <source>J Official Stat</source>
          <year>1992</year>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>183</fpage>
          <lpage>200</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kish</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <source>Survey Sampling</source>
          <year>1965</year>
          <publisher-loc>Hoboken, New Jersey, United States</publisher-loc>
          <publisher-name>Wiley</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kish</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Methods for design effects</article-title>
          <source>J Official Stat</source>
          <year>1995</year>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>55</fpage>
          <lpage>77</lpage>
          <pub-id pub-id-type="doi">10.4135/9780857020116.n132</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>JN</given-names>
            </name>
            <name name-style="western">
              <surname>Scott</surname>
              <given-names>AJ</given-names>
            </name>
          </person-group>
          <article-title>On chi-squared tests for multiway contingency tables with cell proportions estimated from survey data</article-title>
          <source>Ann Statist</source>
          <year>1984</year>
          <month>3</month>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>46</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1214/aos/1176346391</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Agresti</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Categorical Data Analysis</source>
          <year>2003</year>
          <publisher-loc>Hoboken, New Jersey, United States</publisher-loc>
          <publisher-name>John Wiley &#38; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boxall</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Medicare: the making and consolidation of an Australian institution</article-title>
          <source>Successful Public Policy: Lessons from Australia and New Zealand</source>
          <year>2019</year>
          <publisher-loc>Canberra, Australia</publisher-loc>
          <publisher-name>ANU Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schofield</surname>
              <given-names>TP</given-names>
            </name>
            <name name-style="western">
              <surname>Butterworth</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Patterns of welfare attitudes in the Australian population</article-title>
          <source>PLoS One</source>
          <year>2015</year>
          <month>11</month>
          <day>10</day>
          <volume>10</volume>
          <issue>11</issue>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0142792"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0142792</pub-id>
          <pub-id pub-id-type="medline">26554361</pub-id>
          <pub-id pub-id-type="pii">PONE-D-15-11720</pub-id>
          <pub-id pub-id-type="pmcid">PMC4640565</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adler</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Henman</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>e-justice: a comparative study of computerization and procedural justice in social security</article-title>
          <source>Int Rev Law Comput Technol</source>
          <year>2010</year>
          <month>07</month>
          <day>21</day>
          <volume>15</volume>
          <issue>2</issue>
          <fpage>195</fpage>
          <lpage>212</lpage>
          <pub-id pub-id-type="doi">10.1080/13600860120070510</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <source>Deep Medicine: How Artificial Intelligence Can Make Healthcare Human Again</source>
          <year>2019</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Basic Books</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wachter</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <source>The Digital Doctor: Hope, Hype, and Harm at the Dawn of Medicine's Computer Age</source>
          <year>2015</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>McGraw-Hill Education</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aitken</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>de St Jorre</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pagliari</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jepson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cunningham-Burley</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Public responses to the sharing and linkage of health data for research purposes: a systematic review and thematic synthesis of qualitative studies</article-title>
          <source>BMC Med Ethics</source>
          <year>2016</year>
          <month>11</month>
          <day>10</day>
          <volume>17</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>24</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedethics.biomedcentral.com/articles/10.1186/s12910-016-0153-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12910-016-0153-x</pub-id>
          <pub-id pub-id-type="medline">27832780</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12910-016-0153-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC5103425</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Street</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fabrianesi</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Flack</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Lybrand</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Joyner</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mullan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lago</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Carolan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Irvine</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wales</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Braunack-Mayer</surname>
              <given-names>AJ</given-names>
            </name>
          </person-group>
          <article-title>Sharing administrative health data with private industry: a report on two citizens' juries</article-title>
          <source>Health Expect</source>
          <year>2021</year>
          <month>08</month>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>1337</fpage>
          <lpage>48</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/hex.13268"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/hex.13268</pub-id>
          <pub-id pub-id-type="medline">34048624</pub-id>
          <pub-id pub-id-type="pmcid">PMC8369100</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
