@article{11f568e340694d0883f763cc3311303a,
title = "Building Trust in Human-Machine Partnerships",
abstract = "Artificial intelligence (AI) is bringing radical change to our lives. Fostering trust in this technology requires the technology to be transparent, and one route to transparency is to make the decisions that are reached by AIs explainable to the humans that interact with them. This paper lays out an exploratory approach to developing explainability and trust, describing the specific technologies that we are adopting, the social and organizational context in which we are working, and some of the challenges that we are addressing.",
keywords = "Artificial intelligence, Explainable AI, Planning",
author = "Gerard Canal and Rita Borgo and Andrew Coles and Archie Drake and Dong Huynh and Perry Keller and Senka Krivi{\'c} and Paul Luff and Quratul-ain Mahesar and Luc Moreau and Simon Parsons and Menisha Patel and Elizabeth I. Sklar",
note = "Funding Information: This work has been partially supported by EPSRC grant EP/R033722/1. Publisher Copyright: {\textcopyright} 2020 Gerard Canal, Rita Borgo, Andrew Coles, Archie Drake, Dong Huynh, Perry Keller, Senka Krivi{\'c}, Paul Luff, Quratul-ain Mahesar, Luc Moreau, Simon Parsons, Menisha Patel, Elizabeth I. Sklar. Copyright 2020 Elsevier B.V., all rights reserved. Part of special issue: SI: The Feasibility of Computable Explanations for AI.",
year = "2020",
month = nov,
day = "1",
doi = "10.1016/j.clsr.2020.105489",
language = "English",
volume = "39",
journal = "Computer Law {\&} Security Review",
issn = "0267-3649",
publisher = "Elsevier Limited",
}