Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
@article{soton471971,
title = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
author = {Vahid Yazdanpanah and Enrico Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy Norman and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/471971/},
year = {2022},
date = {2022-11-01},
journal = {AI \& Society},
abstract = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous systems can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks on that basis. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in the future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a roadmap and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enable an effective embedding of AI technologies into society.},
keywords = {Artificial Intelligence, Citizen-Centric AI Systems, human-agent collectives, Human-Centred AI, Multiagent Responsibility Reasoning, Multiagent Systems, Trustworthy Autonomous Systems},
pubstate = {published},
tppubtype = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
@article{soton467975,
title = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
author = {Mohammad Divband Soorati and Enrico Gerding and Enrico Marchioni and Pavel Naumov and Timothy Norman and Sarvapali Ramchurn and Baharak Rastegari and Adam Sobey and Sebastian Stein and Danesh Tarapore and Vahid Yazdanpanah and Jie Zhang},
url = {https://eprints.soton.ac.uk/467975/},
year = {2022},
date = {2022-07-01},
journal = {AI Communications},
abstract = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
keywords = {Artificial Intelligence, Distributed Artificial Intelligence, Intelligent Agents, Multiagent Systems, Trustworthy Autonomous Systems},
pubstate = {published},
tppubtype = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
@inproceedings{soton446459,
title = {Responsibility ascription in trustworthy autonomous systems},
author = {Vahid Yazdanpanah and Enrico H Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy J Norman and Sarvapali D Ramchurn},
url = {https://eprints.soton.ac.uk/446459/},
year = {2021},
date = {2021-02-01},
booktitle = {Embedding AI in Society (18/02/21 - 19/02/21)},
abstract = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role in bridging them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
keywords = {Multiagent Systems, Reliable AI, Responsibility Reasoning, Trustworthy Autonomous Systems},
pubstate = {published},
tppubtype = {inproceedings}
}