font
Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
Abstract | Links | BibTeX | Tags: Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design
@incollection{soton491769,
  title     = {Enabling trustworthiness in human-swarm systems through a digital twin},
  author    = {Soorati, Mohammad D. and Naiseh, Mohammad and Hunt, William and Parnell, Katie and Clark, Jediah and Ramchurn, Sarvapali D.},
  editor    = {Dasgupta, Prithviraj and Llinas, James and Gillespie, Tony and Fouse, Scott and Lawless, William and Mittu, Ranjeev and Sofge, Donald},
  url       = {https://eprints.soton.ac.uk/491769/},
  year      = {2024},
  date      = {2024-02-01},
  booktitle = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
  pages     = {93--125},
  publisher = {Academic Press},
  abstract  = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trusts in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
  note      = {Publisher Copyright: \copyright{} 2024 Elsevier Inc. All rights reserved.},
  keywords  = {Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design},
  pubstate  = {published},
  tppubtype = {incollection}
}
Krook, Joshua; Downer, John; Winter, Peter; Williams, Jennifer; Ives, Jonathan; Bratu, Roxana; Sheir, Stephanie; Williams, Robin; Anderson, Stuart; Li, Phoebe; Ramamoorthy, Subramanian; Ramchurn, Sarvapali
AI regulation: a pro-innovation approach – policy proposals: TASHub Response Miscellaneous
2023.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Consultation, innovation, Regulation, Trustworthy Autonomous Systems
@misc{soton478329,
  title        = {AI regulation: a pro-innovation approach -- policy proposals: TASHub Response},
  author       = {Krook, Joshua and Downer, John and Winter, Peter and Williams, Jennifer and Ives, Jonathan and Bratu, Roxana and Sheir, Stephanie and Williams, Robin and Anderson, Stuart and Li, Phoebe and Ramamoorthy, Subramanian and Ramchurn, Sarvapali},
  url          = {https://eprints.soton.ac.uk/478329/},
  year         = {2023},
  date         = {2023-06-01},
  howpublished = {University of Southampton},
  abstract     = {Response to open consultation from: Department for Science, Innovation and Technology and Office for Artificial Intelligence},
  keywords     = {Artificial Intelligence, Consultation, innovation, Regulation, Trustworthy Autonomous Systems},
  pubstate     = {published},
  tppubtype    = {misc}
}
Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Citizen-Centric AI Systems, human-agent collectives, Human-Centred AI, Multiagent Responsibility Reasoning, Multiagent Systems, Trustworthy Autonomous Systems
@article{soton471971,
  title     = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
  author    = {Yazdanpanah, Vahid and Gerding, Enrico and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy and Ramchurn, Sarvapali},
  url       = {https://eprints.soton.ac.uk/471971/},
  year      = {2022},
  date      = {2022-11-01},
  journal   = {AI \& Society},
  abstract  = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous system can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks based on that. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a road-map and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enables an effective embedding of AI technologies into society.},
  keywords  = {Artificial Intelligence, Citizen-Centric AI Systems, human-agent collectives, Human-Centred AI, Multiagent Responsibility Reasoning, Multiagent Systems, Trustworthy Autonomous Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Distributed Artificial Intelligence, Intelligent Agents, Multiagent Systems, Trustworthy Autonomous Systems
@article{soton467975,
  title     = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
  author    = {Soorati, Mohammad Divband and Gerding, Enrico and Marchioni, Enrico and Naumov, Pavel and Norman, Timothy and Ramchurn, Sarvapali and Rastegari, Baharak and Sobey, Adam and Stein, Sebastian and Tarapore, Danesh and Yazdanpanah, Vahid and Zhang, Jie},
  url       = {https://eprints.soton.ac.uk/467975/},
  year      = {2022},
  date      = {2022-07-01},
  journal   = {AI Communications},
  abstract  = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
  keywords  = {Artificial Intelligence, Distributed Artificial Intelligence, Intelligent Agents, Multiagent Systems, Trustworthy Autonomous Systems},
  pubstate  = {published},
  tppubtype = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
Abstract | Links | BibTeX | Tags: Multiagent Systems, Reliable AI, Responsibility Reasoning, Trustworthy Autonomous Systems
@inproceedings{soton446459,
  title     = {Responsibility ascription in trustworthy autonomous systems},
  author    = {Yazdanpanah, Vahid and Gerding, Enrico H. and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy J. and Ramchurn, Sarvapali D.},
  url       = {https://eprints.soton.ac.uk/446459/},
  year      = {2021},
  date      = {2021-02-01},
  booktitle = {Embedding AI in Society (18/02/21 -- 19/02/21)},
  abstract  = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role to bridge them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
  keywords  = {Multiagent Systems, Reliable AI, Responsibility Reasoning, Trustworthy Autonomous Systems},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
@incollection{soton491769,
  title         = {Enabling trustworthiness in human-swarm systems through a digital twin},
  author        = {Soorati, Mohammad D. and Naiseh, Mohammad and Hunt, William and Parnell, Katie and Clark, Jediah and Ramchurn, Sarvapali D.},
  editor        = {Dasgupta, Prithviraj and Llinas, James and Gillespie, Tony and Fouse, Scott and Lawless, William and Mittu, Ranjeev and Sofge, Donald},
  url           = {https://eprints.soton.ac.uk/491769/},
  year          = {2024},
  date          = {2024-02-01},
  booktitle     = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
  pages         = {93--125},
  publisher     = {Academic Press},
  abstract      = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trusts in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
  note          = {Publisher Copyright: \copyright{} 2024 Elsevier Inc. All rights reserved.},
  internal-note = {NOTE(review): citation key soton491769 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {incollection}
}
Krook, Joshua; Downer, John; Winter, Peter; Williams, Jennifer; Ives, Jonathan; Bratu, Roxana; Sheir, Stephanie; Williams, Robin; Anderson, Stuart; Li, Phoebe; Ramamoorthy, Subramanian; Ramchurn, Sarvapali
AI regulation: a pro-innovation approach – policy proposals: TASHub Response Miscellaneous
2023.
@misc{soton478329,
  title         = {AI regulation: a pro-innovation approach -- policy proposals: TASHub Response},
  author        = {Krook, Joshua and Downer, John and Winter, Peter and Williams, Jennifer and Ives, Jonathan and Bratu, Roxana and Sheir, Stephanie and Williams, Robin and Anderson, Stuart and Li, Phoebe and Ramamoorthy, Subramanian and Ramchurn, Sarvapali},
  url           = {https://eprints.soton.ac.uk/478329/},
  year          = {2023},
  date          = {2023-06-01},
  howpublished  = {University of Southampton},
  abstract      = {Response to open consultation from: Department for Science, Innovation and Technology and Office for Artificial Intelligence},
  internal-note = {NOTE(review): citation key soton478329 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {misc}
}
Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
@article{soton471971,
  title         = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
  author        = {Yazdanpanah, Vahid and Gerding, Enrico and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy and Ramchurn, Sarvapali},
  url           = {https://eprints.soton.ac.uk/471971/},
  year          = {2022},
  date          = {2022-11-01},
  journal       = {AI \& Society},
  abstract      = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous system can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks based on that. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a road-map and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enables an effective embedding of AI technologies into society.},
  internal-note = {NOTE(review): citation key soton471971 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
@article{soton467975,
  title         = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
  author        = {Soorati, Mohammad Divband and Gerding, Enrico and Marchioni, Enrico and Naumov, Pavel and Norman, Timothy and Ramchurn, Sarvapali and Rastegari, Baharak and Sobey, Adam and Stein, Sebastian and Tarapore, Danesh and Yazdanpanah, Vahid and Zhang, Jie},
  url           = {https://eprints.soton.ac.uk/467975/},
  year          = {2022},
  date          = {2022-07-01},
  journal       = {AI Communications},
  abstract      = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
  internal-note = {NOTE(review): citation key soton467975 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
@inproceedings{soton446459,
  title         = {Responsibility ascription in trustworthy autonomous systems},
  author        = {Yazdanpanah, Vahid and Gerding, Enrico H. and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy J. and Ramchurn, Sarvapali D.},
  url           = {https://eprints.soton.ac.uk/446459/},
  year          = {2021},
  date          = {2021-02-01},
  booktitle     = {Embedding AI in Society (18/02/21 -- 19/02/21)},
  abstract      = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role to bridge them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
  internal-note = {NOTE(review): citation key soton446459 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
Abstract | Links | BibTeX | Tags: Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design
@incollection{soton491769,
  title         = {Enabling trustworthiness in human-swarm systems through a digital twin},
  author        = {Soorati, Mohammad D. and Naiseh, Mohammad and Hunt, William and Parnell, Katie and Clark, Jediah and Ramchurn, Sarvapali D.},
  editor        = {Dasgupta, Prithviraj and Llinas, James and Gillespie, Tony and Fouse, Scott and Lawless, William and Mittu, Ranjeev and Sofge, Donald},
  url           = {https://eprints.soton.ac.uk/491769/},
  year          = {2024},
  date          = {2024-02-01},
  booktitle     = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
  pages         = {93--125},
  publisher     = {Academic Press},
  abstract      = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trusts in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
  note          = {Publisher Copyright: \copyright{} 2024 Elsevier Inc. All rights reserved.},
  keywords      = {Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design},
  internal-note = {NOTE(review): citation key soton491769 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {incollection}
}
Krook, Joshua; Downer, John; Winter, Peter; Williams, Jennifer; Ives, Jonathan; Bratu, Roxana; Sheir, Stephanie; Williams, Robin; Anderson, Stuart; Li, Phoebe; Ramamoorthy, Subramanian; Ramchurn, Sarvapali
AI regulation: a pro-innovation approach – policy proposals: TASHub Response Miscellaneous
2023.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Consultation, innovation, Regulation, Trustworthy Autonomous Systems
@misc{soton478329,
  title         = {AI regulation: a pro-innovation approach -- policy proposals: TASHub Response},
  author        = {Krook, Joshua and Downer, John and Winter, Peter and Williams, Jennifer and Ives, Jonathan and Bratu, Roxana and Sheir, Stephanie and Williams, Robin and Anderson, Stuart and Li, Phoebe and Ramamoorthy, Subramanian and Ramchurn, Sarvapali},
  url           = {https://eprints.soton.ac.uk/478329/},
  year          = {2023},
  date          = {2023-06-01},
  howpublished  = {University of Southampton},
  abstract      = {Response to open consultation from: Department for Science, Innovation and Technology and Office for Artificial Intelligence},
  keywords      = {Artificial Intelligence, Consultation, innovation, Regulation, Trustworthy Autonomous Systems},
  internal-note = {NOTE(review): citation key soton478329 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {misc}
}
Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Citizen-Centric AI Systems, human-agent collectives, Human-Centred AI, Multiagent Responsibility Reasoning, Multiagent Systems, Trustworthy Autonomous Systems
@article{soton471971,
  title         = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
  author        = {Yazdanpanah, Vahid and Gerding, Enrico and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy and Ramchurn, Sarvapali},
  url           = {https://eprints.soton.ac.uk/471971/},
  year          = {2022},
  date          = {2022-11-01},
  journal       = {AI \& Society},
  abstract      = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous system can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks based on that. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a road-map and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enables an effective embedding of AI technologies into society.},
  keywords      = {Artificial Intelligence, Citizen-Centric AI Systems, human-agent collectives, Human-Centred AI, Multiagent Responsibility Reasoning, Multiagent Systems, Trustworthy Autonomous Systems},
  internal-note = {NOTE(review): citation key soton471971 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
Abstract | Links | BibTeX | Tags: Artificial Intelligence, Distributed Artificial Intelligence, Intelligent Agents, Multiagent Systems, Trustworthy Autonomous Systems
@article{soton467975,
  title         = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
  author        = {Soorati, Mohammad Divband and Gerding, Enrico and Marchioni, Enrico and Naumov, Pavel and Norman, Timothy and Ramchurn, Sarvapali and Rastegari, Baharak and Sobey, Adam and Stein, Sebastian and Tarapore, Danesh and Yazdanpanah, Vahid and Zhang, Jie},
  url           = {https://eprints.soton.ac.uk/467975/},
  year          = {2022},
  date          = {2022-07-01},
  journal       = {AI Communications},
  abstract      = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
  keywords      = {Artificial Intelligence, Distributed Artificial Intelligence, Intelligent Agents, Multiagent Systems, Trustworthy Autonomous Systems},
  internal-note = {NOTE(review): citation key soton467975 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
Abstract | Links | BibTeX | Tags: Multiagent Systems, Reliable AI, Responsibility Reasoning, Trustworthy Autonomous Systems
@inproceedings{soton446459,
  title         = {Responsibility ascription in trustworthy autonomous systems},
  author        = {Yazdanpanah, Vahid and Gerding, Enrico H. and Stein, Sebastian and Dastani, Mehdi and Jonker, Catholijn M. and Norman, Timothy J. and Ramchurn, Sarvapali D.},
  url           = {https://eprints.soton.ac.uk/446459/},
  year          = {2021},
  date          = {2021-02-01},
  booktitle     = {Embedding AI in Society (18/02/21 -- 19/02/21)},
  abstract      = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role to bridge them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
  keywords      = {Multiagent Systems, Reliable AI, Responsibility Reasoning, Trustworthy Autonomous Systems},
  internal-note = {NOTE(review): citation key soton446459 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
@incollection{soton491769,
  title         = {Enabling trustworthiness in human-swarm systems through a digital twin},
  author        = {Soorati, Mohammad D. and Naiseh, Mohammad and Hunt, William and Parnell, Katie and Clark, Jediah and Ramchurn, Sarvapali D.},
  editor        = {Dasgupta, Prithviraj and Llinas, James and Gillespie, Tony and Fouse, Scott and Lawless, William and Mittu, Ranjeev and Sofge, Donald},
  url           = {https://eprints.soton.ac.uk/491769/},
  year          = {2024},
  date          = {2024-02-01},
  booktitle     = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
  pages         = {93--125},
  publisher     = {Academic Press},
  abstract      = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trusts in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
  note          = {Publisher Copyright: \copyright{} 2024 Elsevier Inc. All rights reserved.},
  internal-note = {NOTE(review): citation key soton491769 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {incollection}
}
Krook, Joshua; Downer, John; Winter, Peter; Williams, Jennifer; Ives, Jonathan; Bratu, Roxana; Sheir, Stephanie; Williams, Robin; Anderson, Stuart; Li, Phoebe; Ramamoorthy, Subramanian; Ramchurn, Sarvapali
AI regulation: a pro-innovation approach – policy proposals: TASHub Response Miscellaneous
2023.
@misc{soton478329,
  title         = {AI regulation: a pro-innovation approach -- policy proposals: TASHub Response},
  author        = {Krook, Joshua and Downer, John and Winter, Peter and Williams, Jennifer and Ives, Jonathan and Bratu, Roxana and Sheir, Stephanie and Williams, Robin and Anderson, Stuart and Li, Phoebe and Ramamoorthy, Subramanian and Ramchurn, Sarvapali},
  url           = {https://eprints.soton.ac.uk/478329/},
  year          = {2023},
  date          = {2023-06-01},
  howpublished  = {University of Southampton},
  abstract      = {Response to open consultation from: Department for Science, Innovation and Technology and Office for Artificial Intelligence},
  internal-note = {NOTE(review): citation key soton478329 is repeated earlier in this file -- classic BibTeX reports a repeated entry; deduplicate},
  pubstate      = {published},
  tppubtype     = {misc}
}
Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
@article{soton471971,
title = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
author = {Vahid Yazdanpanah and Enrico Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy Norman and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/471971/},
year = {2022},
date = {2022-11-01},
journal = {AI \& Society},
abstract = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous system can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks based on that. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a road-map and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enables an effective embedding of AI technologies into society.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
@article{soton467975,
  title     = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
  author    = {Mohammad Divband Soorati and Enrico Gerding and Enrico Marchioni and Pavel Naumov and Timothy Norman and Sarvapali Ramchurn and Baharak Rastegari and Adam Sobey and Sebastian Stein and Danesh Tarapore and Vahid Yazdanpanah and Jie Zhang},
  url       = {https://eprints.soton.ac.uk/467975/},
  year      = {2022},
  date      = {2022-07-01},
  journal   = {AI Communications},
  abstract  = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
@inproceedings{soton446459,
  title     = {Responsibility ascription in trustworthy autonomous systems},
  author    = {Vahid Yazdanpanah and Enrico H Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy J Norman and Sarvapali D Ramchurn},
  url       = {https://eprints.soton.ac.uk/446459/},
  year      = {2021},
  date      = {2021-02-01},
  booktitle = {Embedding AI in Society (18/02/21 - 19/02/21)},
  abstract  = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role to bridge them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Multi-agent signal-less intersection management with dynamic platoon formation
AI Foundation Models: initial review, CMA Consultation, TAS Hub Response
The effect of data visualisation quality and task density on human-swarm interaction
Demonstrating performance benefits of human-swarm teaming
Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Ed.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
@incollection{soton491769,
title = {Enabling trustworthiness in human-swarm systems through a digital twin},
author = {Mohammad D. Soorati and Mohammad Naiseh and William Hunt and Katie Parnell and Jediah Clark and Sarvapali D. Ramchurn},
editor = {Prithviraj Dasgupta and James Llinas and Tony Gillespie and Scott Fouse and William Lawless and Ranjeev Mittu and Donald Sofge},
url = {https://eprints.soton.ac.uk/491769/},
year = {2024},
date = {2024-02-01},
booktitle = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
pages = {93--125},
publisher = {Academic Press},
abstract = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trusts in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
note = {Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.},
keywords = {Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design},
pubstate = {published},
tppubtype = {incollection}
}
Krook, Joshua; Downer, John; Winter, Peter; Williams, Jennifer; Ives, Jonathan; Bratu, Roxana; Sheir, Stephanie; Williams, Robin; Anderson, Stuart; Li, Phoebe; Ramamoorthy, Subramanian; Ramchurn, Sarvapali
AI regulation: a pro-innovation approach – policy proposals: TASHub Response Miscellaneous
2023.
@misc{soton478329,
title = {AI regulation: a pro-innovation approach -- policy proposals: {TASHub} Response},
author = {Joshua Krook and John Downer and Peter Winter and Jennifer Williams and Jonathan Ives and Roxana Bratu and Stephanie Sheir and Robin Williams and Stuart Anderson and Phoebe Li and Subramanian Ramamoorthy and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/478329/},
year = {2023},
date = {2023-06-01},
publisher = {University of Southampton},
abstract = {Response to open consultation from: Department for Science, Innovation and Technology and Office for Artificial Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yazdanpanah, Vahid; Gerding, Enrico; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy; Ramchurn, Sarvapali
Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities Journal Article
In: AI & Society, 2022.
@article{soton471971,
title = {Reasoning About Responsibility in Autonomous Systems: Challenges and Opportunities},
author = {Vahid Yazdanpanah and Enrico Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy Norman and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/471971/},
year = {2022},
date = {2022-11-01},
journal = {AI \& Society},
abstract = {Ensuring the trustworthiness of autonomous systems and artificial intelligence is an important interdisciplinary endeavour. In this position paper, we argue that this endeavour will benefit from technical advancements in capturing various forms of responsibility, and we present a comprehensive research agenda to achieve this. In particular, we argue that ensuring the reliability of autonomous system can take advantage of technical approaches for quantifying degrees of responsibility and for coordinating tasks based on that. Moreover, we deem that, in certifying the legality of an AI system, formal and computationally implementable notions of responsibility, blame, accountability, and liability are applicable for addressing potential responsibility gaps (i.e., situations in which a group is responsible, but individuals' responsibility may be unclear). This is a call to enable AI systems themselves, as well as those involved in the design, monitoring, and governance of AI systems, to represent and reason about who can be seen as responsible in prospect (e.g., for completing a task in future) and who can be seen as responsible retrospectively (e.g., for a failure that has already occurred). To that end, in this work, we show that across all stages of the design, development, and deployment of Trustworthy Autonomous Systems (TAS), responsibility reasoning should play a key role. This position paper is the first step towards establishing a road-map and research agenda on how the notion of responsibility can provide novel solution concepts for ensuring the reliability and legality of TAS and, as a result, enables an effective embedding of AI technologies into society.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Soorati, Mohammad Divband; Gerding, Enrico; Marchioni, Enrico; Naumov, Pavel; Norman, Timothy; Ramchurn, Sarvapali; Rastegari, Baharak; Sobey, Adam; Stein, Sebastian; Tarapore, Danesh; Yazdanpanah, Vahid; Zhang, Jie
From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems Journal Article
In: AI Communications, 2022.
@article{soton467975,
  title     = {From Intelligent Agents to Trustworthy Human-Centred Multiagent Systems},
  author    = {Mohammad Divband Soorati and Enrico Gerding and Enrico Marchioni and Pavel Naumov and Timothy Norman and Sarvapali Ramchurn and Baharak Rastegari and Adam Sobey and Sebastian Stein and Danesh Tarapore and Vahid Yazdanpanah and Jie Zhang},
  url       = {https://eprints.soton.ac.uk/467975/},
  year      = {2022},
  date      = {2022-07-01},
  journal   = {AI Communications},
  abstract  = {The Agents, Interaction and Complexity research group at the University of Southampton has a long track record of research in multiagent systems (MAS). We have made substantial scientific contributions across learning in MAS, game-theoretic techniques for coordinating agent systems, and formal methods for representation and reasoning. We highlight key results achieved by the group and elaborate on recent work and open research challenges in developing trustworthy autonomous systems and deploying human-centred AI systems that aim to support societal good.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Yazdanpanah, Vahid; Gerding, Enrico H; Stein, Sebastian; Dastani, Mehdi; Jonker, Catholijn M; Norman, Timothy J; Ramchurn, Sarvapali D
Responsibility ascription in trustworthy autonomous systems Proceedings Article
In: Embedding AI in Society (18/02/21 - 19/02/21), 2021.
@inproceedings{soton446459,
  title     = {Responsibility ascription in trustworthy autonomous systems},
  author    = {Vahid Yazdanpanah and Enrico H Gerding and Sebastian Stein and Mehdi Dastani and Catholijn M Jonker and Timothy J Norman and Sarvapali D Ramchurn},
  url       = {https://eprints.soton.ac.uk/446459/},
  year      = {2021},
  date      = {2021-02-01},
  booktitle = {Embedding AI in Society (18/02/21 - 19/02/21)},
  abstract  = {To develop and effectively deploy Trustworthy Autonomous Systems (TAS), we face various social, technological, legal, and ethical challenges in which different notions of responsibility can play a key role. In this work, we elaborate on these challenges, discuss research gaps, and show how the multidimensional notion of responsibility can play a key role to bridge them. We argue that TAS requires operational tools to represent and reason about the responsibilities of humans as well as AI agents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}