Soorati, Mohammad D.; Naiseh, Mohammad; Hunt, William; Parnell, Katie; Clark, Jediah; Ramchurn, Sarvapali D.
Enabling trustworthiness in human-swarm systems through a digital twin Book Section
In: Dasgupta, Prithviraj; Llinas, James; Gillespie, Tony; Fouse, Scott; Lawless, William; Mittu, Ranjeev; Sofge, Donald (Eds.): Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams, pp. 93–125, Academic Press, 2024, (Publisher Copyright: © 2024 Elsevier Inc. All rights reserved.).
@incollection{soton491769,
title = {Enabling trustworthiness in human-swarm systems through a digital twin},
author = {Mohammad D. Soorati and Mohammad Naiseh and William Hunt and Katie Parnell and Jediah Clark and Sarvapali D. Ramchurn},
editor = {Prithviraj Dasgupta and James Llinas and Tony Gillespie and Scott Fouse and William Lawless and Ranjeev Mittu and Donald Sofge},
url = {https://eprints.soton.ac.uk/491769/},
year = {2024},
date = {2024-02-01},
booktitle = {Putting AI in the Critical Loop: Assured Trust and Autonomy in Human-Machine Teams},
pages = {93–125},
publisher = {Academic Press},
abstract = {Robot swarms are highly dynamic systems that exhibit fault-tolerant behavior in accomplishing given tasks. Applications of swarm robotics are very limited due to the lack of complex decision-making capability. Real-world applications are only possible if we use human supervision to monitor and control the behavior of the swarm. Ensuring that human operators can trust the swarm system is one of the key challenges in human-swarm systems. This chapter presents a digital twin for trustworthy human-swarm teaming. The first element in designing such a simulation platform is to understand the trust requirements to label a human-swarm system as trustworthy. In order to outline the key trust requirements, we interviewed a group of experienced uncrewed aerial vehicle (UAV) operators and collated their suggestions for building and repairing trust in single and multiple UAV systems. We then performed a survey to gather swarm experts' points of view on creating a taxonomy for explainability in human-swarm systems. This chapter presents a digital twin platform that implements a disaster management use case and has the capacity to meet the extracted trust and explainability requirements.},
note = {Publisher Copyright:
© 2024 Elsevier Inc. All rights reserved.},
keywords = {Digital twin, Explainability, Human-swarm interaction, Trustworthy Autonomous Systems, User-centered design},
pubstate = {published},
tppubtype = {incollection}
}