Publication list export (BibTeX entries plus rendered-text residue); a stray "font" artifact from extraction was removed here.
Naiseh, Mohammad; Webb, Catherine; Underwood, Tim; Ramchurn, Gopal; Walters, Zoe; Thavanesan, Navamayooran; Vigneswaran, Ganesh
XAI for group-AI interaction: towards collaborative and inclusive explanations Proceedings Article
In: Longo, Luca; Liu, Weiru; Montavon, Grégoire (Ed.): Joint Proceedings of the xAI 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence (xAI 2024), pp. 249–256, CEUR Workshop Proceedings, 2024.
Abstract | Links | BibTeX | Tags: Explainable AI, Group-AI Interaction, Interaction Design
@inproceedings{soton497829,
  title     = {{XAI} for group-{AI} interaction: towards collaborative and inclusive explanations},
  author    = {Naiseh, Mohammad and Webb, Catherine and Underwood, Tim and Ramchurn, Gopal and Walters, Zoe and Thavanesan, Navamayooran and Vigneswaran, Ganesh},
  editor    = {Longo, Luca and Liu, Weiru and Montavon, Gr{\'e}goire},
  url       = {https://eprints.soton.ac.uk/497829/},
  year      = {2024},
  date      = {2024-07-01},
  booktitle = {Joint Proceedings of the {xAI} 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence ({xAI} 2024)},
  volume    = {3793},
  pages     = {249--256},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The increasing integration of Machine Learning (ML) into decision-making across various sectors has raised concerns about ethics, legality, explainability, and safety, highlighting the necessity of human oversight. In response, eXplainable AI (XAI) has emerged as a means to enhance transparency by providing insights into ML model decisions and offering humans an understanding of the underlying logic. Despite its potential, existing XAI models often lack practical usability and fail to improve human-AI performance, as they may introduce issues such as overreliance. This underscores the need for further research in Human-Centered XAI to improve the usability of current XAI methods. Notably, much of the current research focuses on one-to-one interactions between the XAI and individual decision-makers, overlooking the dynamics of many-to-one relationships in real-world scenarios where groups of humans collaborate using XAI in collective decision-making. In this late-breaking work, we draw upon current work in Human-Centered XAI research and discuss how XAI design could be transitioned to group-AI interaction. We discuss four potential challenges in the transition of XAI from human-AI interaction to group-AI interaction. This paper contributes to advancing the field of Human-Centered XAI and facilitates the discussion on group-XAI interaction, calling for further research in this area.},
  keywords  = {Explainable AI, Group-AI Interaction, Interaction Design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Naiseh, Mohammad; Webb, Catherine; Underwood, Tim; Ramchurn, Gopal; Walters, Zoe; Thavanesan, Navamayooran; Vigneswaran, Ganesh
XAI for group-AI interaction: towards collaborative and inclusive explanations Proceedings Article
In: Longo, Luca; Liu, Weiru; Montavon, Grégoire (Ed.): Joint Proceedings of the xAI 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence (xAI 2024), pp. 249–256, CEUR Workshop Proceedings, 2024.
@inproceedings{soton497829b,
  title     = {{XAI} for group-{AI} interaction: towards collaborative and inclusive explanations},
  author    = {Naiseh, Mohammad and Webb, Catherine and Underwood, Tim and Ramchurn, Gopal and Walters, Zoe and Thavanesan, Navamayooran and Vigneswaran, Ganesh},
  editor    = {Longo, Luca and Liu, Weiru and Montavon, Gr{\'e}goire},
  url       = {https://eprints.soton.ac.uk/497829/},
  year      = {2024},
  date      = {2024-07-01},
  booktitle = {Joint Proceedings of the {xAI} 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence ({xAI} 2024)},
  volume    = {3793},
  pages     = {249--256},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The increasing integration of Machine Learning (ML) into decision-making across various sectors has raised concerns about ethics, legality, explainability, and safety, highlighting the necessity of human oversight. In response, eXplainable AI (XAI) has emerged as a means to enhance transparency by providing insights into ML model decisions and offering humans an understanding of the underlying logic. Despite its potential, existing XAI models often lack practical usability and fail to improve human-AI performance, as they may introduce issues such as overreliance. This underscores the need for further research in Human-Centered XAI to improve the usability of current XAI methods. Notably, much of the current research focuses on one-to-one interactions between the XAI and individual decision-makers, overlooking the dynamics of many-to-one relationships in real-world scenarios where groups of humans collaborate using XAI in collective decision-making. In this late-breaking work, we draw upon current work in Human-Centered XAI research and discuss how XAI design could be transitioned to group-AI interaction. We discuss four potential challenges in the transition of XAI from human-AI interaction to group-AI interaction. This paper contributes to advancing the field of Human-Centered XAI and facilitates the discussion on group-XAI interaction, calling for further research in this area.},
  keywords  = {Explainable AI, Group-AI Interaction, Interaction Design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Naiseh, Mohammad; Webb, Catherine; Underwood, Tim; Ramchurn, Gopal; Walters, Zoe; Thavanesan, Navamayooran; Vigneswaran, Ganesh
XAI for group-AI interaction: towards collaborative and inclusive explanations Proceedings Article
In: Longo, Luca; Liu, Weiru; Montavon, Grégoire (Ed.): Joint Proceedings of the xAI 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence (xAI 2024), pp. 249–256, CEUR Workshop Proceedings, 2024.
Abstract | Links | BibTeX | Tags: Explainable AI, Group-AI Interaction, Interaction Design
@inproceedings{soton497829c,
  title     = {{XAI} for group-{AI} interaction: towards collaborative and inclusive explanations},
  author    = {Naiseh, Mohammad and Webb, Catherine and Underwood, Tim and Ramchurn, Gopal and Walters, Zoe and Thavanesan, Navamayooran and Vigneswaran, Ganesh},
  editor    = {Longo, Luca and Liu, Weiru and Montavon, Gr{\'e}goire},
  url       = {https://eprints.soton.ac.uk/497829/},
  year      = {2024},
  date      = {2024-07-01},
  booktitle = {Joint Proceedings of the {xAI} 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence ({xAI} 2024)},
  volume    = {3793},
  pages     = {249--256},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The increasing integration of Machine Learning (ML) into decision-making across various sectors has raised concerns about ethics, legality, explainability, and safety, highlighting the necessity of human oversight. In response, eXplainable AI (XAI) has emerged as a means to enhance transparency by providing insights into ML model decisions and offering humans an understanding of the underlying logic. Despite its potential, existing XAI models often lack practical usability and fail to improve human-AI performance, as they may introduce issues such as overreliance. This underscores the need for further research in Human-Centered XAI to improve the usability of current XAI methods. Notably, much of the current research focuses on one-to-one interactions between the XAI and individual decision-makers, overlooking the dynamics of many-to-one relationships in real-world scenarios where groups of humans collaborate using XAI in collective decision-making. In this late-breaking work, we draw upon current work in Human-Centered XAI research and discuss how XAI design could be transitioned to group-AI interaction. We discuss four potential challenges in the transition of XAI from human-AI interaction to group-AI interaction. This paper contributes to advancing the field of Human-Centered XAI and facilitates the discussion on group-XAI interaction, calling for further research in this area.},
  keywords  = {Explainable AI, Group-AI Interaction, Interaction Design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Naiseh, Mohammad; Webb, Catherine; Underwood, Tim; Ramchurn, Gopal; Walters, Zoe; Thavanesan, Navamayooran; Vigneswaran, Ganesh
XAI for group-AI interaction: towards collaborative and inclusive explanations Proceedings Article
In: Longo, Luca; Liu, Weiru; Montavon, Grégoire (Ed.): Joint Proceedings of the xAI 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence (xAI 2024), pp. 249–256, CEUR Workshop Proceedings, 2024.
@inproceedings{soton497829d,
  title     = {{XAI} for group-{AI} interaction: towards collaborative and inclusive explanations},
  author    = {Naiseh, Mohammad and Webb, Catherine and Underwood, Tim and Ramchurn, Gopal and Walters, Zoe and Thavanesan, Navamayooran and Vigneswaran, Ganesh},
  editor    = {Longo, Luca and Liu, Weiru and Montavon, Gr{\'e}goire},
  url       = {https://eprints.soton.ac.uk/497829/},
  year      = {2024},
  date      = {2024-07-01},
  booktitle = {Joint Proceedings of the {xAI} 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence ({xAI} 2024)},
  volume    = {3793},
  pages     = {249--256},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The increasing integration of Machine Learning (ML) into decision-making across various sectors has raised concerns about ethics, legality, explainability, and safety, highlighting the necessity of human oversight. In response, eXplainable AI (XAI) has emerged as a means to enhance transparency by providing insights into ML model decisions and offering humans an understanding of the underlying logic. Despite its potential, existing XAI models often lack practical usability and fail to improve human-AI performance, as they may introduce issues such as overreliance. This underscores the need for further research in Human-Centered XAI to improve the usability of current XAI methods. Notably, much of the current research focuses on one-to-one interactions between the XAI and individual decision-makers, overlooking the dynamics of many-to-one relationships in real-world scenarios where groups of humans collaborate using XAI in collective decision-making. In this late-breaking work, we draw upon current work in Human-Centered XAI research and discuss how XAI design could be transitioned to group-AI interaction. We discuss four potential challenges in the transition of XAI from human-AI interaction to group-AI interaction. This paper contributes to advancing the field of Human-Centered XAI and facilitates the discussion on group-XAI interaction, calling for further research in this area.},
  keywords  = {Explainable AI, Group-AI Interaction, Interaction Design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Multi-agent signal-less intersection management with dynamic platoon formation
AI Foundation Models: initial review, CMA Consultation, TAS Hub Response
The effect of data visualisation quality and task density on human-swarm interaction
Demonstrating performance benefits of human-swarm teaming
Naiseh, Mohammad; Webb, Catherine; Underwood, Tim; Ramchurn, Gopal; Walters, Zoe; Thavanesan, Navamayooran; Vigneswaran, Ganesh
XAI for group-AI interaction: towards collaborative and inclusive explanations Proceedings Article
In: Longo, Luca; Liu, Weiru; Montavon, Grégoire (Ed.): Joint Proceedings of the xAI 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence (xAI 2024), pp. 249–256, CEUR Workshop Proceedings, 2024.
@inproceedings{soton497829e,
  title     = {{XAI} for group-{AI} interaction: towards collaborative and inclusive explanations},
  author    = {Naiseh, Mohammad and Webb, Catherine and Underwood, Tim and Ramchurn, Gopal and Walters, Zoe and Thavanesan, Navamayooran and Vigneswaran, Ganesh},
  editor    = {Longo, Luca and Liu, Weiru and Montavon, Gr{\'e}goire},
  url       = {https://eprints.soton.ac.uk/497829/},
  year      = {2024},
  date      = {2024-07-01},
  booktitle = {Joint Proceedings of the {xAI} 2024 Late-breaking Work, Demos and Doctoral Consortium co-located with the 2nd World Conference on eXplainable Artificial Intelligence ({xAI} 2024)},
  volume    = {3793},
  pages     = {249--256},
  publisher = {CEUR Workshop Proceedings},
  abstract  = {The increasing integration of Machine Learning (ML) into decision-making across various sectors has raised concerns about ethics, legality, explainability, and safety, highlighting the necessity of human oversight. In response, eXplainable AI (XAI) has emerged as a means to enhance transparency by providing insights into ML model decisions and offering humans an understanding of the underlying logic. Despite its potential, existing XAI models often lack practical usability and fail to improve human-AI performance, as they may introduce issues such as overreliance. This underscores the need for further research in Human-Centered XAI to improve the usability of current XAI methods. Notably, much of the current research focuses on one-to-one interactions between the XAI and individual decision-makers, overlooking the dynamics of many-to-one relationships in real-world scenarios where groups of humans collaborate using XAI in collective decision-making. In this late-breaking work, we draw upon current work in Human-Centered XAI research and discuss how XAI design could be transitioned to group-AI interaction. We discuss four potential challenges in the transition of XAI from human-AI interaction to group-AI interaction. This paper contributes to advancing the field of Human-Centered XAI and facilitates the discussion on group-XAI interaction, calling for further research in this area.},
  keywords  = {Explainable AI, Group-AI Interaction, Interaction Design},
  pubstate  = {published},
  tppubtype = {inproceedings}
}