@inproceedings{soton478647,
  title     = {Multi-agent signal-less intersection management with dynamic platoon formation},
  author    = {Worrawichaipat, Phuriwat and Gerding, Enrico and Kaparias, Ioannis and Ramchurn, Sarvapali},
  url       = {https://eprints.soton.ac.uk/478647/},
  year      = {2023},
  date      = {2023-05-01},
  booktitle = {22nd International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {1542--1550},
  note      = {Event Dates: 29 May - 2 June 2023},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
@misc{soton477553,
  title     = {{AI} Foundation Models: initial review, {CMA} Consultation, {TAS} Hub Response},
  author    = {Krook, Joshua and McAuley, Derek and Anderson, Stuart and Downer, John and Winter, Peter and Ramchurn, Sarvapali D.},
  url       = {https://eprints.soton.ac.uk/477553/},
  year      = {2023},
  date      = {2023-06-01},
  publisher = {University of Southampton},
  pubstate  = {published},
  tppubtype = {misc}
}
@inproceedings{soton479970,
  title     = {The effect of data visualisation quality and task density on human-swarm interaction},
  author    = {Abioye, Ayodeji and Naiseh, Mohammad and Hunt, William and Clark, Jediah R. and Ramchurn, Sarvapali D. and Soorati, Mohammad},
  url       = {https://eprints.soton.ac.uk/479970/},
  year      = {2023},
  date      = {2023-06-01},
  booktitle = {Proceedings of the 2023 32nd {IEEE} International Conference on Robot and Human Interactive Communication ({RO-MAN})},
  publisher = {IEEE},
  abstract  = {Despite the advantages of having robot swarms, human supervision is required for real-world applications. The performance of the human-swarm system depends on several factors including the data availability for the human operators. In this paper, we study the human factors aspect of the human-swarm interaction and investigate how having access to high-quality data can affect the performance of the human-swarm system - the number of tasks completed and the human trust level in operation. We designed an experiment where a human operator is tasked to operate a swarm to identify casualties in an area within a given time period. One group of operators had the option to request high-quality pictures while the other group had to base their decision on the available low-quality images. We performed a user study with 120 participants and recorded their success rate (directly logged via the simulation platform) as well as their workload and trust level (measured through a questionnaire after completing a human-swarm scenario). The findings from our study indicated that the group granted access to high-quality data exhibited an increased workload and placed greater trust in the swarm, thus confirming our initial hypothesis. However, we also found that the number of accurately identified casualties did not significantly vary between the two groups, suggesting that data quality had no impact on the successful completion of tasks.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Autonomous swarms of robots can bring robustness, scalability and adaptability to safety-critical tasks such as search and rescue but their application is still very limited. Using semi-autonomous swarms with human control can bring robot swarms to real-world applications. Human operators can define goals for the swarm, monitor their performance and interfere with, or overrule, the decisions and behaviour. We present the "Human And Robot Interactive Swarm" simulator (HARIS) that allows multi-user interaction with a robot swarm and facilitates qualitative and quantitative user studies through simulation of robot swarms completing tasks, from package delivery to search and rescue, with varying levels of human control. In this demonstration, we showcase the simulator by using it to study the performance gain offered by maintaining a "human-in-the-loop" over a fully autonomous system as an example. This is illustrated in the context of search and rescue, with an autonomous allocation of resources to those in need.
https://eprints.soton.ac.uk/479903/
@inproceedings{soton479903,
  title     = {Demonstrating performance benefits of human-swarm teaming},
  author    = {Hunt, William and Ryan, Jack and Abioye, Ayodeji O. and Ramchurn, Sarvapali D. and Soorati, Mohammad D.},
  url       = {https://eprints.soton.ac.uk/479903/},
  year      = {2023},
  date      = {2023-05-01},
  booktitle = {Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {3062--3064},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
  abstract  = {Autonomous swarms of robots can bring robustness, scalability and adaptability to safety-critical tasks such as search and rescue but their application is still very limited. Using semi-autonomous swarms with human control can bring robot swarms to real-world applications. Human operators can define goals for the swarm, monitor their performance and interfere with, or overrule, the decisions and behaviour. We present the ``Human And Robot Interactive Swarm'' simulator (HARIS) that allows multi-user interaction with a robot swarm and facilitates qualitative and quantitative user studies through simulation of robot swarms completing tasks, from package delivery to search and rescue, with varying levels of human control. In this demonstration, we showcase the simulator by using it to study the performance gain offered by maintaining a ``human-in-the-loop'' over a fully autonomous system as an example. This is illustrated in the context of search and rescue, with an autonomous allocation of resources to those in need.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
We define trust-based mechanism design as an augmentation of traditional mechanism design in which agents take into account the degree of trust that they have in their counterparts when determining their allocations. To this end, we develop an efficient, individually rational, and incentive compatible mechanism based on trust. This mechanism is embedded in a task allocation scenario in which the trust in an agent is derived from the reported performance success of that agent by all the other agents in the system. We also empirically study the evolution of our mechanism when iterated and show that, in the long run, it always chooses the most successful and cheapest agents to fulfill an allocation and chooses better allocations than other comparable models when faced with biased reporting.
http://eprints.soton.ac.uk/259352/
@inproceedings{eps259352,
  title     = {Trust-Based Mechanism Design},
  author    = {Dash, R. K. and Ramchurn, S. D. and Jennings, N. R.},
  url       = {http://eprints.soton.ac.uk/259352/},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {3rd Int. Conf. on Autonomous Agents and Multi-Agent Systems},
  pages     = {748--755},
  abstract  = {We define trust-based mechanism design as an augmentation of traditional mechanism design in which agents take into account the degree of trust that they have in their counterparts when determining their allocations. To this end, we develop an efficient, individually rational, and incentive compatible mechanism based on trust. This mechanism is embedded in a task allocation scenario in which the trust in an agent is derived from the reported performance success of that agent by all the other agents in the system. We also empirically study the evolution of our mechanism when iterated and show that, in the long run, it always chooses the most successful and cheapest agents to fulfill an allocation and chooses better allocations than other comparable models when faced with biased reporting.},
  note      = {Event Dates: 19-23 July 2004},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
This paper highlights intrusiveness as a key issue in the field of pervasive computing environments and presents a multi-agent approach to tackling it. Specifically, we discuss how interruptions can impact on individual and group tasks and how they can be managed by taking into account user and group preferences through negotiation between software agents. The system we develop is implemented on the Jabber platform and is deployed in the context of a meeting room scenario.
http://eprints.soton.ac.uk/259566/
@inproceedings{eps259566,
  title     = {Minimising intrusiveness in pervasive computing environments using multi-agent negotiation},
  author    = {Ramchurn, S. D. and Deitch, B. and Thompson, M. K. and de Roure, D. C. and Jennings, N. R. and Luck, M.},
  url       = {http://eprints.soton.ac.uk/259566/},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {First Annual International Conference on Mobile and Ubiquitous Systems: Networking and Services (MobiQuitous'04)},
  pages     = {364--372},
  publisher = {IEEE},
  abstract  = {This paper highlights intrusiveness as a key issue in the field of pervasive computing environments and presents a multi-agent approach to tackling it. Specifically, we discuss how interruptions can impact on individual and group tasks and how they can be managed by taking into account user and group preferences through negotiation between software agents. The system we develop is implemented on the Jabber platform and is deployed in the context of a meeting room scenario.},
  note      = {Event Dates: August 22 - 26, 2004},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Trust is a fundamental concern in large-scale open distributed systems. It lies at the core of all interactions between the entities that have to operate in such uncertain and constantly changing environments. Given this complexity, these components, and the ensuing system, are increasingly being conceptualised, designed, and built using agent-based techniques and, to this end, this paper examines the specific role of trust in multi-agent systems. In particular, we survey the state of the art and provide an account of the main directions along which research efforts are being focused. In so doing, we critically evaluate the relative strengths and weaknesses of the main models that have been proposed and show how, fundamentally, they all seek to minimise the uncertainty in interactions. Finally, we outline the areas that require further research in order to develop a comprehensive treatment of trust in complex computational settings.
http://eprints.soton.ac.uk/259564/
@article{eps259564,
  title     = {Trust in Multiagent Systems},
  author    = {Ramchurn, S. D. and Huynh, T. D. and Jennings, N. R.},
  url       = {http://eprints.soton.ac.uk/259564/},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {The Knowledge Engineering Review},
  volume    = {19},
  number    = {1},
  pages     = {1--25},
  abstract  = {Trust is a fundamental concern in large-scale open distributed systems. It lies at the core of all interactions between the entities that have to operate in such uncertain and constantly changing environments. Given this complexity, these components, and the ensuing system, are increasingly being conceptualised, designed, and built using agent-based techniques and, to this end, this paper examines the specific role of trust in multi-agent systems. In particular, we survey the state of the art and provide an account of the main directions along which research efforts are being focused. In so doing, we critically evaluate the relative strengths and weaknesses of the main models that have been proposed and show how, fundamentally, they all seek to minimise the uncertainty in interactions. Finally, we outline the areas that require further research in order to develop a comprehensive treatment of trust in complex computational settings.},
  pubstate  = {published},
  tppubtype = {article}
}
In this thesis, we propose a panoply of tools and techniques to manage inter-agent dependencies in open, distributed multi-agent systems that have significant degrees of uncertainty. In particular, we focus on situations in which agents are involved in repeated interactions where they need to negotiate to resolve conflicts that may arise between them. To this end, we endow agents with decision making models that exploit the notion of trust and use persuasive techniques during the negotiation process to reduce the level of uncertainty and achieve better deals in the long run. Firstly, we develop and evaluate a new trust model (called CREDIT) that allows agents to measure the degree of trust they should place in their opponents. This model reduces the uncertainty that agents have about their opponents' reliability. Thus, over repeated interactions, CREDIT enables agents to model their opponents' reliability using probabilistic techniques and a fuzzy reasoning mechanism that allows the combination of measures based on reputation (indirect interactions) and confidence (direct interactions). In so doing, CREDIT takes a wider range of behaviour-influencing factors into account than existing models, including the norms of the agents and the institution within which transactions occur. We then explore a novel application of trust models by showing how the measures developed in CREDIT can be applied to negotiations in multiple encounters. Specifically, we show that agents that use CREDIT are able to avoid unreliable agents, both during the selection of interaction partners and during the negotiation process itself by using trust to adjust their negotiation stance. Also, we empirically show that agents are able to reach good deals with agents that are unreliable to some degree (rather than completely unreliable) and with those that try to strategically exploit their opponent.
Secondly, having applied CREDIT to negotiations, we further extend the application of trust to reduce uncertainty about the reliability of agents in mechanism design (where the honesty of agents is elicited by the protocol). Thus, we develop trust-based mechanism design (TBMD) that allows agents using a trust model (such as CREDIT) to reach efficient agreements that choose the most reliable agents in the long run. In particular, we show that our mechanism enforces truth-telling from the agents (i.e. it is incentive compatible), both about their perceived reliability of their opponent and their valuations for the goods to be traded. In proving the latter properties, our trust-based mechanism is shown to be the first reputation mechanism that implements individual rationality, incentive compatibility, and efficiency. Our trust-based mechanism is also empirically evaluated and shown to be better than other comparable models in reaching the outcome that maximises all the negotiating agents' utilities and in choosing the most reliable agents in the long run. Thirdly, having explored ways to reduce uncertainties about reliability and honesty, we use persuasive negotiation techniques to tackle issues associated with uncertainties that agents have about the preferences and the space of possible agreements. To this end, we propose a novel protocol and reasoning mechanism that agents can use to generate and evaluate persuasive elements, such as promises of future rewards, to support the offers they make during negotiation. These persuasive elements aim to make offers more attractive over multiple encounters given the absence of information about an opponent's discount factors or exact payoffs. Specifically, we empirically demonstrate that agents are able to achieve a larger number of agreements and a higher expected utility over repeated encounters when they are given the capability to give or ask for rewards.
Moreover, we develop a novel strategy using this protocol and show that it outperforms existing state of the art heuristic negotiation models. Finally, the applicability of persuasive negotiation and CREDIT is exemplified through a practical implementation in a pervasive computing environment. In this context, the negotiation mechanism is implemented in an instant messaging platform (JABBER) and used to resolve conflicts between group and individual preferences that arise in a meeting room scenario. In particular, we show how persuasive negotiation and trust permit a flexible management of interruptions by allowing intrusions to happen at appropriate times during the meeting while still managing to satisfy the preferences of all parties present.
http://eprints.soton.ac.uk/260200/
@phdthesis{eps260200,
  title     = {Multi-Agent Negotiation using Trust and Persuasion},
  author    = {Ramchurn, Sarvapali},
  url       = {http://eprints.soton.ac.uk/260200/},
  year      = {2004},
  date      = {2004-01-01},
  school    = {University of Southampton},
  abstract  = {In this thesis, we propose a panoply of tools and techniques to manage inter-agent dependencies in open, distributed multi-agent systems that have significant degrees of uncertainty. In particular, we focus on situations in which agents are involved in repeated interactions where they need to negotiate to resolve conflicts that may arise between them. To this end, we endow agents with decision making models that exploit the notion of trust and use persuasive techniques during the negotiation process to reduce the level of uncertainty and achieve better deals in the long run. Firstly, we develop and evaluate a new trust model (called CREDIT) that allows agents to measure the degree of trust they should place in their opponents. This model reduces the uncertainty that agents have about their opponents' reliability. Thus, over repeated interactions, CREDIT enables agents to model their opponents' reliability using probabilistic techniques and a fuzzy reasoning mechanism that allows the combination of measures based on reputation (indirect interactions) and confidence (direct interactions). In so doing, CREDIT takes a wider range of behaviour-influencing factors into account than existing models, including the norms of the agents and the institution within which transactions occur. We then explore a novel application of trust models by showing how the measures developed in CREDIT can be applied to negotiations in multiple encounters. Specifically, we show that agents that use CREDIT are able to avoid unreliable agents, both during the selection of interaction partners and during the negotiation process itself by using trust to adjust their negotiation stance. Also, we empirically show that agents are able to reach good deals with agents that are unreliable to some degree (rather than completely unreliable) and with those that try to strategically exploit their opponent.
Secondly, having applied CREDIT to negotiations, we further extend the application of trust to reduce uncertainty about the reliability of agents in mechanism design (where the honesty of agents is elicited by the protocol). Thus, we develop trust-based mechanism design (TBMD) that allows agents using a trust model (such as CREDIT) to reach efficient agreements that choose the most reliable agents in the long run. In particular, we show that our mechanism enforces truth-telling from the agents (i.e. it is incentive compatible), both about their perceived reliability of their opponent and their valuations for the goods to be traded. In proving the latter properties, our trust-based mechanism is shown to be the first reputation mechanism that implements individual rationality, incentive compatibility, and efficiency. Our trust-based mechanism is also empirically evaluated and shown to be better than other comparable models in reaching the outcome that maximises all the negotiating agents' utilities and in choosing the most reliable agents in the long run. Thirdly, having explored ways to reduce uncertainties about reliability and honesty, we use persuasive negotiation techniques to tackle issues associated with uncertainties that agents have about the preferences and the space of possible agreements. To this end, we propose a novel protocol and reasoning mechanism that agents can use to generate and evaluate persuasive elements, such as promises of future rewards, to support the offers they make during negotiation. These persuasive elements aim to make offers more attractive over multiple encounters given the absence of information about an opponent's discount factors or exact payoffs. Specifically, we empirically demonstrate that agents are able to achieve a larger number of agreements and a higher expected utility over repeated encounters when they are given the capability to give or ask for rewards.
Moreover, we develop a novel strategy using this protocol and show that it outperforms existing state of the art heuristic negotiation models. Finally, the applicability of persuasive negotiation and CREDIT is exemplified through a practical implementation in a pervasive computing environment. In this context, the negotiation mechanism is implemented in an instant messaging platform (JABBER) and used to resolve conflicts between group and individual preferences that arise in a meeting room scenario. In particular, we show how persuasive negotiation and trust permit a flexible management of interruptions by allowing intrusions to happen at appropriate times during the meeting while still managing to satisfy the preferences of all parties present.},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
@article{eps260155,
  title     = {Devising a trust model for multi-agent interactions using confidence and reputation},
  author    = {Ramchurn, Sarvapali and Sierra, C. and Godo, L. and Jennings, N. R.},
  url       = {http://eprints.soton.ac.uk/260155/},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {International Journal of Applied Artificial Intelligence},
  volume    = {18},
  number    = {9-10},
  pages     = {833--852},
  pubstate  = {published},
  tppubtype = {article}
}
Negotiation is essential in settings where autonomous agents have conflicting interests and a desire to cooperate. For this reason, mechanisms in which agents exchange potential agreements according to various rules of interaction have become very popular in recent years as evident, for example, in the auction and mechanism design community. However, a growing body of research is now emerging which points out limitations in such mechanisms and advocates the idea that agents can increase the likelihood and quality of an agreement by exchanging arguments which influence each others' states. This community further argues that argument exchange is sometimes essential when various assumptions about agent rationality cannot be satisfied. To this end, in this article, we identify the main research motivations and ambitions behind work in the field. We then provide a conceptual framework through which we outline the core elements and features required by agents engaged in argumentation-based negotiation, as well as the environment that hosts these agents. For each of these elements, we survey and evaluate existing proposed techniques in the literature and highlight the major challenges that need to be addressed if argument-based negotiation research is to reach its full potential.
http://eprints.soton.ac.uk/258850/
@article{eps258850,
  title     = {Argumentation-based negotiation},
  author    = {Rahwan, I. and Ramchurn, Sarvapali and Jennings, N. R. and McBurney, P. and Parsons, S. and Sonenberg, L.},
  url       = {http://eprints.soton.ac.uk/258850/},
  year      = {2003},
  date      = {2003-01-01},
  journal   = {The Knowledge Engineering Review},
  volume    = {18},
  number    = {4},
  pages     = {343--375},
  abstract  = {Negotiation is essential in settings where autonomous agents have conflicting interests and a desire to cooperate. For this reason, mechanisms in which agents exchange potential agreements according to various rules of interaction have become very popular in recent years as evident, for example, in the auction and mechanism design community. However, a growing body of research is now emerging which points out limitations in such mechanisms and advocates the idea that agents can increase the likelihood and quality of an agreement by exchanging arguments which influence each others' states. This community further argues that argument exchange is sometimes essential when various assumptions about agent rationality cannot be satisfied. To this end, in this article, we identify the main research motivations and ambitions behind work in the field. We then provide a conceptual framework through which we outline the core elements and features required by agents engaged in argumentation-based negotiation, as well as the environment that hosts these agents. For each of these elements, we survey and evaluate existing proposed techniques in the literature and highlight the major challenges that need to be addressed if argument-based negotiation research is to reach its full potential.},
  pubstate  = {published},
  tppubtype = {article}
}
Negotiation is essential in settings where autonomous agents have conflicting interests and a desire to cooperate. For this reason, mechanisms in which agents exchange potential agreements according to various rules of interaction have become very popular in recent years as evident, for example, in the auction and mechanism design community. However, a growing body of research is now emerging which points out limitations in such mechanisms and advocates the idea that agents can increase the likelihood and quality of an agreement by exchanging arguments which influence each others' states. This community further argues that argument exchange is sometimes essential when various assumptions about agent rationality cannot be satisfied. To this end, in this article, we identify the main research motivations and ambitions behind work in the field. We then provide a conceptual framework through which we outline the core elements and features required by agents engaged in argumentation-based negotiation, as well as the environment that hosts these agents. For each of these elements, we survey and evaluate existing proposed techniques in the literature and highlight the major challenges that need to be addressed if argument-based negotiation research is to reach its full potential.
http://eprints.soton.ac.uk/258850/
NOTE(review): exact duplicate of @article{eps258850} above (same citation key) -- it triggers a
"repeated entry" error in BibTeX/Biber. Disabled by removing the leading '@' so the parser
ignores this text; delete this block entirely once confirmed redundant.
article{eps258850,
title = {Argumentation-based negotiation},
author = {I. Rahwan and Sarvapali Ramchurn and N. R. Jennings and P. McBurney and S. Parsons and L. Sonenberg},
url = {http://eprints.soton.ac.uk/258850/},
year = {2003},
date = {2003-01-01},
journal = {The Knowledge Engineering Review},
volume = {18},
number = {4},
pages = {343--375},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Persuasive negotiation occurs when autonomous agents exchange proposals that are backed up by rhetorical arguments (such as threats, rewards, or appeals). The role of such rhetorical arguments is to persuade the negotiation opponent to accept proposals more readily. To this end, this paper presents a rhetorical model of persuasion that defines the main types of rhetorical particles that are used and that provides a decision making model to enable an agent to determine what type of rhetorical argument to send in a given context and how to evaluate rhetorical arguments that are received. The model is empirically evaluated and we show that it is effective and efficient in reaching agreements.
http://eprints.soton.ac.uk/258541/
@inproceedings{eps258541,
  title     = {Persuasive negotiation for autonomous agents: A rhetorical approach},
  author    = {Ramchurn, S. D. and Jennings, N. R. and Sierra, C.},
  url       = {http://eprints.soton.ac.uk/258541/},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {IJCAI Workshop on Computational Models of Natural Argument},
  pages     = {9--17},
  abstract  = {Persuasive negotiation occurs when autonomous agents exchange proposals that are backed up by rhetorical arguments (such as threats, rewards, or appeals). The role of such rhetorical arguments is to persuade the negotiation opponent to accept proposals more readily. To this end, this paper presents a rhetorical model of persuasion that defines the main types of rhetorical particles that are used and that provides a decision making model to enable an agent to determine what type of rhetorical argument to send in a given context and how to evaluate rhetorical arguments that are received. The model is empirically evaluated and we show that it is effective and efficient in reaching agreements.},
  pubstate  = {published},
  tppubtype = {inproceedings}
}