@inproceedings{soton478647,
title = {Multi-agent signal-less intersection management with dynamic platoon formation},
author = {Worrawichaipat, Phuriwat and Gerding, Enrico and Kaparias, Ioannis and Ramchurn, Sarvapali},
url = {https://eprints.soton.ac.uk/478647/},
year = {2023},
date = {2023-05-01},
booktitle = {22nd International Conference on Autonomous Agents and Multiagent Systems},
pages = {1542--1550},
note = {Event Dates: 29 May -- 2 June 2023},
pubstate = {published},
tppubtype = {inproceedings}
}
@misc{soton477553,
title = {{AI} Foundation Models: initial review, {CMA} Consultation, {TAS} Hub Response},
author = {Krook, Joshua and McAuley, Derek and Anderson, Stuart and Downer, John and Winter, Peter and Ramchurn, Sarvapali D.},
url = {https://eprints.soton.ac.uk/477553/},
year = {2023},
date = {2023-06-01},
publisher = {University of Southampton},
pubstate = {published},
tppubtype = {misc}
}
@inproceedings{soton479970,
title = {The effect of data visualisation quality and task density on human-swarm interaction},
author = {Abioye, Ayodeji and Naiseh, Mohammad and Hunt, William and Clark, Jediah R. and Ramchurn, Sarvapali D. and Soorati, Mohammad},
url = {https://eprints.soton.ac.uk/479970/},
year = {2023},
date = {2023-06-01},
booktitle = {Proceedings of the 2023 32nd {IEEE} International Conference on Robot and Human Interactive Communication ({RO-MAN})},
publisher = {IEEE},
abstract = {Despite the advantages of having robot swarms, human supervision is required for real-world applications. The performance of the human-swarm system depends on several factors including the data availability for the human operators. In this paper, we study the human factors aspect of the human-swarm interaction and investigate how having access to high-quality data can affect the performance of the human-swarm system - the number of tasks completed and the human trust level in operation. We designed an experiment where a human operator is tasked to operate a swarm to identify casualties in an area within a given time period. One group of operators had the option to request high-quality pictures while the other group had to base their decision on the available low-quality images. We performed a user study with 120 participants and recorded their success rate (directly logged via the simulation platform) as well as their workload and trust level (measured through a questionnaire after completing a human-swarm scenario). The findings from our study indicated that the group granted access to high-quality data exhibited an increased workload and placed greater trust in the swarm, thus confirming our initial hypothesis. However, we also found that the number of accurately identified casualties did not significantly vary between the two groups, suggesting that data quality had no impact on the successful completion of tasks.},
pubstate = {published},
tppubtype = {inproceedings}
}
Autonomous swarms of robots can bring robustness, scalability and adaptability to safety-critical tasks such as search and rescue but their application is still very limited. Using semi-autonomous swarms with human control can bring robot swarms to real-world applications. Human operators can define goals for the swarm, monitor their performance and interfere with, or overrule, the decisions and behaviour. We present the "Human And Robot Interactive Swarm" simulator (HARIS) that allows multi-user interaction with a robot swarm and facilitates qualitative and quantitative user studies through simulation of robot swarms completing tasks, from package delivery to search and rescue, with varying levels of human control. In this demonstration, we showcase the simulator by using it to study the performance gain offered by maintaining a "human-in-the-loop" over a fully autonomous system as an example. This is illustrated in the context of search and rescue, with an autonomous allocation of resources to those in need.
https://eprints.soton.ac.uk/479903/
@inproceedings{soton479903,
title = {Demonstrating performance benefits of human-swarm teaming},
author = {Hunt, William and Ryan, Jack and Abioye, Ayodeji O. and Ramchurn, Sarvapali D. and Soorati, Mohammad D.},
url = {https://eprints.soton.ac.uk/479903/},
year = {2023},
date = {2023-05-01},
booktitle = {Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {3062--3064},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems (IFAAMAS)},
abstract = {Autonomous swarms of robots can bring robustness, scalability and adaptability to safety-critical tasks such as search and rescue but their application is still very limited. Using semi-autonomous swarms with human control can bring robot swarms to real-world applications. Human operators can define goals for the swarm, monitor their performance and interfere with, or overrule, the decisions and behaviour. We present the ``Human And Robot Interactive Swarm'' simulator (HARIS) that allows multi-user interaction with a robot swarm and facilitates qualitative and quantitative user studies through simulation of robot swarms completing tasks, from package delivery to search and rescue, with varying levels of human control. In this demonstration, we showcase the simulator by using it to study the performance gain offered by maintaining a ``human-in-the-loop'' over a fully autonomous system as an example. This is illustrated in the context of search and rescue, with an autonomous allocation of resources to those in need.},
pubstate = {published},
tppubtype = {inproceedings}
}
Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.
http://eprints.soton.ac.uk/264225/
@article{eps264225,
title = {Negotiating using rewards},
author = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'\i}s and Jennings, Nicholas R.},
url = {http://eprints.soton.ac.uk/264225/},
year = {2007},
date = {2007-01-01},
journal = {Artificial Intelligence},
volume = {171},
number = {10--15},
pages = {805--837},
abstract = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
pubstate = {published},
tppubtype = {article}
}
In this paper, we present our investigation into the use of a team of players within a noisy Iterated Prisoner's Dilemma (IPD) tournament. We show that the members of such a team are able to use a pre-arranged sequence of moves that they make at the start of each interaction in order to recognise one another, and that by coordinating their actions they can increase the chances that one of the team members wins the round-robin style tournament. We consider, in detail, the factors that influence the performance of this team and we show that the problem that the team members face, when they attempt to recognise one another within the noisy IPD tournament, is exactly analogous to the problem, studied in information theory, of communicating reliably over a noisy channel. Thus we demonstrate that we can use error correcting codes to implement this recognition, and by doing so, further optimise the performance of the team.
https://eprints.soton.ac.uk/263238/
@article{eps263238,
title = {Coordinating Team Players within a Noisy {Iterated Prisoner's Dilemma} Tournament},
author = {Rogers, Alex and Dash, Rajdeep K. and Ramchurn, Sarvapali D. and Vytelingum, Perukrishnen and Jennings, Nicholas R.},
url = {http://eprints.soton.ac.uk/263238/},
year = {2007},
date = {2007-01-01},
journal = {Theoretical Computer Science},
volume = {377},
number = {1--3},
pages = {243--259},
abstract = {In this paper, we present our investigation into the use of a team of players within a noisy Iterated Prisoner's Dilemma (IPD) tournament. We show that the members of such a team are able to use a pre-arranged sequence of moves that they make at the start of each interaction in order to recognise one another, and that by coordinating their actions they can increase the chances that one of the team members wins the round-robin style tournament. We consider, in detail, the factors that influence the performance of this team and we show that the problem that the team members face, when they attempt to recognise one another within the noisy IPD tournament, is exactly analogous to the problem, studied in information theory, of communicating reliably over a noisy channel. Thus we demonstrate that we can use error correcting codes to implement this recognition, and by doing so, further optimise the performance of the team.},
pubstate = {published},
tppubtype = {article}
}
@incollection{eps263264,
title = {Error-Correcting Codes for Team Coordination within a Noisy {Iterated Prisoner's Dilemma} Tournament},
author = {Rogers, Alex and Dash, Rajdeep K. and Ramchurn, Sarvapali D. and Vytelingum, Perukrishnen and Jennings, Nicholas R.},
editor = {Kendall, Graham and Yao, Xin and Chong, Siang Yew},
url = {http://eprints.soton.ac.uk/263264/},
year = {2007},
date = {2007-01-01},
booktitle = {The Iterated Prisoners' Dilemma Competition: Celebrating the 20th Anniversary},
pages = {205--229},
publisher = {World Scientific},
pubstate = {published},
tppubtype = {incollection}
}
Social influences play an important part in the actions that an individual agent may perform within a multi-agent society. However, the incomplete knowledge and the diverse and conflicting influences present within such societies, may stop an agent from abiding by all its social influences. This may, in turn, lead to conflicts that the agents need to identify, manage, and resolve in order for the society to behave in a coherent manner. To this end, we present an empirical study of an argumentation-based negotiation (ABN) approach that allows the agents to detect such conflicts, and then manage and resolve them through the use of argumentative dialogues. To test our theory, we map our ABN model to a multi-agent task allocation scenario. Our results show that using an argumentation approach allows agents to both efficiently and effectively manage their social influences even under high degrees of incompleteness. Finally, we show that allowing agents to argue and resolve such conflicts early in the negotiation encounter increases their efficiency in managing social influences.
http://eprints.soton.ac.uk/262022/
@inproceedings{eps262022,
title = {Managing Social Influences through Argumentation-Based Negotiation},
author = {Karunatillake, Nishan C. and Jennings, Nicholas R. and Rahwan, Iyad and Ramchurn, Sarvapali D.},
url = {http://eprints.soton.ac.uk/262022/},
year = {2006},
date = {2006-01-01},
booktitle = {Third International Workshop on Argumentation in Multi-Agent Systems ({ArgMAS} 2006)},
pages = {35--52},
abstract = {Social influences play an important part in the actions that an individual agent may perform within a multi-agent society. However, the incomplete knowledge and the diverse and conflicting influences present within such societies, may stop an agent from abiding by all its social influences. This may, in turn, lead to conflicts that the agents need to identify, manage, and resolve in order for the society to behave in a coherent manner. To this end, we present an empirical study of an argumentation-based negotiation (ABN) approach that allows the agents to detect such conflicts, and then manage and resolve them through the use of argumentative dialogues. To test our theory, we map our ABN model to a multi-agent task allocation scenario. Our results show that using an argumentation approach allows agents to both efficiently and effectively manage their social influences even under high degrees of incompleteness. Finally, we show that allowing agents to argue and resolve such conflicts early in the negotiation encounter increases their efficiency in managing social influences.},
note = {Event Dates: 8th May 2006},
pubstate = {published},
tppubtype = {inproceedings}
}
In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.
http://eprints.soton.ac.uk/262591/
@inproceedings{eps262591,
title = {Negotiating using rewards},
author = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'\i}s and Jennings, Nicholas R.},
url = {http://eprints.soton.ac.uk/262591/},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
pages = {400--407},
abstract = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
pubstate = {published},
tppubtype = {inproceedings}
}
Current mechanisms for evaluating the trustworthiness of an agent within an electronic marketplace depend either on using a history of interactions or on recommendations from other agents. In the first case, these requirements limit what an agent with no prior interaction history can do. In the second case, they transform the problem into one of trusting the recommending agent. However, these mechanisms do not consider the relationships between agents that arise through interactions (such as buying or selling) or through overarching organisational structures (such as hierarchical or flat), which can also aid in evaluating trustworthiness. In response, this paper outlines a method that enables agents to evaluate the trustworthiness of their counterparts, based solely on an analysis of such relationships. Specifically, relationships are identified using a generic technique in conjunction with a basic model for agent-based marketplaces. They are then interpreted through a trust model that enables the inference of trust valuations based on the different types of relationships. In this way, we provide a further component for a trust evaluation model that addresses some of the limitations of existing work.
http://eprints.soton.ac.uk/260806/
@inproceedings{eps260806,
title = {Trust evaluation through relationship analysis},
author = {Ashri, Ronald and Ramchurn, Sarvapali D. and Sabater, Jordi and Luck, Michael and Jennings, Nicholas R.},
url = {http://eprints.soton.ac.uk/260806/},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multi-Agent Systems},
pages = {1005--1011},
abstract = {Current mechanisms for evaluating the trustworthiness of an agent within an electronic marketplace depend either on using a history of interactions or on recommendations from other agents. In the first case, these requirements limit what an agent with no prior interaction history can do. In the second case, they transform the problem into one of trusting the recommending agent. However, these mechanisms do not consider the relationships between agents that arise through interactions (such as buying or selling) or through overarching organisational structures (such as hierarchical or flat), which can also aid in evaluating trustworthiness. In response, this paper outlines a method that enables agents to evaluate the trustworthiness of their counterparts, based solely on an analysis of such relationships. Specifically, relationships are identified using a generic technique in conjunction with a basic model for agent-based marketplaces. They are then interpreted through a trust model that enables the inference of trust valuations based on the different types of relationships. In this way, we provide a further component for a trust evaluation model that addresses some of the limitations of existing work.},
pubstate = {published},
tppubtype = {inproceedings}
}
We define Trusted Kernel-based Coalition Formation as a novel extension to the traditional kernel-based coalition formation process which ensures agents choose the most reliable coalition partners and are guaranteed to obtain the payment they deserve. To this end, we develop an encryption-based communication protocol and a payment scheme which ensure that agents cannot manipulate the mechanism to their own benefit. Moreover, we integrate a generic trust model in the coalition formation process that permits the selection of the most reliable agents over repeated coalition games. We empirically evaluate our mechanism when iterated and show that, in the long run, it always chooses the coalition structure that has the maximum expected value and determines the payoffs that match their level of reliability.
http://eprints.soton.ac.uk/260808/
@inproceedings{eps260808,
title = {Trusted kernel-based coalition formation},
author = {Blankenburg, Bastian and Dash, Rajdeep K. and Ramchurn, Sarvapali D. and Klusch, Matthias and Jennings, Nicholas R.},
url = {http://eprints.soton.ac.uk/260808/},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multi-Agent Systems},
pages = {989--996},
abstract = {We define Trusted Kernel-based Coalition Formation as a novel extension to the traditional kernel-based coalition formation process which ensures agents choose the most reliable coalition partners and are guaranteed to obtain the payment they deserve. To this end, we develop an encryption-based communication protocol and a payment scheme which ensure that agents cannot manipulate the mechanism to their own benefit. Moreover, we integrate a generic trust model in the coalition formation process that permits the selection of the most reliable agents over repeated coalition games. We empirically evaluate our mechanism when iterated and show that, in the long run, it always chooses the coalition structure that has the maximum expected value and determines the payoffs that match their level of reliability.},
pubstate = {published},
tppubtype = {inproceedings}
}