% NOTE(review): removed stray "font" token — CSS/HTML residue from the web
% export. Text outside @entry{...} blocks is ignored by BibTeX.
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
Abstract | Links | BibTeX | Tags: Argumentation, Bargaining, Persuasive Negotiation
@inproceedings{eps262591,
  title     = {Negotiating using rewards},
  author    = {Ramchurn, S. D. and Sierra, C. and Godo, L. and Jennings, N. R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems},
  venue     = {Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
% NOTE(review): exact duplicate of entry eps262591 (same citation key), which
% makes BibTeX raise a "repeated entry" error. Disabled by removing the
% leading "@" so the text is ignored; delete once confirmed redundant.
inproceedings{eps262591,
title = {Negotiating using rewards},
author = {S. D. Ramchurn and C. Sierra and L. Godo and N. R. Jennings},
url = {http://eprints.soton.ac.uk/262591/},
year = {2006},
date = {2006-01-01},
booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems},
journal = {Proc. 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
pages = {400–407},
abstract = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly bettter outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
Abstract | Links | BibTeX | Tags: Argumentation, Bargaining, Persuasive Negotiation
% NOTE(review): exact duplicate of entry eps262591 (same citation key), which
% makes BibTeX raise a "repeated entry" error. Disabled by removing the
% leading "@" so the text is ignored; delete once confirmed redundant.
inproceedings{eps262591,
title = {Negotiating using rewards},
author = {S. D. Ramchurn and C. Sierra and L. Godo and N. R. Jennings},
url = {http://eprints.soton.ac.uk/262591/},
year = {2006},
date = {2006-01-01},
booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems},
journal = {Proc. 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
pages = {400–407},
abstract = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly bettter outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
keywords = {Argumentation, Bargaining, Persuasive Negotiation},
pubstate = {published},
tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
% NOTE(review): exact duplicate of entry eps262591 (same citation key), which
% makes BibTeX raise a "repeated entry" error. Disabled by removing the
% leading "@" so the text is ignored; delete once confirmed redundant.
inproceedings{eps262591,
title = {Negotiating using rewards},
author = {S. D. Ramchurn and C. Sierra and L. Godo and N. R. Jennings},
url = {http://eprints.soton.ac.uk/262591/},
year = {2006},
date = {2006-01-01},
booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems},
journal = {Proc. 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
pages = {400–407},
abstract = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly bettter outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
% NOTE(review): the four titles below are export artifacts from other
% publications — no entry data accompanies them. Harmless to BibTeX (ignored
% outside @entries) but they should be removed or expanded into full entries.
Multi-agent signal-less intersection management with dynamic platoon formation
AI Foundation Models: initial review, CMA Consultation, TAS Hub Response
The effect of data visualisation quality and task density on human-swarm interaction
Demonstrating performance benefits of human-swarm teaming
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
% NOTE(review): exact duplicate of entry eps262591 (same citation key), which
% makes BibTeX raise a "repeated entry" error. Disabled by removing the
% leading "@" so the text is ignored; delete once confirmed redundant.
inproceedings{eps262591,
title = {Negotiating using rewards},
author = {S. D. Ramchurn and C. Sierra and L. Godo and N. R. Jennings},
url = {http://eprints.soton.ac.uk/262591/},
year = {2006},
date = {2006-01-01},
booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems},
journal = {Proc. 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
pages = {400–407},
abstract = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly bettter outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}