% stray export artifact (leftover HTML "font" tag fragment from the publication-list export); text outside entries is ignored by BibTeX
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards. Journal Article
In: Artificial Intelligence Journal., vol. 171, no. 10-15, pp. 805–837, 2007.
Abstract | Links | BibTeX | Tags: Bargaining, Bilateral Negotiation., Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations
@article{eps264225,
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/264225/},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Artificial Intelligence},
  volume    = {171},
  number    = {10-15},
  pages     = {805--837},
  abstract  = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
  keywords  = {Bargaining, Bilateral Negotiation, Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations},
  pubstate  = {published},
  tppubtype = {article}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
Abstract | Links | BibTeX | Tags: Argumentation, Bargaining, Persuasive Negotiation
@inproceedings{eps262591,
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards. Journal Article
In: Artificial Intelligence Journal., vol. 171, no. 10-15, pp. 805–837, 2007.
@article{eps264225b,
  internal-note = {NOTE(review): exact duplicate of eps264225; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/264225/},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Artificial Intelligence},
  volume    = {171},
  number    = {10-15},
  pages     = {805--837},
  abstract  = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
  keywords  = {Bargaining, Bilateral Negotiation, Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations},
  pubstate  = {published},
  tppubtype = {article}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
@inproceedings{eps262591b,
  internal-note = {NOTE(review): exact duplicate of eps262591; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards. Journal Article
In: Artificial Intelligence Journal., vol. 171, no. 10-15, pp. 805–837, 2007.
Abstract | Links | BibTeX | Tags: Bargaining, Bilateral Negotiation., Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations
@article{eps264225c,
  internal-note = {NOTE(review): exact duplicate of eps264225; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/264225/},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Artificial Intelligence},
  volume    = {171},
  number    = {10-15},
  pages     = {805--837},
  abstract  = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
  keywords  = {Bargaining, Bilateral Negotiation, Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations},
  pubstate  = {published},
  tppubtype = {article}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
Abstract | Links | BibTeX | Tags: Argumentation, Bargaining, Persuasive Negotiation
@inproceedings{eps262591c,
  internal-note = {NOTE(review): exact duplicate of eps262591; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards. Journal Article
In: Artificial Intelligence Journal., vol. 171, no. 10-15, pp. 805–837, 2007.
@article{eps264225d,
  internal-note = {NOTE(review): exact duplicate of eps264225; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/264225/},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Artificial Intelligence},
  volume    = {171},
  number    = {10-15},
  pages     = {805--837},
  abstract  = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
  keywords  = {Bargaining, Bilateral Negotiation, Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations},
  pubstate  = {published},
  tppubtype = {article}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
@inproceedings{eps262591d,
  internal-note = {NOTE(review): exact duplicate of eps262591; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Multi-agent signal-less intersection management with dynamic platoon formation
AI Foundation Models: initial review, CMA Consultation, TAS Hub Response
The effect of data visualisation quality and task density on human-swarm interaction
Demonstrating performance benefits of human-swarm teaming
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards. Journal Article
In: Artificial Intelligence Journal., vol. 171, no. 10-15, pp. 805–837, 2007.
@article{eps264225e,
  internal-note = {NOTE(review): exact duplicate of eps264225; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/264225/},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Artificial Intelligence},
  volume    = {171},
  number    = {10-15},
  pages     = {805--837},
  abstract  = {Negotiation is a fundamental interaction mechanism in multi-agent systems because it allows self-interested agents to come to mutually beneficial agreements and partition resources efficiently and effectively. Now, in many situations, the agents need to negotiate with one another many times and so developing strategies that are effective over repeated interactions is an important challenge. Against this background, a growing body of work has examined the use of Persuasive Negotiation (PN), which involves negotiating using rhetorical arguments (such as threats, rewards, or appeals), in trying to convince an opponent to accept a given offer. Such mechanisms are especially suited to repeated encounters because they allow agents to influence the outcomes of future negotiations, while negotiating a deal in the present one, with the aim of producing results that are beneficial to both parties. To this end, in this paper, we develop a comprehensive PN mechanism for repeated interactions that makes use of rewards that can be asked for or given to. Our mechanism consists of two parts. First, a novel protocol that structures the interaction by capturing the commitments that agents incur when using rewards. Second, a new reward generation algorithm that constructs promises of rewards in future interactions as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. We then go on to develop a specific negotiation tactic, based on this reward generation algorithm, and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation in a Multi-Move Prisoners' dilemma setting, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this.},
  keywords  = {Bargaining, Bilateral Negotiation, Negotiation Tactics, Persuasive Negotiation, Repeated Negotiations},
  pubstate  = {published},
  tppubtype = {article}
}
Ramchurn, S. D.; Sierra, C.; Godo, L.; Jennings, N. R.
Negotiating using rewards Proceedings Article
In: 5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, pp. 400–407, 2006.
@inproceedings{eps262591e,
  internal-note = {NOTE(review): exact duplicate of eps262591; key suffixed to avoid BibTeX repeated-entry errors -- prefer deleting this entry.},
  title     = {Negotiating Using Rewards},
  author    = {Ramchurn, Sarvapali D. and Sierra, Carles and Godo, Llu{\'i}s and Jennings, Nicholas R.},
  url       = {http://eprints.soton.ac.uk/262591/},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Int. Conf. on Autonomous Agents and Multi-Agent Systems, Hakodate, Japan},
  pages     = {400--407},
  abstract  = {In situations where self-interested agents interact repeatedly, it is important that they are endowed with negotiation techniques that enable them to reach agreements that are profitable in the long run. To this end, we devise a novel negotiation algorithm that generates promises of rewards in future interactions, as a means of permitting agents to reach better agreements, in a shorter time, in the present encounter. Moreover, we thus develop a specific negotiation tactic based on this reward generation algorithm and show that it can achieve significantly better outcomes than existing benchmark tactics that do not use such inducements. Specifically, we show, via empirical evaluation, that our tactic can lead to a 26\% improvement in the utility of deals that are made and that 21 times fewer messages need to be exchanged in order to achieve this under concrete settings.},
  keywords  = {Argumentation, Bargaining, Persuasive Negotiation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}