Ramchurn, Sarvapali D.; Mezzetti, Claudio; Giovannucci, Andrea; Rodriguez, Juan A.; Dash, Rajdeep K.; Jennings, Nicholas R.
Trust-based mechanisms for robust and efficient task allocation in the presence of execution uncertainty Journal Article
In: Journal of Artificial Intelligence Research, vol. 35, pp. 1–41, 2009.
@article{eps267288,
title = {Trust-based mechanisms for robust and efficient task allocation in the presence of execution uncertainty},
author = {Sarvapali D. Ramchurn and Claudio Mezzetti and Andrea Giovannucci and Juan A. Rodriguez and Rajdeep K. Dash and Nicholas R. Jennings},
url = {http://eprints.soton.ac.uk/267288/},
year = {2009},
date = {2009-01-01},
journal = {Journal of Artificial Intelligence Research},
volume = {35},
pages = {1–41},
abstract = {Vickrey-Clarke-Groves (VCG) mechanisms are often used to allocate tasks to selfish and rational agents. VCG mechanisms are incentive-compatible, direct mechanisms that are efficient (i.e. maximise social utility) and individually rational (i.e. agents prefer to join rather than opt out). However, an important assumption of these mechanisms is that the agents will always successfully complete their allocated tasks. Clearly, this assumption is unrealistic in many real-world applications where agents can, and often do, fail in their endeavours. Moreover, whether an agent is deemed to have failed may be perceived differently by different agents. Such subjective perceptions about an agent's probability of succeeding at a given task are often captured and reasoned about using the notion of trust. Given this background, in this paper we investigate the design of novel mechanisms that take into account the trust between agents when allocating tasks. Specifically, we develop a new class of mechanisms, called trust-based mechanisms, that can take into account multiple subjective measures of the probability of an agent succeeding at a given task and produce allocations that maximise social utility, whilst ensuring that no agent obtains a negative utility. We then show that such mechanisms pose a challenging new combinatorial optimisation problem (that is NP-complete), devise a novel representation for solving the problem, and develop an effective integer programming solution (that can solve instances with about 2x10^5 possible allocations in 40 seconds).},
keywords = {mechanism design, optimisation, Trust, uncertainty},
pubstate = {published},
tppubtype = {article}
}
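At its core, the mechanism in the abstract above chooses the allocation that maximises expected social utility under subjective success probabilities. The Python sketch below is an illustration only, not the paper's formulation: the function and variable names, the additive value/cost model, and the single aggregated trust score per agent-task pair are assumptions for exposition, and the brute-force search stands in for the paper's integer-programming solution.

from itertools import product

def best_allocation(tasks, agents, trust, cost, value):
    # trust[(a, t)]: aggregated subjective probability that agent a completes task t
    # cost[(a, t)]: a's cost of attempting t; value[t]: social value of t when completed
    best, best_u = None, float("-inf")
    # Enumerate every task->agent assignment (exponential; the paper's integer
    # program is what makes ~2x10^5 possible allocations solvable in seconds).
    for assignment in product(agents, repeat=len(tasks)):
        u = sum(value[t] * trust[(a, t)] - cost[(a, t)]
                for t, a in zip(tasks, assignment))
        if u > best_u:
            best, best_u = dict(zip(tasks, assignment)), u
    return best, best_u

tasks = ["deliver", "scan"]
agents = ["a1", "a2"]
trust = {("a1", "deliver"): 0.9, ("a1", "scan"): 0.4,
         ("a2", "deliver"): 0.6, ("a2", "scan"): 0.8}
cost = {(a, t): 1.0 for a in agents for t in tasks}
value = {"deliver": 10.0, "scan": 5.0}
print(best_allocation(tasks, agents, trust, cost, value))
# -> ({'deliver': 'a1', 'scan': 'a2'}, 11.0)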
Ashri, R.; Ramchurn, S. D.; Sabater, J.; Luck, M.; Jennings, N. R.
Trust evaluation through relationship analysis Proceedings Article
In: 4th Int Joint Conf. on Autonomous Agents and Multi-Agent Systems, pp. 1005–1011, 2005.
@inproceedings{eps260806,
title = {Trust evaluation through relationship analysis},
author = {R. Ashri and S. D. Ramchurn and J. Sabater and M. Luck and N. R. Jennings},
url = {http://eprints.soton.ac.uk/260806/},
year = {2005},
date = {2005-01-01},
booktitle = {4th Int Joint Conf. on Autonomous Agents and Multi-Agent Systems},
journal = {Proceedings: 4th International Joint Conference on Autonomous Agents and Multi-Agent Systems},
pages = {1005–1011},
abstract = {Current mechanisms for evaluating the trustworthiness of an agent within an electronic marketplace depend either on using a history of interactions or on recommendations from other agents. In the first case, this requirement limits what an agent with no prior interaction history can do. In the second case, it transforms the problem into one of trusting the recommending agent. However, these mechanisms do not consider the relationships between agents that arise through interactions (such as buying or selling) or through overarching organisational structures (such as hierarchical or flat), which can also aid in evaluating trustworthiness. In response, this paper outlines a method that enables agents to evaluate the trustworthiness of their counterparts, based solely on an analysis of such relationships. Specifically, relationships are identified using a generic technique in conjunction with a basic model for agent-based marketplaces. They are then interpreted through a trust model that enables the inference of trust valuations based on the different types of relationships. In this way, we provide a further component for a trust evaluation model that addresses some of the limitations of existing work.},
keywords = {Multi-Agent Systems, Relationships, Reputation, Trust},
pubstate = {published},
tppubtype = {inproceedings}
}
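The paper's central idea, evaluating trustworthiness from relationship types alone (no interaction history, no recommendations), can be sketched as a simple scoring scheme. Everything below is invented for illustration: the relationship taxonomy, the additive weights, and the clamping to [0, 1] are placeholders, not the paper's trust model.

RELATIONSHIP_TRUST = {
    "same_organisation": +0.3,           # overarching structure implies oversight
    "trades_with_common_partner": +0.2,  # shared counterparties suggest accountability
    "competitor": -0.2,                  # conflicting incentives lower trust
    "controls_resources_of": -0.3,       # power asymmetry raises risk
}

def infer_trust(relationships, base=0.5):
    # Start from a neutral prior and apply one adjustment per observed relationship.
    score = base + sum(RELATIONSHIP_TRUST.get(r, 0.0) for r in relationships)
    return max(0.0, min(1.0, score))  # keep the valuation in [0, 1]

print(infer_trust(["same_organisation", "competitor"]))  # -> 0.6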
Ramchurn, Sarvapali
Multi-Agent Negotiation using Trust and Persuasion PhD Thesis
University of Southampton, 2004.
@phdthesis{eps260200,
title = {Multi-Agent Negotiation using Trust and Persuasion},
author = {Sarvapali Ramchurn},
url = {http://eprints.soton.ac.uk/260200/},
year = {2004},
date = {2004-01-01},
school = {University of Southampton},
abstract = {In this thesis, we propose a panoply of tools and techniques to manage inter-agent dependencies in open, distributed multi-agent systems that have significant degrees of uncertainty. In particular, we focus on situations in which agents are involved in repeated interactions where they need to negotiate to resolve conflicts that may arise between them. To this end, we endow agents with decision-making models that exploit the notion of trust and use persuasive techniques during the negotiation process to reduce the level of uncertainty and achieve better deals in the long run. Firstly, we develop and evaluate a new trust model (called CREDIT) that allows agents to measure the degree of trust they should place in their opponents. This model reduces the uncertainty that agents have about their opponents' reliability. Thus, over repeated interactions, CREDIT enables agents to model their opponents' reliability using probabilistic techniques and a fuzzy reasoning mechanism that allows the combination of measures based on reputation (indirect interactions) and confidence (direct interactions). In so doing, CREDIT takes a wider range of behaviour-influencing factors into account than existing models, including the norms of the agents and the institution within which transactions occur. We then explore a novel application of trust models by showing how the measures developed in CREDIT can be applied to negotiations in multiple encounters. Specifically, we show that agents that use CREDIT are able to avoid unreliable agents, both during the selection of interaction partners and during the negotiation process itself, by using trust to adjust their negotiation stance. Also, we empirically show that agents are able to reach good deals with agents that are unreliable to some degree (rather than completely unreliable) and with those that try to strategically exploit their opponent. Secondly, having applied CREDIT to negotiations, we further extend the application of trust to reduce uncertainty about the reliability of agents in mechanism design (where the honesty of agents is elicited by the protocol). Thus, we develop trust-based mechanism design (TBMD), which allows agents using a trust model (such as CREDIT) to reach efficient agreements that choose the most reliable agents in the long run. In particular, we show that our mechanism enforces truth-telling from the agents (i.e. it is incentive compatible), both about the perceived reliability of their opponents and about their valuations for the goods to be traded. In proving the latter properties, our trust-based mechanism is shown to be the first reputation mechanism that implements individual rationality, incentive compatibility, and efficiency. Our trust-based mechanism is also empirically evaluated and shown to be better than other comparable models in reaching the outcome that maximises all the negotiating agents' utilities and in choosing the most reliable agents in the long run. Thirdly, having explored ways to reduce uncertainties about reliability and honesty, we use persuasive negotiation techniques to tackle issues associated with uncertainties that agents have about the preferences and the space of possible agreements. To this end, we propose a novel protocol and reasoning mechanism that agents can use to generate and evaluate persuasive elements, such as promises of future rewards, to support the offers they make during negotiation. These persuasive elements aim to make offers more attractive over multiple encounters given the absence of information about an opponent's discount factors or exact payoffs. Specifically, we empirically demonstrate that agents are able to achieve a larger number of agreements and a higher expected utility over repeated encounters when they are given the capability to give or ask for rewards. Moreover, we develop a novel strategy using this protocol and show that it outperforms existing state-of-the-art heuristic negotiation models. Finally, the applicability of persuasive negotiation and CREDIT is exemplified through a practical implementation in a pervasive computing environment. In this context, the negotiation mechanism is implemented in an instant messaging platform (JABBER) and used to resolve conflicts between group and individual preferences that arise in a meeting room scenario. In particular, we show how persuasive negotiation and trust permit a flexible management of interruptions by allowing intrusions to happen at appropriate times during the meeting while still managing to satisfy the preferences of all parties present.},
keywords = {Argumentation-based Negotiation, multi-agent systems, Negotiation, Persuasion, Trust},
pubstate = {published},
tppubtype = {phdthesis}
}
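One strand of the abstract above, combining reputation (indirect interactions) with confidence (direct interactions), lends itself to a small worked sketch. The linear blending rule and the parameter k below are assumptions for illustration only; CREDIT itself combines these measures with probabilistic techniques and fuzzy reasoning.

def blended_trust(direct_outcomes, reputation, k=5):
    # direct_outcomes: 1/0 successes observed first-hand (confidence source)
    # reputation: [0, 1] score aggregated from other agents' reports
    # k: how much direct experience it takes before it dominates the estimate
    n = len(direct_outcomes)
    confidence = sum(direct_outcomes) / n if n else 0.0
    w = n / (n + k)  # weight shifts from reputation toward direct evidence
    return w * confidence + (1 - w) * reputation

# With no direct history the estimate is pure reputation; it drifts toward
# the observed success rate (0.75 here) as interactions accumulate.
print(blended_trust([], reputation=0.4))            # -> 0.4
print(blended_trust([1, 1, 0, 1], reputation=0.4))  # -> ~0.56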