@comment{stray export artifact ("font") from the HTML scrape -- kept, explicitly marked as a comment}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali D.
The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot Proceedings Article
In: 19th Towards Autonomous Robotic Systems (TAROS) Conference 2018, pp. 423–437, Springer, 2018.
Abstract | Links | BibTeX | Tags: Aerobot (Aerial Robot), HCI (Human Computer Interaction), MCPU (Multimodal Control Processing Unit), mSVG (multimodal speech and visual gesture), nCA (Navigational Control Autonomy), SBC (Single Board Computer), Speech, visual gesture
@inproceedings{soton418869,
  title     = {The multimodal speech and visual gesture ({mSVG}) control model for a practical patrol, search, and rescue aerobot},
  author    = {Ayodeji, Opeyemi Abioye and Prior, Stephen and Thomas, Trevor and Saddington, Peter and Ramchurn, Sarvapali D.},
  url       = {https://eprints.soton.ac.uk/418869/},
  year      = {2018},
  date      = {2018-07-01},
  booktitle = {19th Towards Autonomous Robotic Systems (TAROS) Conference 2018},
  volume    = {10965},
  pages     = {423--437},
  publisher = {Springer},
  abstract  = {This paper describes a model of the multimodal speech and visual gesture (mSVG) control for aerobots operating at higher nCA autonomy levels, within the context of a patrol, search, and rescue application. The developed mSVG control architecture, its mathematical navigation model, and some high level command operation models were discussed. This was successfully tested using both MATLAB simulation and python based ROS Gazebo UAV simulations. Some limitations were identified, which formed the basis for the further works presented.},
  keywords  = {Aerobot (Aerial Robot), HCI (Human Computer Interaction), MCPU (Multimodal Control Processing Unit), mSVG (multimodal speech and visual gesture), nCA (Navigational Control Autonomy), SBC (Single Board Computer), Speech, visual gesture},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali
Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots Proceedings Article
In: IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018, pp. 842–845, IEEE, 2018.
Abstract | Links | BibTeX | Tags: Aerobot, mSVG (multimodal speech and visual gesture), nCA (navigation control autonomy), Speech
@inproceedings{soton418871,
  title     = {Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture ({mSVG}) interaction with aerobots},
  author    = {Ayodeji, Opeyemi Abioye and Prior, Stephen and Thomas, Trevor and Saddington, Peter and Ramchurn, Sarvapali D.},
  url       = {https://eprints.soton.ac.uk/418871/},
  year      = {2018},
  date      = {2018-06-01},
  booktitle = {IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018},
  pages     = {842--845},
  publisher = {IEEE},
  abstract  = {This paper discusses the research work conducted to quantify the effective range of lighting levels and ambient noise levels in order to inform the design and development of a multimodal speech and visual gesture (mSVG) control interface for the control of a UAV. Noise level variation from 55 dB to 85 dB is observed under control lab conditions to determine where speech commands for a UAV fails, and to consider why, and possibly suggest a solution around this. Similarly, lighting levels are varied within the control lab condition to determine a range of effective visibility levels. The limitation of this work and some further work from this were also presented.},
  keywords  = {Aerobot, mSVG (multimodal speech and visual gesture), nCA (navigation control autonomy), Speech},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali D.
The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot Proceedings Article
In: 19th Towards Autonomous Robotic Systems (TAROS) Conference 2018, pp. 423–437, Springer, 2018.
Duplicate of entry soton418869 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418869,
title = {The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali D. Ramchurn},
url = {https://eprints.soton.ac.uk/418869/},
year = {2018},
date = {2018-07-01},
booktitle = {19th Towards Autonomous Robotic Systems (TAROS) Conference 2018},
volume = {10965},
pages = {423--437},
publisher = {Springer},
abstract = {This paper describes a model of the multimodal speech and visual gesture (mSVG) control for aerobots operating at higher nCA autonomy levels, within the context of a patrol, search, and rescue application. The developed mSVG control architecture, its mathematical navigation model, and some high level command operation models were discussed. This was successfully tested using both MATLAB simulation and python based ROS Gazebo UAV simulations. Some limitations were identified, which formed the basis for the further works presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali
Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots Proceedings Article
In: IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018, pp. 842–845, IEEE, 2018.
Duplicate of entry soton418871 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418871,
title = {Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/418871/},
year = {2018},
date = {2018-06-01},
booktitle = {IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018},
pages = {842--845},
publisher = {IEEE},
abstract = {This paper discusses the research work conducted to quantify the effective range of lighting levels and ambient noise levels in order to inform the design and development of a multimodal speech and visual gesture (mSVG) control interface for the control of a UAV. Noise level variation from 55 dB to 85 dB is observed under control lab conditions to determine where speech commands for a UAV fails, and to consider why, and possibly suggest a solution around this. Similarly, lighting levels are varied within the control lab condition to determine a range of effective visibility levels. The limitation of this work and some further work from this were also presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali D.
The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot Proceedings Article
In: 19th Towards Autonomous Robotic Systems (TAROS) Conference 2018, pp. 423–437, Springer, 2018.
Abstract | Links | BibTeX | Tags: Aerobot (Aerial Robot), HCI (Human Computer Interaction), MCPU (Multimodal Control Processing Unit), mSVG (multimodal speech and visual gesture), nCA (Navigational Control Autonomy), SBC (Single Board Computer), Speech, visual gesture
Duplicate of entry soton418869 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418869,
title = {The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali D. Ramchurn},
url = {https://eprints.soton.ac.uk/418869/},
year = {2018},
date = {2018-07-01},
booktitle = {19th Towards Autonomous Robotic Systems (TAROS) Conference 2018},
volume = {10965},
pages = {423--437},
publisher = {Springer},
abstract = {This paper describes a model of the multimodal speech and visual gesture (mSVG) control for aerobots operating at higher nCA autonomy levels, within the context of a patrol, search, and rescue application. The developed mSVG control architecture, its mathematical navigation model, and some high level command operation models were discussed. This was successfully tested using both MATLAB simulation and python based ROS Gazebo UAV simulations. Some limitations were identified, which formed the basis for the further works presented.},
keywords = {Aerobot (Aerial Robot), HCI (Human Computer Interaction), MCPU (Multimodal Control Processing Unit), mSVG (multimodal speech and visual gesture), nCA (Navigational Control Autonomy), SBC (Single Board Computer), Speech, visual gesture},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali
Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots Proceedings Article
In: IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018, pp. 842–845, IEEE, 2018.
Abstract | Links | BibTeX | Tags: Aerobot, mSVG (multimodal speech and visual gesture), nCA (navigation control autonomy), Speech
Duplicate of entry soton418871 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418871,
title = {Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/418871/},
year = {2018},
date = {2018-06-01},
booktitle = {IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018},
pages = {842--845},
publisher = {IEEE},
abstract = {This paper discusses the research work conducted to quantify the effective range of lighting levels and ambient noise levels in order to inform the design and development of a multimodal speech and visual gesture (mSVG) control interface for the control of a UAV. Noise level variation from 55 dB to 85 dB is observed under control lab conditions to determine where speech commands for a UAV fails, and to consider why, and possibly suggest a solution around this. Similarly, lighting levels are varied within the control lab condition to determine a range of effective visibility levels. The limitation of this work and some further work from this were also presented.},
keywords = {Aerobot, mSVG (multimodal speech and visual gesture), nCA (navigation control autonomy), Speech},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali D.
The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot Proceedings Article
In: 19th Towards Autonomous Robotic Systems (TAROS) Conference 2018, pp. 423–437, Springer, 2018.
Duplicate of entry soton418869 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418869,
title = {The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali D. Ramchurn},
url = {https://eprints.soton.ac.uk/418869/},
year = {2018},
date = {2018-07-01},
booktitle = {19th Towards Autonomous Robotic Systems (TAROS) Conference 2018},
volume = {10965},
pages = {423--437},
publisher = {Springer},
abstract = {This paper describes a model of the multimodal speech and visual gesture (mSVG) control for aerobots operating at higher nCA autonomy levels, within the context of a patrol, search, and rescue application. The developed mSVG control architecture, its mathematical navigation model, and some high level command operation models were discussed. This was successfully tested using both MATLAB simulation and python based ROS Gazebo UAV simulations. Some limitations were identified, which formed the basis for the further works presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali
Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots Proceedings Article
In: IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018, pp. 842–845, IEEE, 2018.
Duplicate of entry soton418871 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418871,
title = {Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/418871/},
year = {2018},
date = {2018-06-01},
booktitle = {IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018},
pages = {842--845},
publisher = {IEEE},
abstract = {This paper discusses the research work conducted to quantify the effective range of lighting levels and ambient noise levels in order to inform the design and development of a multimodal speech and visual gesture (mSVG) control interface for the control of a UAV. Noise level variation from 55 dB to 85 dB is observed under control lab conditions to determine where speech commands for a UAV fails, and to consider why, and possibly suggest a solution around this. Similarly, lighting levels are varied within the control lab condition to determine a range of effective visibility levels. The limitation of this work and some further work from this were also presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Multi-agent signal-less intersection management with dynamic platoon formation
AI Foundation Models: initial review, CMA Consultation, TAS Hub Response
The effect of data visualisation quality and task density on human-swarm interaction
Demonstrating performance benefits of human-swarm teaming
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali D.
The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot Proceedings Article
In: 19th Towards Autonomous Robotic Systems (TAROS) Conference 2018, pp. 423–437, Springer, 2018.
Duplicate of entry soton418869 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418869,
title = {The multimodal speech and visual gesture (mSVG) control model for a practical patrol, search, and rescue aerobot},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali D. Ramchurn},
url = {https://eprints.soton.ac.uk/418869/},
year = {2018},
date = {2018-07-01},
booktitle = {19th Towards Autonomous Robotic Systems (TAROS) Conference 2018},
volume = {10965},
pages = {423--437},
publisher = {Springer},
abstract = {This paper describes a model of the multimodal speech and visual gesture (mSVG) control for aerobots operating at higher nCA autonomy levels, within the context of a patrol, search, and rescue application. The developed mSVG control architecture, its mathematical navigation model, and some high level command operation models were discussed. This was successfully tested using both MATLAB simulation and python based ROS Gazebo UAV simulations. Some limitations were identified, which formed the basis for the further works presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayodeji, Opeyemi Abioye; Prior, Stephen; Thomas, Trevor; Saddington, Peter; Ramchurn, Sarvapali
Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots Proceedings Article
In: IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018, pp. 842–845, IEEE, 2018.
Duplicate of entry soton418871 above (repeated entry key) -- disabled by removing the leading "@"; BibTeX ignores text outside entries:
inproceedings{soton418871,
title = {Quantifying the effects of varying light-visibility and noise-sound levels in practical multimodal speech and visual gesture (mSVG) interaction with aerobots},
author = {Opeyemi Abioye Ayodeji and Stephen Prior and Trevor Thomas and Peter Saddington and Sarvapali Ramchurn},
url = {https://eprints.soton.ac.uk/418871/},
year = {2018},
date = {2018-06-01},
booktitle = {IEEE International Conference on Applied System Innovation (IEEE ICASI) 2018},
pages = {842--845},
publisher = {IEEE},
abstract = {This paper discusses the research work conducted to quantify the effective range of lighting levels and ambient noise levels in order to inform the design and development of a multimodal speech and visual gesture (mSVG) control interface for the control of a UAV. Noise level variation from 55 dB to 85 dB is observed under control lab conditions to determine where speech commands for a UAV fails, and to consider why, and possibly suggest a solution around this. Similarly, lighting levels are varied within the control lab condition to determine a range of effective visibility levels. The limitation of this work and some further work from this were also presented.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}