@inproceedings{nunez-marcos_using_2020,
  address = {Póvoa de Varzim, Portugal},
  title = {Using {External} {Knowledge} to {Improve} {Zero}-{Shot} {Action} {Recognition} in {Egocentric} {Videos}},
  doi = {10.1007/978-3-030-50347-5_16},
  abstract = {Zero-shot learning is a very promising research topic. For a vision-based action recognition system, for instance, zero-shot learning allows recognising actions never seen during the training phase. Previous works in zero-shot action recognition have exploited the visual appearance of input videos in several ways to infer actions. Here, we propose to add external knowledge to improve the performance of purely vision-based systems. Specifically, we have explored three different sources of knowledge in the form of text corpora. Our resulting system follows the literature and disentangles actions into verbs and objects. In particular, we independently train two vision-based detectors: (i) a verb detector and (ii) an active object detector. During inference, we combine the probability distributions generated from those detectors to obtain a probability distribution of actions. Finally, the vision-based estimation is further combined with an action prior extracted from text corpora (external knowledge). We evaluate our approach on the EGTEA Gaze+ dataset, an Egocentric Action Recognition dataset, demonstrating that the use of external knowledge improves the recognition of actions never seen by the detectors.},
  booktitle = {Proceedings of the 17th {International} {Conference} on {Image} {Analysis} and {Recognition}},
  author = {Núñez-Marcos, Adrián and Azkune, Gorka and Agirre, Eneko and López-de-Ipiña, Diego and Arganda-Carreras, Ignacio},
  month = jun,
  year = {2020},
  keywords = {Activity Recognition, Computer Vision, Deep Learning, Smart Environments, corec}
}
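As a rough illustration of the fusion step this abstract describes, the sketch below combines a verb distribution and an active-object distribution into per-action scores and mixes them with a text-corpus prior. The product rule for the two detectors, the mixing weight `alpha`, and every name and number here are assumptions chosen for illustration, not the authors' implementation.

```python
# Minimal sketch, assuming actions are (verb, object) pairs and that the
# detector outputs and the text-corpus prior have already been computed.
# The multiplicative fusion and the alpha mix are illustrative choices,
# not the paper's exact combination rule.

def combine_action_scores(p_verb, p_obj, prior, actions, alpha=0.5):
    """Score each action by fusing detector outputs with a text prior.

    p_verb : dict verb -> probability from the verb detector
    p_obj  : dict object -> probability from the active-object detector
    prior  : dict (verb, object) -> prior probability from text corpora
    actions: list of (verb, object) pairs forming the action vocabulary
    alpha  : assumed mixing weight between vision and the prior
    """
    scores = {}
    for verb, obj in actions:
        vision = p_verb.get(verb, 0.0) * p_obj.get(obj, 0.0)
        scores[(verb, obj)] = alpha * vision + (1 - alpha) * prior.get((verb, obj), 0.0)
    total = sum(scores.values()) or 1.0  # renormalise into a distribution
    return {action: s / total for action, s in scores.items()}

# Toy usage with made-up numbers:
actions = [("cut", "tomato"), ("wash", "tomato"), ("cut", "bread")]
p_verb = {"cut": 0.7, "wash": 0.3}
p_obj = {"tomato": 0.6, "bread": 0.4}
prior = {("cut", "tomato"): 0.5, ("wash", "tomato"): 0.2, ("cut", "bread"): 0.3}
print(combine_action_scores(p_verb, p_obj, prior, actions))
```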
@article{sanchez-corcuera_smart_2019,
  title = {Smart cities survey: {Technologies}, application domains and challenges for the cities of the future},
  volume = {15},
  issn = {1550-1477},
  shorttitle = {Smart cities survey},
  url = {https://doi.org/10.1177/1550147719853984},
  doi = {10.1177/1550147719853984},
  abstract = {The introduction of Information and Communication Technologies throughout the last decades has created a trend of providing daily objects with smartness, aiming to make human life more comfortable. The paradigm of Smart Cities arises as a response to the goal of creating the city of the future, where (1) the well-being and rights of citizens are guaranteed, and (2) industry and (3) urban planning are assessed from an environmental and sustainable viewpoint. Smart Cities still face some challenges in their implementation, but gradually more research projects on Smart Cities are funded and executed. Moreover, cities from all around the globe are implementing Smart City features to improve services or the quality of life of their citizens. Through this article, (1) we go through various definitions of Smart Cities in the literature, (2) we review the technologies and methodologies used nowadays, (3) we summarise the different domains of application where these technologies and methodologies are applied (e.g. health and education), (4) we show the cities that have integrated the Smart City paradigm into their daily functioning and (5) we provide a review of the open research challenges. Finally, we discuss the future opportunities for Smart Cities and the issues that must be tackled in order to move towards the cities of the future.},
  language = {en},
  number = {6},
  urldate = {2019-06-10},
  journal = {International Journal of Distributed Sensor Networks},
  author = {Sánchez-Corcuera, Ruben and Nuñez-Marcos, Adrián and Sesma-Solance, Jesus and Bilbao-Jayo, Aritz and Mulero, Rubén and Zulaika, Unai and Azkune, Gorka and Almeida, Aitor},
  month = jun,
  year = {2019},
  keywords = {Artificial Intelligence, IF1.151, IoT, Q4, Survey, architecture, co-creation, e-government, futuraal, smart cities},
  pages = {1550147719853984}
}

@article{nunez-marcos_vision-based_2017,
  title = {Vision-{Based} {Fall} {Detection} with {Convolutional} {Neural} {Networks}},
  issn = {1530-8669},
  abstract = {One of the biggest challenges in modern societies is the improvement of healthy aging and the support of older persons in their daily activities. In particular, given its social and economic impact, the automatic detection of falls has attracted considerable attention in the computer vision and pattern recognition communities. Although approaches based on wearable sensors have provided high detection rates, some potential users are reluctant to wear them and thus their use is not yet normalised. As a consequence, alternative approaches such as vision-based methods have emerged. We firmly believe that the irruption of the Smart Environments and Internet of Things paradigms, together with the increasing number of cameras in our daily environments, creates an optimal context for vision-based systems. Consequently, here we propose a vision-based solution using Convolutional Neural Networks to decide whether a sequence of frames contains a person falling. To model the video motion and make the system scenario-independent, we use optical flow images as input to the networks, followed by a novel three-step training phase. Furthermore, our method is evaluated on three public datasets, achieving state-of-the-art results in all three of them.},
  journal = {Wireless Communications \& Mobile Computing},
  author = {Núñez-Marcos, Adrián and Azkune, Gorka and Arganda-Carreras, Ignacio},
  month = nov,
  year = {2017},
  keywords = {AI for health, Activity Recognition, Computer Vision, Deep Learning, Smart Environments, jcr0.869, q4},
  pages = {25}
}
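For the fall detection entry above, the following is a minimal sketch of the kind of pipeline its abstract describes: a stack of optical-flow images fed to a CNN that decides fall / no-fall. The architecture, channel layout, and frame count are illustrative assumptions; the paper's actual network and its three-step training phase are not reproduced here.

```python
import torch
import torch.nn as nn

# Minimal sketch, assuming precomputed optical flow: each of the
# n_flow_frames contributes a horizontal and a vertical component,
# hence 2 * n_flow_frames input channels. The layer sizes are
# illustrative stand-ins, not the paper's architecture.

class FallDetector(nn.Module):
    def __init__(self, n_flow_frames=10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(2 * n_flow_frames, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.classifier = nn.Linear(64, 2)  # logits for fall / no-fall

    def forward(self, x):
        h = self.features(x).flatten(1)
        return self.classifier(h)

# Toy usage: a batch of 4 clips, 10 flow frames of 224x224 each.
model = FallDetector()
clips = torch.randn(4, 20, 224, 224)
logits = model(clips)
print(logits.shape)  # torch.Size([4, 2])
```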