? – Present
Almeida, Aitor; Bermejo, Unai; Bilbao Jayo, Aritz; Azkune, Gorka; Aguilera, Unai; Emaldi, Mikel; Dornaika, Fadi; Arganda-Carreras, Ignacio (2022). "A Comparative Analysis of Human Behavior Prediction Approaches in Intelligent Environments." Sensors, 22(3), 701. DOI: 10.3390/s22030701.

Bermejo, Unai; Almeida, Aitor; Bilbao Jayo, Aritz; Azkune, Gorka (2021). "Embedding-based real-time change point detection with application to activity segmentation in smart home time series data." Expert Systems with Applications, 185, 115641. DOI: 10.1016/j.eswa.2021.115641.

Lopez-Guede, Jose Manuel; Ramos-Hernanz, Josean; Altın, Necmi; Ozdemir, Saban; Kurt, Erol; Azkune, Gorka. "Neural Modeling of Fuzzy Controllers for Maximum Power Point Tracking in Photovoltaic Energy Systems." Journal of Electronic Materials, 1–14. DOI: 10.1007/s11664-018-6407-2.

Núñez-Marcos, Adrián; Azkune, Gorka; Agirre, Eneko; López-de-Ipiña, Diego; Arganda-Carreras, Ignacio (2020). "Using External Knowledge to Improve Zero-Shot Action Recognition in Egocentric Videos." In Proceedings of the 17th International Conference on Image Analysis and Recognition, Póvoa de Varzim, Portugal. DOI: 10.1007/978-3-030-50347-5_16.

Azkune, Gorka; Almeida, Aitor; Agirre, Eneko (2020). "Cross-environment activity recognition using word embeddings for sensor and activity representation." Neurocomputing, 418, 280–290. DOI: 10.1016/j.neucom.2020.08.044.
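The cross-environment activity recognition paper above (Azkune, Almeida and Agirre, 2020) represents sensor and activity names with word embeddings so that an unseen home can be handled by semantic similarity alone. The snippet below is a minimal illustrative sketch of that general idea, not the published system: the four-dimensional vectors, sensor names and activity labels are invented stand-ins for pretrained word embeddings and real smart-home data.

```python
# Illustrative sketch (not the authors' implementation): classifying a segment of
# sensor activations from an unseen home by embedding similarity.  The 4-d vectors
# below are invented stand-ins for pretrained word embeddings.
import math

def cosine(u, v):
    dot = sum(a * b for a, b in zip(u, v))
    nu = math.sqrt(sum(a * a for a in u))
    nv = math.sqrt(sum(b * b for b in v))
    return dot / (nu * nv)

# Toy embeddings for words appearing in sensor and activity names; sensors from
# any environment map into the same space, so no target-home labels are needed.
emb = {
    "kettle":     [0.9, 0.1, 0.0, 0.2],
    "stove":      [0.8, 0.2, 0.1, 0.1],
    "shower":     [0.0, 0.9, 0.1, 0.0],
    "toothbrush": [0.1, 0.8, 0.2, 0.0],
    "cooking":    [0.9, 0.1, 0.1, 0.1],
    "grooming":   [0.0, 0.9, 0.2, 0.0],
}

def classify_segment(sensor_words, activities):
    """Assign the activity whose embedding is closest to the mean sensor vector."""
    dim = len(next(iter(emb.values())))
    mean = [sum(emb[w][i] for w in sensor_words) / len(sensor_words) for i in range(dim)]
    return max(activities, key=lambda a: cosine(mean, emb[a]))

print(classify_segment(["kettle", "stove"], ["cooking", "grooming"]))        # -> cooking
print(classify_segment(["shower", "toothbrush"], ["cooking", "grooming"]))   # -> grooming
```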
Sánchez-Corcuera, Ruben; Nuñez-Marcos, Adrián; Sesma-Solance, Jesus; Bilbao-Jayo, Aritz; Mulero, Rubén; Zulaika, Unai; Azkune, Gorka; Almeida, Aitor (2019). "Smart cities survey: Technologies, application domains and challenges for the cities of the future." International Journal of Distributed Sensor Networks, 15(6). DOI: 10.1177/1550147719853984.

Azkune, Gorka; Almeida, Aitor (2018). "A Scalable Hybrid Activity Recognition Approach for Intelligent Environments." IEEE Access, 6, 41745–41759. DOI: 10.1109/ACCESS.2018.2861004.

Almeida, Aitor; Azkune, Gorka; Bilbao Jayo, Aritz (2018). "Embedding-level attention and multi-scale convolutional neural networks for behaviour modelling." In IEEE SmartWorld 2018, Guangzhou, China. DOI: 10.1109/SmartWorld.2018.00103.

Almeida, Aitor; Azkune, Gorka (2018). "Predicting Human Behaviour with Recurrent Neural Networks." Applied Sciences, 8(2), 305. DOI: 10.3390/app8020305.

Mulero, Rubén; Almeida, Aitor; Azkune, Gorka; Abril, Patricia; Arredondo, Maria Teresa; Paramo, Miguel; Patrono, Luigi; Rametta, Piercosimo; Sergi, Ilaria (2018). "An IoT-aware Approach for Elderly-Friendly Cities." IEEE Access. DOI: 10.1109/ACCESS.2018.2800161.
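"Predicting Human Behaviour with Recurrent Neural Networks" (Almeida and Azkune, 2018) models a user's action stream with an LSTM that outputs a probability distribution over the next action. The sketch below shows that general embedding–LSTM–softmax pattern in PyTorch; the vocabulary size, layer dimensions and random input are invented, and it does not reproduce the City4Age architecture or its training procedure.

```python
# Minimal sketch (assumptions: PyTorch available; toy action vocabulary and
# dimensions are invented, not taken from the paper).
import torch
import torch.nn as nn

class NextActionLSTM(nn.Module):
    """Embeds a sequence of discrete actions and predicts the next one."""
    def __init__(self, num_actions, emb_dim=32, hidden=64):
        super().__init__()
        self.embed = nn.Embedding(num_actions, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
        self.head = nn.Linear(hidden, num_actions)

    def forward(self, seq):                  # seq: (batch, time) action ids
        x = self.embed(seq)                  # (batch, time, emb_dim)
        out, _ = self.lstm(x)                # (batch, time, hidden)
        return self.head(out[:, -1, :])      # logits over the next action

model = NextActionLSTM(num_actions=10)
batch = torch.randint(0, 10, (4, 8))         # 4 sequences of 8 past actions
probs = torch.softmax(model(batch), dim=-1)  # probability of each candidate next action
print(probs.shape)                           # torch.Size([4, 10])
```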
Kamara-Esteban, Oihane; Azkune, Gorka; Pijoan, Ander; Borges, Cruz E.; Alonso-Vicario, Ainhoa; López-de-Ipiña, Diego (2017). "MASSHA: An agent-based approach for human activity simulation in intelligent environments." Pervasive and Mobile Computing. DOI: 10.1016/j.pmcj.2017.07.007.

Lopez Guede, Jose Manuel; Ramos Hernanz, Jose Antonio; Zulueta, Ekaitz; Fernandez Gamiz, Unai; Azkune, Gorka (2017). "Dual model oriented modeling of monocrystalline PV modules based on artificial neuronal networks." International Journal of Hydrogen Energy. DOI: 10.1016/j.ijhydene.2017.02.062.

Mulero, Rubén; Almeida, Aitor; Azkune, Gorka; Mainetti, Luca; Mighali, Vincenzo; Patrono, Luigi; Rametta, Piercosimo; Sergi, Ilaria (2017). "An AAL system based on IoT Technologies and Linked Open Data for elderly monitoring in Smart Cities." In 2nd International Multidisciplinary Conference on Computer and Energy Science, Split.

Núñez-Marcos, Adrián; Azkune, Gorka; Arganda-Carreras, Ignacio (2017). "Vision-Based Fall Detection with Convolutional Neural Networks." Wireless Communications & Mobile Computing.

Almeida, Aitor; Azkune, Gorka (2017). "Inter-activity Behaviour Modelling Using Long Short-Term Memory Networks." In Ubiquitous Computing and Ambient Intelligence (Lecture Notes in Computer Science), Springer, Cham, 394–399. DOI: 10.1007/978-3-319-67585-5_41.
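"Vision-Based Fall Detection with Convolutional Neural Networks" (Núñez-Marcos, Azkune and Arganda-Carreras, 2017) classifies stacks of optical-flow images as falls or non-falls. The toy network below only illustrates the input convention (two flow channels per frame, stacked along the channel axis); its depth, layer sizes and the absence of transfer learning or the paper's three-step training phase are simplifications invented for the example.

```python
# Sketch only: a small CNN that labels a stack of optical-flow frames as
# "fall" / "no fall".  Shapes and layer sizes are invented.
import torch
import torch.nn as nn

class FlowFallNet(nn.Module):
    def __init__(self, flow_frames=10):
        super().__init__()
        # Each optical-flow frame contributes 2 channels (horizontal/vertical motion).
        self.features = nn.Sequential(
            nn.Conv2d(2 * flow_frames, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.classifier = nn.Linear(64, 2)   # fall / no fall

    def forward(self, x):                    # x: (batch, 2*flow_frames, H, W)
        return self.classifier(self.features(x).flatten(1))

net = FlowFallNet()
clip = torch.randn(1, 20, 112, 112)          # one stack of 10 flow frames
print(net(clip).shape)                       # torch.Size([1, 2])
```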
Almeida, Aitor; Azkune, Gorka (2017). "Activity Recognition Approaches for Smart Cities: The City4Age use case." In Proceedings of the 3rd International Forum on Research and Technologies for Society and Industry.

Azkune, Gorka; Ausin, David; Lopez de Ipina, Diego (2016). "Reasoning Systems for AAL." In Active and Assisted Living: Technologies and Applications, The Institution of Engineering and Technology.

Azkune, Gorka; Almeida, Aitor; López-de-Ipiña, Diego; Chen, Liming (2015). "Combining Users' Activity Survey and Simulators to Evaluate Human Activity Recognition Systems." Sensors, 15(4), 8192–8213. DOI: 10.3390/s150408192.

Azkune, Gorka; Almeida, Aitor; Lopez de Ipina, Diego; Chen, Liming Luke (2015). "Ezagutzan Oinarritutako Giza-Jardueren Eredu Dinamiko eta Pertsonalizatuak Ikasten (2)" [Learning dynamic and personalised knowledge-based human activity models (2)]. Ekaia. Euskal Herriko Unibertsitateko Zientzi eta Teknologi Aldizkaria. DOI: 10.1387/ekaia.14662.

Azkune, Gorka; Almeida, Aitor; Lopez de Ipina, Diego; Chen, Liming Luke (2015). "Ezagutzan Oinarritutako Giza-Jardueren Eredu Dinamiko eta Pertsonalizatuak Ikasten" [Learning dynamic and personalised knowledge-based human activity models]. In I. Ikergazte: Nazioarteko ikerketa euskaraz. Kongresuko artikulu-bilduma, Udako Euskal Unibertsitatea, Durango, Spain.

Azkune, Gorka; Almeida, Aitor; López de Ipiña, Diego; Chen, Liming Luke (2015). "Últimas tendencias en el modelado de actividades humanas" [Latest trends in human activity modelling]. DYNA, 90(4).

Azkune, Gorka; Almeida, Aitor; López-de-Ipiña, Diego; Chen, Liming (2015). "Extending knowledge-driven activity models through data-driven learning techniques." Expert Systems with Applications, 42(6), 3115–3128. DOI: 10.1016/j.eswa.2014.11.063.
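"Extending knowledge-driven activity models through data-driven learning techniques" (Azkune et al., 2015) starts from incomplete knowledge-based activity models and specialises them with actions observed in real sensor data. The snippet below is an illustrative sketch of that idea rather than the published clustering and learning process; the action names and the 0.6 support threshold are invented.

```python
# Illustrative sketch (not the algorithm published in the paper): extend an
# initial knowledge-driven activity model with actions that frequently
# co-occur with it in observed sensor data.
from collections import Counter

initial_model = {"MakeCoffee": {"use_kettle", "use_coffee_jar"}}

observed_sequences = [
    {"use_kettle", "use_coffee_jar", "open_fridge", "use_milk"},
    {"use_kettle", "use_coffee_jar", "use_milk"},
    {"use_kettle", "use_coffee_jar", "use_sugar"},
]

def extend_model(model_actions, sequences, min_support=0.6):
    """Add actions seen alongside the model's actions in enough sequences."""
    matches = [s for s in sequences if model_actions <= s]
    counts = Counter(a for s in matches for a in s - model_actions)
    extra = {a for a, c in counts.items() if c / len(matches) >= min_support}
    return model_actions | extra

# The specialised MakeCoffee model now also contains 'use_milk'.
print(extend_model(initial_model["MakeCoffee"], observed_sequences))
```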
Azkune, Gorka; Almeida, Aitor; López-de-Ipiña, Diego; Chen, Liming Luke (2014). "A Hybrid Evaluation Methodology for Human Activity Recognition Systems." In Ubiquitous Computing and Ambient Intelligence. Personalisation and User Adapted Services (Lecture Notes in Computer Science, vol. 8867), Springer International Publishing, 92–99.

Azkune, Gorka; Almeida, Aitor; López-de-Ipiña, Diego; Chen, Liming (2014). "A Knowledge-Driven Tool for Automatic Activity Dataset Annotation." In Intelligent Systems'2014 (Advances in Intelligent Systems and Computing, vol. 322), Springer International Publishing, 593–604.

Azkune, Gorka; Orduña, Pablo; Laiseca, Xabier; Castillejo, Eduardo; López-de-Ipiña, Diego; Loitxate, Miguel; Azpiazu, Jon (2013). "Semantic Framework for Social Robot Self-Configuration." Sensors, 13(6), 7004–7020. DOI: 10.3390/s130607004.

Azkune, Gorka; Orduña, Pablo; Laiseca, Xabier; López-de-Ipiña, Diego; Loitxate, Miguel (2012). "Semantic Based Self-configuration Approach for Social Robots in Health Care Environments." In Ambient Assisted Living and Home Care (Lecture Notes in Computer Science, vol. 7657), Springer Berlin Heidelberg, 354–361.
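"A Knowledge-Driven Tool for Automatic Activity Dataset Annotation" (Azkune et al., 2014) labels sensor datasets by locating the action patterns declared in minimal activity models, tolerating varied orderings and interleaved actions. The sketch below illustrates that matching idea under invented assumptions (toy models, a toy action stream and a fixed-size window); it is not the published algorithm.

```python
# Sketch under stated assumptions (this is not the published annotation tool):
# label a window of actions with an activity whenever every action of that
# activity's minimal model appears in it, regardless of order or interleaved extras.
minimal_models = {
    "PrepareTea": {"use_kettle", "use_teabag"},
    "BrushTeeth": {"use_toothbrush", "use_tap"},
}

stream = ["use_kettle", "open_fridge", "use_teabag",
          "use_tap", "use_toothbrush", "use_soap"]

def annotate(actions, models, window=3):
    """Return (start_index, activity) pairs for every matched window."""
    labels = []
    for start in range(len(actions) - window + 1):
        seen = set(actions[start:start + window])
        for activity, pattern in models.items():
            if pattern <= seen:
                labels.append((start, activity))
    return labels

print(annotate(stream, minimal_models))
# [(0, 'PrepareTea'), (3, 'BrushTeeth')]
```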