- Publications
- Journal article
- Regularized online tensor factorization for sparse knowledge graph embeddings
Regularized online tensor factorization for sparse knowledge graph embeddings
@article{zulaika_zurimendi_regularized_2022,
  title    = {Regularized Online Tensor Factorization for Sparse Knowledge Graph Embeddings},
  author   = {Zulaika Zurimendi, Unai and Almeida, Aitor and L{\'o}pez-de-Ipi{\~n}a, Diego},
  journal  = {Neural Computing and Applications},
  year     = {2022},
  month    = sep,
  issn     = {1433-3058},
  doi      = {10.1007/s00521-022-07796-z},
  url      = {https://doi.org/10.1007/s00521-022-07796-z},
  urldate  = {2022-09-30},
  language = {en},
  abstract = {Knowledge Graphs represent real-world facts and are used in several applications; however, they are often incomplete and have many missing facts. Link prediction is the task of completing these missing facts from existing ones. Embedding models based on Tensor Factorization attain state-of-the-art results in link prediction. Nevertheless, the embeddings they produce can not be easily interpreted. Inspired by previous work on word embeddings, we propose inducing sparsity in the bilinear tensor factorization model, {RESCAL}, to build interpretable Knowledge Graph embeddings. To overcome the difficulties that stochastic gradient descent has when producing sparse solutions, we add {$l_1$} regularization to the learning objective by using the generalized Regularized Dual Averaging online optimization algorithm. The proposed method substantially improves the interpretability of the learned embeddings while maintaining competitive performance in the standard metrics.},
  keywords = {Interpretable embeddings, Knowledge graph embedding, Sparse learning, inception, jcr5.102, kno, knowledge graphs, machine learning, q1, representation learning},
}
Abstract