@inproceedings{CharbonnierWartena2018,
  author       = {Charbonnier, Jean and Wartena, Christian},
  title        = {Using Word Embeddings for Unsupervised Acronym Disambiguation},
  booktitle    = {Proceedings of the 27th International Conference on Computational Linguistics, Santa Fe, New Mexico, USA, August 20-26, 2018},
  organization = {International Committee on Computational Linguistics (ICCL)},
  pages        = {2610--2619},
  year         = {2018},
  doi          = {10.25968/opus-1265},
  url          = {http://nbn-resolving.de/urn:nbn:de:bsz:960-opus4-12653},
  abstract     = {Scientific papers from all disciplines contain many abbreviations and acronyms. In many cases these acronyms are ambiguous. We present a method to choose the contextual correct definition of an acronym that does not require training for each acronym and thus can be applied to a large number of different acronyms with only few instances. We constructed a set of 19,954 examples of 4,365 ambiguous acronyms from image captions in scientific papers along with their contextually correct definition from different domains. We learn word embeddings for all words in the corpus and compare the averaged context vector of the words in the expansion of an acronym with the weighted average vector of the words in the context of the acronym. We show that this method clearly outperforms (classical) cosine similarity. Furthermore, we show that word embeddings learned from a 1 billion word corpus of scientific texts outperform word embeddings learned from much larger general corpora.},
  language     = {en},
}