@inproceedings{AgaWartenaDrumondetal.2016,
  author    = {Aga, Rosa Tsegaye and Wartena, Christian and Drumond, Lucas and Schmidt-Thieme, Lars},
  title     = {Learning Thesaurus Relations from Distributional Features},
  booktitle = {{LREC} 2016, Tenth International Conference on Language Resources and Evaluation},
  year      = {2016},
  pages     = {2071--2075},
  isbn      = {978-2-9517408-9-1},
  doi       = {10.25968/opus-1089},
  url       = {http://nbn-resolving.de/urn:nbn:de:bsz:960-opus4-10894},
  language  = {en},
  abstract  = {In distributional semantics words are represented by aggregated context features. The similarity of words can be computed by comparing their feature vectors. Thus, we can predict whether two words are synonymous or similar with respect to some other semantic relation. We will show on six different datasets of pairs of similar and non-similar words that a supervised learning algorithm on feature vectors representing pairs of words outperforms cosine similarity between vectors representing single words. We compared different methods to construct a feature vector representing a pair of words. We show that simple methods like pairwise addition or multiplication give better results than a recently proposed method that combines different types of features. The semantic relation we consider is relatedness of terms in thesauri for intellectual document classification. Thus our findings can directly be applied for the maintenance and extension of such thesauri. To the best of our knowledge this relation was not considered before in the field of distributional semantics.},
}