-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathConceptualizer.py
More file actions
310 lines (291 loc) · 13.3 KB
/
Conceptualizer.py
File metadata and controls
310 lines (291 loc) · 13.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
from LDA import tokenize
from gensim.utils import simple_preprocess
from utilities import get_concepts_of_instance_by_probase
import numpy as np
import operator
import sys
import nltk
sys.path.append("/home1/roy/QGen/DGen/Layer1")
from wordnet_candidate_generation import synsets_prob
from Layer2.Fine_tuned_BERT import get_similarity_from_SBERT
def getSynsetName(synset):
    """Return the human-readable lemma label of a WordNet synset.

    :param synset: a synset whose ``name()`` is of the form ``"lemma.pos.nn"``
    :return: the lemma portion, with underscores turned into spaces
    """
    lemma_label, _, _ = synset.name().partition(".")
    return lemma_label.replace("_", " ")
def transform_pos(sentence, key):
    """Return the WordNet part-of-speech constant for *key* within *sentence*.

    The sentence is tokenized, POS-tagged with NLTK, and the Penn Treebank
    tag of the key token is mapped onto the corresponding WordNet POS.

    :param sentence: context sentence that contains the key word
    :param key: the word whose part of speech is wanted
    :return: one of ``nltk.corpus.wordnet.{ADJ, ADV, VERB, NOUN}``, or ``""``
        when the key is not found, the tag has no WordNet mapping, or
        tagging fails for any reason
    """
    try:
        tokenized = simple_preprocess(sentence)
        # simple_preprocess lowercases every token, so the key must be
        # lowercased too -- otherwise a capitalised key (e.g. a sentence-
        # initial word) is never found and .index() always raises.
        key_index = tokenized.index(key.lower())
        pos_of_key = nltk.tag.pos_tag(tokenized)[key_index][-1]
        # Map the leading letter of the Penn Treebank tag to WordNet's POS.
        treebank_prefix_to_wordnet = {
            "J": nltk.corpus.wordnet.ADJ,
            "R": nltk.corpus.wordnet.ADV,
            "V": nltk.corpus.wordnet.VERB,
            "N": nltk.corpus.wordnet.NOUN,
        }
        return treebank_prefix_to_wordnet.get(pos_of_key[:1], "")
    except Exception as e:
        # Deliberate best-effort: a missing key, empty tokenization, or
        # absent NLTK tagger data should degrade to "no POS", not crash.
        print(str(e))
        return ""
class Conceptualizer:
    """Resolve an instance word to its most likely concept in context.

    Candidate concepts come from Probase (mode 0) or WordNet (mode 1) with a
    prior probability per concept; each candidate is then re-weighted by its
    contextual fit to the sentence, using either the LDA topic model held in
    ``self.ldamodel`` or SBERT cosine similarity (the *_bert methods).
    """

    def __init__(self, lda):
        # Keep both the wrapper object and the underlying gensim LDA model,
        # since the scoring methods call the gensim API directly.
        self.lda = lda
        self.ldamodel = lda.ldamodel
        print('Using Conceptualizer with bert embedding')

    def conceptualize(self, sentence, instance, mode=0, debug=False, eval=False):
        """
        Conceptualize the given instance in the given context (sentence)
        :param sentence: a sentence as context
        :param instance: the instance, which should be conceptualized in the given context
        :param mode: using Probase(0) or WordNet(1) to perform CDC
        :param debug: print candidate concepts and their scores
        :param eval: if True, return the whole candidate list sorted best-first
            instead of only the single top concept
        :return: the most likely concept for the instance in the given context
        """
        if mode == 0:
            # Probase candidates: {concept: prior probability}.
            # NOTE(review): eval is hard-coded to False here rather than
            # forwarding this method's eval flag -- confirm intentional.
            concepts = get_concepts_of_instance_by_probase(instance, eval=False)
            if len(concepts) == 0:
                return None
            if debug:
                print(sorted(concepts.items(), key=operator.itemgetter(1)))
            probabilities_of_concepts = self.__calculate_probs_of_concepts_bert(
                concepts, sentence, instance, debug
            )
        else:
            # WordNet candidates: (hypernym synset, siblings, prior).
            pos = transform_pos(sentence, instance)
            print(f"pos of {instance}: {pos}")
            synsets = synsets_prob(instance, pos)
            probabilities_of_concepts = self.__calculate_probs_of_concepts_wordnet_bert(
                synsets, sentence, instance
            )
        if probabilities_of_concepts is None or len(probabilities_of_concepts) == 0:
            return None
        if debug:
            print("All concepts: ")
            print(sorted(probabilities_of_concepts, key=lambda x: -x[1]))
        if eval:
            # Sort on the LAST tuple element, which holds the combined
            # probability in both the Probase and WordNet tuple shapes.
            probabilities_of_concepts = sorted(
                probabilities_of_concepts, key=lambda x: -x[-1]
            )
            return probabilities_of_concepts
        # NOTE(review): item[1] is the probability only for Probase tuples
        # (concept, prob); WordNet tuples are (hyper, siblings, prob) --
        # verify mode 1 callers always pass eval=True.
        most_likely_concept = max(probabilities_of_concepts, key=lambda item: item[1])[
            0
        ]
        return most_likely_concept

    def __calculate_probs_of_concepts_wordnet(self, synsets, sentence):
        """Score WordNet candidates against the sentence via LDA topics.

        :param synsets: iterable of (hypernym synset, siblings, prior prob),
            or None
        :param sentence: the context sentence
        :return: list of (hypernym, siblings, combined prob), or None
        """
        if synsets is None:
            return None
        probabilities_of_concepts = []
        # p(z | s): topic distribution of the context sentence.
        sentence_bow = self.ldamodel.id2word.doc2bow(simple_preprocess(sentence))
        topics_of_text = self.ldamodel.get_document_topics(sentence_bow)
        for hyper, siblings, prob_c_given_w in synsets:
            # p(z | c): topic distribution of the hypernym's dictionary gloss.
            concept_bow = self.ldamodel.id2word.doc2bow(
                simple_preprocess(hyper.definition())
            )
            probs_of_topics_for_given_concept = [
                x[1] for x in self.ldamodel.get_document_topics(concept_bow)
            ]
            # topic_overlap = sum_z p(z | c) * p(z | s)
            topic_overlap = 0
            for topic_id, prob_of_topic in topics_of_text:
                topic_overlap += (
                    probs_of_topics_for_given_concept[topic_id] * prob_of_topic
                )
            # p(c | w, s) = p(c | w) * sum_z p(z | s) * p(z | c)
            probabilities_of_concepts.append(
                (hyper, siblings, prob_c_given_w * topic_overlap)
            )
        return probabilities_of_concepts

    def __calculate_probs_of_concepts_wordnet_bert(self, synsets, sentence, key):
        """Score WordNet candidates against the sentence via SBERT similarity.

        :param synsets: iterable of (hypernym synset, siblings, prior prob),
            or None
        :param sentence: the context sentence
        :param key: the instance word; it is blanked out of the sentence so
            similarity reflects the context rather than the word itself
        :return: list of (hypernym, siblings, combined prob), or None
        """
        if synsets is None:
            return None
        probabilities_of_concepts = []
        print("Using Bert-based embedding in WordNet")
        for hyper, siblings, prob_c_given_w in synsets:
            # Cosine similarity between the blanked sentence and the gloss
            # replaces the LDA topic-overlap term.
            similarity = get_similarity_from_SBERT(
                sentence.replace(key, "**blank**"), hyper.definition(), key
            )
            # p(c | w, s) = p(c | w) * similarity(s, gloss)
            probabilities_of_concepts.append(
                (hyper, siblings, prob_c_given_w * similarity)
            )
        return probabilities_of_concepts

    def __calculate_probs_of_concepts(self, concepts, sentence, debug):
        """
        Calculates for each concept the probability of the concept for the given sentence
        :param concepts: the concepts and their probability
        :param sentence: the given sentence
        :param debug: print the sentence bag-of-words and topic distribution once
        :return: the concepts and their probabilities as (concept, prob) pairs
        """
        probabilities_of_concepts = []
        printed_debug = False
        # p(z | s): topic distribution of the context sentence.
        bag_of_words = self.ldamodel.id2word.doc2bow(simple_preprocess(sentence))
        topics_of_text = self.ldamodel.get_document_topics(bag_of_words)
        # Lazily computed, loop-invariant normalized topic-term matrix; only
        # needed for concepts that exist as single tokens in the vocabulary.
        topics_terms_proba_ = None
        for concept in concepts:
            # p(c | w): prior from Probase.
            prob_c_given_w = concepts[concept]
            if concept not in self.ldamodel.id2word.token2id.keys():
                # Multi-word / OOV concept: treat its text as a tiny document.
                # simple_preprocess: lowercase tokens, dropping tokens that
                # are too short or too long.
                bag_of_words = self.ldamodel.id2word.doc2bow(simple_preprocess(concept))
                probs_of_topics_for_given_concept = [
                    x[1] for x in list(self.ldamodel.get_document_topics(bag_of_words))
                ]
            else:
                if topics_terms_proba_ is None:
                    # Normalize each topic's term weights into probabilities.
                    topic_terms_ = self.ldamodel.state.get_lambda()
                    topics_terms_proba_ = np.apply_along_axis(
                        lambda x: x / x.sum(), 1, topic_terms_
                    )
                # p(z | c) read straight from the topic-term matrix column.
                probs_of_topics_for_given_concept = topics_terms_proba_[
                    :, self.ldamodel.id2word.token2id[concept]
                ]
            if not printed_debug and debug:
                print("bag of words:")
                print(bag_of_words)
                print("topic distribution:")
                print(sorted(topics_of_text, key=lambda x: -x[1]))
                printed_debug = True
            # topic_overlap = sum_z p(z | c) * p(z | s)
            topic_overlap = 0
            for topic_id, prob_of_topic in topics_of_text:
                topic_overlap += (
                    probs_of_topics_for_given_concept[topic_id] * prob_of_topic
                )
            # p(c | w, s) = p(c | w) * sum_z p(z | s) * p(z | c)
            probabilities_of_concepts.append((concept, prob_c_given_w * topic_overlap))
        return probabilities_of_concepts

    def __calculate_probs_of_concepts_bert(self, concepts, sentence, key, debug):
        """
        Calculates for each concept the probability of the concept for the given sentence
        :param concepts: the concepts and their probability
        :param sentence: the given sentence
        :param key: the instance word, blanked out of the sentence before scoring
        :param debug: unused; kept for interface compatibility with the LDA variant
        :return: the concepts and their probabilities as (concept, prob) pairs
        """
        print("Using Bert-based embedding in Probase")
        probabilities_of_concepts = []
        for concept, prob_c_given_w in concepts.items():
            # SBERT cosine similarity between the blanked sentence and the
            # concept replaces the LDA topic-overlap term.
            similarity = get_similarity_from_SBERT(
                sentence.replace(key, "**blank**"), concept, key
            )
            # p(c | w, s) = p(c | w) * similarity(s, c)
            probabilities_of_concepts.append((concept, prob_c_given_w * similarity))
        return probabilities_of_concepts