-
Notifications
You must be signed in to change notification settings - Fork 0
/
extract_douban_emb.py
30 lines (26 loc) · 1.03 KB
/
extract_douban_emb.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import torch
import json
from transformers import BertModel, BertTokenizer
# Path to a locally stored bert-base-uncased checkpoint (tokenizer + weights).
model_path = "downstream_nlp/bert-base-uncased/"

# Single source of truth for the target GPU instead of a repeated magic string.
# NOTE(review): assumes a machine with at least 6 GPUs (cuda:5) — confirm.
DEVICE = "cuda:5"

tokenizer = BertTokenizer.from_pretrained(model_path)
# Loading the model at import time is a heavy side effect; kept at module
# level because the script body below relies on these globals.
model = BertModel.from_pretrained(model_path).to(DEVICE)
if __name__ == "__main__":
    dataset = "Douban"

    # Load the knowledge-graph metadata. Context managers close the file
    # handles deterministically (the original leaked them via json.load(open(...))).
    with open("dataset/{}/metadata.json".format(dataset), "r") as f:
        kg = json.load(f)

    # NOTE(review): text_map is loaded but never used below — the load is kept
    # so the script still fails fast if the file is missing; confirm whether
    # the entity text was meant to come from here instead of id2entity.
    with open("data_utils/item2text.json", "r") as f:
        text_map = json.load(f)

    entity_map = kg["id2entity"]  # keyed by stringified integer ids: "0".."n-1"
    n = len(entity_map)

    result = []
    with torch.no_grad():  # inference only: no autograd graph, less memory
        for i in range(n):
            # Iterating 0..n-1 through the string keys preserves the
            # embedding row order in the saved matrix.
            text = entity_map[str(i)]
            print(i + 1)  # progress counter (1-based, matches original output)
            inputs = tokenizer(
                text, return_tensors="pt", max_length=512, truncation=True
            ).to('cuda:5')
            outputs = model(**inputs)
            # pooler_output is the pooled [CLS] representation; flatten to 1-D
            # and move to CPU immediately to keep GPU memory flat.
            result.append(outputs.pooler_output.reshape(-1).cpu())

    # Stack into an (n, hidden_size) tensor and save. Passing a path string
    # lets torch.save manage the file handle (the original left it open).
    result = torch.stack(result).cpu()
    torch.save(result, "embeddings/{}2.pth".format(dataset))