-
Notifications
You must be signed in to change notification settings - Fork 0
/
extract_amazon_emb.py
32 lines (28 loc) · 1.11 KB
/
extract_amazon_emb.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import torch
import json
from transformers import BertModel, BertTokenizer
model_path = "bert-base-uncased"
# Single source of truth for the compute device. Fall back to CPU when CUDA
# is unavailable so the script still runs off the original 6-GPU machine.
device = torch.device("cuda:5" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertModel.from_pretrained(model_path).to(device)
model.eval()  # pin eval mode explicitly (disables dropout) for deterministic embeddings

if __name__ == "__main__":
    dataset = "Amazon"
    # KG metadata: "id2entity" maps integer ids (stored as string keys) to
    # entity names. Use context managers so the handles are closed.
    with open(f"datasets/{dataset}/metadata.json", "r") as f:
        kg = json.load(f)
    # Optional human-readable product text, keyed by entity name.
    with open("data_utils/item2text.json", "r") as f:
        text_map = json.load(f)
    entity_map = kg["id2entity"]
    n = len(entity_map)

    result = []
    count = 0  # how many entities had product text (kept for parity; diagnostic only)
    with torch.no_grad():
        for i in range(n):
            entity = entity_map[str(i)]
            # Prefer the product description when available; otherwise fall
            # back to the raw entity name tagged as a "Profile".
            if entity in text_map:
                text = "Product: " + text_map[entity]
                count += 1
            else:
                text = "Profile: " + entity
            inputs = tokenizer(
                text, return_tensors="pt", max_length=512, truncation=True
            ).to(device)
            outputs = model(**inputs)
            # pooler_output is (1, hidden_size); flatten to (hidden_size,).
            result.append(outputs.pooler_output.reshape(-1,))
    # Stack to (n, hidden_size) and move to CPU so the saved file loads on
    # machines without the original GPU (no map_location needed).
    result = torch.stack(result).cpu()
    # Pass the path directly to torch.save — the previous open(..., "wb")
    # handle was never closed, risking a truncated/unflushed file.
    torch.save(result, f"embeddings/{dataset}.pth")