#!/usr/bin/python3
""" Work in progress
Plan:
Read in fullword.json for list of words and token
Read in pre-calculated "proper" embedding for each token from safetensor file
Generate a tensor array of distance for each token, to every other token/embedding
Save it out
"""
import sys
import json
import torch
from safetensors import safe_open
embed_file="embeddings.safetensors"
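# NOTE: this script assumes a CUDA-capable GPU; both the device below and the
# safe_open() call load everything straight onto the GPU.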
device=torch.device("cuda")
print("read in words from json now",file=sys.stderr)
with open("fullword.json","r") as f:
tokendict = json.load(f)
wordlist = list(tokendict.keys())
print("read in embeddings now",file=sys.stderr)
model = safe_open(embed_file,framework="pt",device="cuda")
embs=model.get_tensor("embeddings")
embs = embs.to(device)  # already on the GPU from safe_open, but keep the assignment explicit
print("Shape of loaded embeds =",embs.shape)
# ("calculate distances now")
distances = torch.cdist(embs, embs, p=2)
print("distances shape is",distances.shape)
# Find 10 closest tokens to targetword.
# Will include the word itself
def find_closest(targetword):
    try:
        targetindex=wordlist.index(targetword)
    except ValueError:
        print(targetword,"not found")
        return
    #print("index of",targetword,"is",targetindex)
    targetdistances=distances[targetindex]
    smallest_distances, smallest_indices = torch.topk(targetdistances, 10, largest=False)
    smallest_distances=smallest_distances.tolist()
    smallest_indices=smallest_indices.tolist()
    for d,i in zip(smallest_distances,smallest_indices):
        print(wordlist[i],"(",d,")")
    #print("The smallest distance values are",smallest_distances)
    #print("The smallest index values are",smallest_indices)
print("Input a word now:")
for line in sys.stdin:
    input_text = line.rstrip()
    find_closest(input_text)