#!/usr/bin/env python

""" 
Plan:
   Read in "dictionary" for list of words
   Read in pre-calculated "proper" embedding for each word from safetensor file
   Prompt user for a word from the list
   Generate a tensor array of distance to all the other known words
   Print out the 20 closest ones
"""


import sys
import torch
from safetensors import safe_open

from transformers import CLIPProcessor, CLIPTextModelWithProjection

processor = None
tmodel2 = None
model_path2 = None
model_config2 = None

if len(sys.argv) == 4:
    model_path2 = sys.argv[1]
    model_config2 = sys.argv[2]
    embed_file = sys.argv[3]
else:
    print("Usage:", sys.argv[0], "<textencoder modelfile> <config file> <embeddings file>")
    sys.exit(1)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


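# Load the CLIP tokenizer/processor and the text-encoder model given on the
# command line. Deferred until actually needed (see standard_embed_calc below).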
def init():
    global tmodel2, processor
    # yes, oddly they all use the same tokenizer, basically
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

    print("loading", model_path2)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(
        model_path2, config=model_config2, local_files_only=True, use_safetensors=True
    )
    tmodel2.to(device)



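# The "dictionary" file is expected to hold one word per line, in the same
# order as the rows of the cached embeddings tensor (find_closest relies on
# that index correspondence).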
print("read in words from dictionary now",file=sys.stderr)
with open("dictionary","r") as f:
    tokendict = f.readlines()
    wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
print(len(wordlist),"lines read")

print("read in embeddings now",file=sys.stderr)
model = safe_open(embed_file, framework="pt", device=str(device))
embs = model.get_tensor("embeddings")  # already on the target device; no .to() needed
print("Shape of loaded embeds =",embs.shape)

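# Compute an embedding for arbitrary text by tokenizing it and running it
# through the CLIP text encoder with projection head.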
def standard_embed_calc(text):
    global processor, tmodel2
    if tmodel2 is None:
        init()  # lazy-load the text encoder only when a cache miss forces a full calc
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)

    with torch.no_grad():
        outputs = tmodel2(**inputs)
        embeddings = outputs.text_embeds
    return embeddings[0]


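# Compute euclidean distances from the target embedding to every cached
# embedding and print the 20 nearest words.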
def print_distances(targetemb):
    # cdist wants 2D inputs: a (1, D) query vs the (N, D) cache -> (1, N) distances
    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)

    print("shape of distances...", targetdistances.shape)

    smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)

    for d, i in zip(smallest_distances.tolist(), smallest_indices.tolist()):
        print(wordlist[i], "(", d, ")")



# Find the 20 closest tokens to targetword.
# The list will include the word itself, when it is in the cache.
def find_closest(targetword):
    try:
        targetindex=wordlist.index(targetword)
        targetemb=embs[targetindex]
        print_distances(targetemb)
        return
    except ValueError:
        print(targetword,"not found in cache")


    print("Now doing with full calc embed")
    targetemb=standard_embed_calc(targetword)
    print_distances(targetemb)


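# Simple interactive loop: look each word up in the cache, falling back to a
# freshly computed embedding when it is not there.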
while True:
    try:
        input_text = input("Input a word now: ")
    except EOFError:
        break
    find_closest(input_text)