# NOTE: the lines above this file's imports were web-scrape residue from the
# Hugging Face Spaces page (status banner, file size, commit hashes, and a
# line-number gutter) — not part of the program; removed so the file parses.
import gradio as gr
# from sentence_transformers import SentenceTransformer, util
#
# model_name = 'nq-distilbert-base-v1'
# bi_encoder = SentenceTransformer("./")
# top_k = 5
# sentences = [
# "a happy person is a person how can do what he want with his money",
# "That is a happy dog ho bark alot",
# "Today is a sunny day so that a happy person can walk on the street"
# ]
# # vector embeddings created from dataset
# corpus_embeddings = bi_encoder.encode(sentences, convert_to_tensor=True, show_progress_bar=True)
#
# def search(query):
# # Encode the query using the bi-encoder and find potentially relevant passages
# question_embedding = bi_encoder.encode(query)
# hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
# hits = hits[0] # Get the hits for the first query
#
# # Output of top-k hits
# print("Input question:", query)
# print("Results")
# for hit in hits:
# print("\t{:.3f}\t{}".format(hit['score'], sentences[hit['corpus_id']]))
# return hits
#
# def greet(name):
# hittt = search(query=name)
# x=dict()
# for hit in hittt:
# score=hit['score']
# sentence=sentences[hit['corpus_id']]
# buffer={sentence:score}
# x.update(buffer)
# return x
import ast

import dill
def greet1(data):
    """Parse a dict-literal payload string, extract its 'pdf' entry, and
    deserialize that entry with dill for debug inspection.

    Parameters:
        data: str — the textbox value; expected to be the repr of a dict
            containing a 'pdf' key whose value is the repr of dill-pickled
            bytes (e.g. "{'pdf': b'...'}").  TODO confirm against the caller.

    Returns:
        The raw (still-serialized) 'pdf' entry, echoed back to the output box.

    Raises:
        ValueError/SyntaxError — if `data` or the 'pdf' entry is not a valid
            Python literal.
    """
    print(data)  # debug: show the raw incoming payload
    # ast.literal_eval safely parses literal structures; the original eval()
    # would execute arbitrary code embedded in the (untrusted) request.
    payload = ast.literal_eval(data)
    y = payload.get('pdf')
    print(y)
    print(type(y))
    # SECURITY: dill.loads on untrusted input can still execute arbitrary
    # code during unpickling — replace with a safe transport (e.g. gr.File
    # upload) before exposing this endpoint publicly.
    obj = dill.loads(ast.literal_eval(y))  # deserialize once, not twice
    print(type(obj))
    print(obj.read(), "dah el data el file")
    return y
# Build the Gradio UI: one input textbox, one output textbox, and a button
# that routes the input through greet1 (exposed to API clients as "testing").
iface = gr.Blocks()
with iface:
    name = gr.Textbox(label="Name")           # raw payload string from the user/client
    output = gr.Textbox(label="Output Box")   # echoes greet1's return value
    # Earlier bi-encoder demo wiring, kept disabled:
    # greet_btn = gr.Button("Greet")
    # greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
    greet1_btn = gr.Button("Greet1")
    # api_name="testing" makes this callable as a named endpoint via the
    # Gradio client API, not just the button click.
    greet1_btn.click(fn=greet1, inputs=name, outputs=output, api_name="testing")
iface.launch()