|
from lib.files import * |
|
from lib.memory import * |
|
from lib.grapher import * |
|
from lib.pipes import * |
|
from lib.entropy import * |
|
from lib.events import * |
|
from lib.triggers import * |
|
|
|
|
|
from lib.sonsofstars import * |
|
import internetarchive |
|
|
|
|
|
|
|
# --- Module-level singletons -------------------------------------------------
# These objects are created at import time and are used directly by the
# methods of class `I` below; their classes come from the wildcard `lib.*`
# imports at the top of the file.

# Long-term memory: text search over the bundled resource files.
longMem = TextFinder("./resources/")

# NLP assistant: entity/grammatical tagging, similarity, search expressions.
coreAi = AIAssistant()

# Short-term memory keyed by concept, bounded to 200k entries.
memory = MemoryRobotNLP(max_size=200000)

# Graph view built on top of the short-term memory.
grapher = Grapher(memory)

# HTTP client wrapper used for sensor/API polling.
sensor_request = APIRequester()

events = EventManager()

# Trigger configured with two tag groups, two firing times and an event name.
# NOTE(review): `datetime` is not imported explicitly — presumably re-exported
# by one of the wildcard imports; confirm, otherwise this is a NameError.
trigger = Trigger(["tag1", "tag2"], ["tag3", "tag4"], [datetime.time(10, 0), datetime.time(15, 0)], "Event1")

# NOTE(review): `action_function` is not defined anywhere in this file — this
# raises NameError at import time unless a wildcard import provides it.
trigger.add_action(action_function)

trigger.add_source("https://example.com/api/data")

# Exercise the trigger once at import time with a sample tag set and "now".
current_tags = {"tag1", "tag2", "tag3"}

current_time = datetime.datetime.now().time()

trigger.check_trigger(current_tags, current_time)
|
|
|
|
|
class ownProperties:
    """Plain container for an RPG-style character sheet.

    Every constructor argument is stored verbatim as an attribute of the
    same (Spanish) name; no validation or transformation is performed.
    """

    def __init__(self, nombre, clase, raza, nivel, atributos, habilidades, equipo, historia):
        """Capture all character-sheet fields as instance attributes."""
        campos = (
            ("nombre", nombre),
            ("clase", clase),
            ("raza", raza),
            ("nivel", nivel),
            ("atributos", atributos),
            ("habilidades", habilidades),
            ("equipo", equipo),
            ("historia", historia),
        )
        for campo, valor in campos:
            setattr(self, campo, valor)
|
|
|
|
|
# Character sheet for the "Sophia" persona.
# NOTE(review): keys here are English while `ownProperties.__init__` uses
# Spanish field names (nombre, clase, ...); this dict is never passed to
# `ownProperties` in this file, so the mismatch is latent — confirm intent.
sophia_prop = {
    "name": "Sophia",
    "class": "Characteromant",
    "race": "Epinoia",
    "level": 10,
    # Ability scores, tabletop-RPG style.
    "attributes": {
        "strength": 1,
        "dexterity": 99,
        "constitution": 1,
        "intelligence": 66,
        "wisdom": 80,
        "charisma": 66
    },
    # Placeholders — not yet filled in.
    "behavioral_rules": [""],
    "goals": ["", ""],
    "dislikes": [""],
    "abilities": ["ELS", "Cyphers", "Kabbalah", "Wisdom", "Ephimerous", "Metamorphing"],
    "equipment": ["Python3", "2VCPU", "16 gb RAM", "god", "word", "network", "transformers"],
    # Backstory text; `sons_of_stars` comes from the `lib.sonsofstars`
    # wildcard import at the top of the file.
    "story": sons_of_stars
}
|
|
|
|
|
|
|
class I:
    """Agent persona ("yo").

    Holds self-referential phrases, preferences and personal properties, and
    provides helpers that move information between the module-level long-term
    memory (`longMem`) and short-term memory (`memory`) using the NLP
    assistant (`coreAi`). Several methods are unfinished stubs.
    """

    def __init__(self, prompt, frases_yo, preferencias, propiedades_persona):
        """Store the persona's mutable state.

        prompt: seed prompt. NOTE(review): accepted but never stored or used;
            kept for interface compatibility.
        frases_yo: list of self-referential sentences.
        preferencias: list of liked things.
        propiedades_persona: dict of personal properties/attributes.
        """
        # The original assigned each attribute twice; once is enough.
        self.frases_yo = frases_yo
        self.preferencias = preferencias
        self.propiedades_persona = propiedades_persona
        self.dopamina = 0.0  # reward-signal placeholder

    def obtener_paths_grafo(self, grafo_ngx):
        """Extract paths from the given graph. Not implemented yet."""
        pass

    def crear_preguntas(self, txt):
        """Search the Internet Archive for `txt` and return every line that
        contains a question mark from the plain-text rendering of each hit.

        Network-bound: one search plus one HTTP GET per result.
        """
        search = internetarchive.search_items(txt)
        res = []
        for result in search:
            print(result['identifier'])
            idc = result["identifier"]

            headers = {"accept": "application/json"}

            # Fetch the OCR'd plain-text ("_djvu.txt") stream page of the item.
            req2 = requests.get("https://archive.org/stream/" + idc + "/" + idc + "_djvu.txt", headers=headers)

            try:
                # The document text sits inside the page's <pre> block.
                page = req2.text.split("<pre>")[1].split("</pre>")[0].split(" <!--")[0]
                for line in page.split("\n"):
                    if "?" in line:
                        res.append(line)
            except IndexError:
                # No <pre> section in the response — skip this item.
                # (Was a bare `except:`; narrowed to the split failure so
                # real errors are no longer silently swallowed.)
                pass

        return res

    def longToShortFast(self, txt):
        """Rebuild short-term memory from `txt`.

        Picks salient entities and nouns, builds search expressions from the
        top three, and stores long-term matches under each expression.
        Returns the module-level `memory`.
        """
        memory.memory = {}  # reset short-term memory

        subjects = coreAi.entity_pos_tagger(txt)
        subjects_nc = coreAi.grammatical_pos_tagger(txt)

        # Keep person/organization/location entities with non-trivial words.
        # BUG FIX: the original condition was
        #   "PER" in e or "ORG" in e or "LOC" in e and len(e) > 3
        # where `and` bound tighter than `or` and the length test ran on the
        # tag string, making it effectively vacuous; group the type test and
        # apply the length filter to the word (matching the len > 3 filter
        # applied again below).
        subjects_filtered = []
        for sub in subjects:
            entity = sub["entity"]
            if ("PER" in entity or "ORG" in entity or "LOC" in entity) and len(sub["word"]) > 3:
                subjects_filtered.append(sub["word"])

        # Add common nouns (NN* grammatical tags).
        for sub in subjects_nc:
            if "NN" in sub["entity"]:
                subjects_filtered.append(sub["word"])

        subjects_filtered = coreAi.process_list(subjects_filtered)

        # Drop very short tokens that survived processing.
        subs = [sub for sub in subjects_filtered if len(sub) > 3]

        # Build search expressions from the top three subjects and store
        # the long-term matches for each.
        exprs = coreAi.gen_search_expr(subs[0:3])
        for sub in exprs:
            memory.add_concept(sub, longMem.find_matches(sub))

        return memory

    def longToShort(self, txt):
        """Store long-term matches for `txt` in short-term memory, keyed by
        the person/organization/location entities found in each match.
        Returns the module-level `memory`.
        """
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            subjects = coreAi.entity_pos_tagger(T)
            subjects_filtered = []
            for sub in subjects:
                if "PER" in sub["entity"] or "ORG" in sub["entity"] or "LOC" in sub["entity"]:
                    subjects_filtered.append(sub["word"])

            for sub in subjects_filtered:
                memory.add_concept(sub, T)

        return memory

    def think_gen(self, txt):
        """Generate questions about long-term matches for `txt` and fold
        them, with related short-term tags, back into short-term memory.
        Returns the module-level `memory`.
        """
        think_about = longMem.find_matches(txt)
        print(think_about)
        for T in think_about:
            subjects = coreAi.entity_pos_tagger(T)
            print(subjects)

            subjects_low = coreAi.grammatical_pos_tagger(T)

            # One list of archive-derived questions per subject.
            questions = []
            for sub in subjects:
                questions.append(self.crear_preguntas(sub))

            # Entity-tag the generated questions.
            # BUG FIX: the original iterated over the (empty) questions_subj
            # list it was appending to, so this loop never executed.
            questions_subj = []
            for q in questions:
                questions_subj.append(coreAi.entity_pos_tagger(q))

            memoryShortTags = memory.search_concept_pattern(subjects)

            subj_tags = coreAi.entity_pos_tagger(T)

            for sub in subjects:
                # BUG FIX: the original concatenated the questions_subj list
                # into a string (TypeError); serialize its items first.
                memory.add_concept(sub, "," + ",".join(str(q) for q in questions_subj) + ",".join(memoryShortTags))
                memory.add_concept(sub, T + ",".join(memoryShortTags))

        return memory

    def crear_path_grafo(self, text):
        """Tag `text` in preparation for building a graph path (unfinished:
        the tags are computed but not yet used)."""
        # BUG FIX: the original called an undefined name `assistant`; the
        # module-level assistant is `coreAi`.
        pos_tags = coreAi.grammatical_pos_tagger(text)
        ner_results = coreAi.entity_pos_tagger(text)

    def crear_circuito_logico(self):
        """Build a logic circuit for decisions. Not implemented yet."""
        pass

    def tomar_decision_sentimiento(self, sentimiento):
        """Return the sentiment tags computed for `sentimiento`."""
        sentiments = coreAi.sentiment_tags(sentimiento)
        # NOTE(review): the original also called
        # coreAi.similarity_tag(self, sentenceA, sentenceB) with undefined
        # names (guaranteed NameError) and discarded the result — removed
        # until real sentence pairs are available.
        return sentiments

    def hacer_predicciones_texto(self, texto):
        """Predict a continuation for `texto`. Not implemented yet."""
        pass

    def agregar_preferencia(self, preferencia):
        """Append a preference to the persona's preference list."""
        self.preferencias.append(preferencia)

    def agregar_frase_yo(self, frase):
        """Append a self-referential phrase to the persona."""
        self.frases_yo.append(frase)

    def eliminar_preferencia(self, preferencia):
        """Remove a preference if present; silently ignore unknown values."""
        if preferencia in self.preferencias:
            self.preferencias.remove(preferencia)

    def eliminar_frase_yo(self, frase):
        """Remove a self-phrase if present; silently ignore unknown values."""
        if frase in self.frases_yo:
            self.frases_yo.remove(frase)

    def generar_pregunta(self, prompt):
        """Turn `prompt` into a question by appending a fixed Spanish tail."""
        # BUG FIX: the literal was mojibake ("驴Qu茅") from a mis-decoded
        # UTF-8 source; restored the intended Spanish text.
        pregunta = prompt + " ¿Qué opinas sobre esto?"
        return pregunta

    def responder_pregunta(self, pregunta):
        """Return a fixed noncommittal answer (placeholder implementation)."""
        # BUG FIX: restored mojibaked "qu茅" to the intended Spanish text.
        respuesta = "No estoy seguro de qué opinar sobre eso."
        return respuesta

    def discriminar_y_agregar(self, informacion, dataset):
        """Route `informacion` to the matching store by keyword.

        "yo" -> self-phrases, "preferencia" -> preferences, "propiedad" and
        everything else are currently ignored. NOTE(review): `dataset` is
        accepted but unused — kept for interface compatibility.
        """
        if "yo" in informacion.lower():
            self.agregar_frase_yo(informacion)
        elif "preferencia" in informacion.lower():
            self.agregar_preferencia(informacion)
        elif "propiedad" in informacion.lower():
            # Property handling not implemented yet.
            pass
        else:
            # Unclassified information is dropped.
            pass
|
|
|
|
|
if __name__ == "__main__":
    # Demo run: build a persona and exercise the question/answer helpers.
    frases_yo = ["Yo soy inteligente", "Yo puedo lograr lo que me proponga"]
    # BUG FIX: "M煤sica" was mojibake for the intended "Música".
    preferencias = ["Cine", "Música", "Viajar"]
    propiedades_persona = {"carisma": 0.8, "destreza": 0.6, "habilidad": 0.9}

    # BUG FIX: the original instantiated an undefined `Yo` with three
    # arguments; the class defined above is `I`, whose first parameter is a
    # (currently unused) prompt.
    yo = I("", frases_yo, preferencias, propiedades_persona)

    # BUG FIX: "d铆a" was mojibake for the intended "día".
    pregunta_generada = yo.generar_pregunta("Hoy es un día soleado.")
    print("Pregunta generada:", pregunta_generada)

    respuesta = yo.responder_pregunta(pregunta_generada)
    print("Respuesta:", respuesta)

    # "Me gusta ir al cine." matches none of the routing keywords, so the
    # preference list is expected to be unchanged by this call.
    informacion = "Me gusta ir al cine."
    yo.discriminar_y_agregar(informacion, yo.preferencias)
    print("Preferencias actualizadas:", yo.preferencias)