import os
import json

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Get the HF_TOKEN from the environment variable (set by the Space)
hf_token = os.getenv("ACCESS")

tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it', token=hf_token)
# Configure 4-bit NF4 quantization using BitsAndBytesConfig (used only on GPU)
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4",
)

# Check if a GPU is available
if torch.cuda.is_available():
    # Load the model with 4-bit quantization (GPU)
    model = AutoModelForCausalLM.from_pretrained(
        'google/gemma-2-2b-it',
        device_map="auto",
        quantization_config=quantization_config,
        token=hf_token,
    )
else:
    # Load the model without quantization (CPU)
    model = AutoModelForCausalLM.from_pretrained(
        'google/gemma-2-2b-it',
        device_map="auto",
        token=hf_token,
    )
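
# Optional sanity check (a minimal sketch, not part of the original script):
# report how much memory the loaded weights occupy and where they were placed.
# get_memory_footprint() and hf_device_map are standard transformers/accelerate
# attributes on models loaded with device_map.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
if hasattr(model, "hf_device_map"):
    print(f"Device map: {model.hf_device_map}")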
# Define the prompt for generating a JSON object with nested events
prompt = (
"Generate a JSON object that describes a sequence of potential future events, where each event can have nested subevents. The JSON structure should adhere to the following format:\n\n"
"{\n"
" \"events\": {\n"
" \"event\": {\n"
" \"event_number\": 1,\n"
" \"name\": \"conflict_start\",\n"
" \"description\": \"Tensions escalate between Iran and Israel\",\n"
" \"probability\": 70,\n"
" \"duration_days\": 30,\n"
" \"subevents\": {\n"
" \"event\": {\n"
" \"event_number\": 2,\n"
" \"name\": \"diplomatic_failure\",\n"
" \"description\": \"Diplomatic negotiations fail\",\n"
" \"probability\": 60,\n"
" \"duration_days\": 15,\n"
" \"subevents\": {\n"
" \"event\": {\n"
" \"event_number\": 3,\n"
" \"name\": \"military_clash\",\n"
" \"description\": \"Initial military clash at the border\",\n"
" \"probability\": 50,\n"
" \"duration_days\": 10,\n"
" \"subevents\": {\n"
" \"event\": [\n"
" {\n"
" \"event_number\": 4,\n"
" \"name\": \"escalation\",\n"
" \"description\": \"Conflict escalates into full-scale war\",\n"
" \"probability\": 40,\n"
" \"duration_days\": 180,\n"
" \"subevents\": {\n"
" \"event\": [\n"
" {\n"
" \"event_number\": 5,\n"
" \"name\": \"regional_involvement\",\n"
" \"description\": \"Other Middle Eastern countries get involved\",\n"
" \"probability\": 30,\n"
" \"duration_days\": 365,\n"
" \"subevents\": {\n"
" \"event\": [\n"
" {\n"
" \"event_number\": 6,\n"
" \"name\": \"ceasefire\",\n"
" \"description\": \"International powers broker a ceasefire\",\n"
" \"probability\": 20,\n"
" \"duration_days\": 30\n"
" },\n"
" {\n"
" \"event_number\": 7,\n"
" \"name\": \"prolonged_conflict\",\n"
" \"description\": \"Conflict continues for over a year\",\n"
" \"probability\": 50,\n"
" \"duration_days\": 365\n"
" }\n"
" ]\n"
" }\n"
" },\n"
" {\n"
" \"event_number\": 8,\n"
" \"name\": \"international_intervention\",\n"
" \"description\": \"UN or other international organizations intervene\",\n"
" \"probability\": 25,\n"
" \"duration_days\": 60\n"
" }\n"
" ]\n"
" }\n"
" },\n"
" {\n"
" \"event_number\": 9,\n"
" \"name\": \"containment\",\n"
" \"description\": \"Conflict is contained and doesn't escalate\",\n"
" \"probability\": 30,\n"
" \"duration_days\": 90\n"
" }\n"
" ]\n"
" }\n"
" },\n"
" \"event\": {\n"
" \"event_number\": 10,\n"
" \"name\": \"sanctions\",\n"
" \"description\": \"Increased sanctions on Iran\",\n"
" \"probability\": 70,\n"
" \"duration_days\": 180,\n"
" \"subevents\": {\n"
" \"event\": [\n"
" {\n"
" \"event_number\": 11,\n"
" \"name\": \"iran_retaliates\",\n"
" \"description\": \"Iran retaliates with cyberattacks\",\n"
" \"probability\": 40,\n"
" \"duration_days\": 60\n"
" },\n"
" {\n"
" \"event_number\": 12,\n"
" \"name\": \"israel_response\",\n"
" \"description\": \"Israel responds with targeted airstrikes\",\n"
" \"probability\": 50,\n"
" \"duration_days\": 60\n"
" }\n"
" ]\n"
" }\n"
" }\n"
" }\n"
" },\n"
" \"event\": {\n"
" \"event_number\": 13,\n"
" \"name\": \"diplomatic_success\",\n"
" \"description\": \"Successful diplomatic negotiations\",\n"
" \"probability\": 40,\n"
" \"duration_days\": 30,\n"
" \"subevents\": {\n"
" \"event\": [\n"
" {\n"
" \"event_number\": 14,\n"
" \"name\": \"peace_agreement\",\n"
" \"description\": \"Iran and Israel sign a peace agreement\",\n"
" \"probability\": 20,\n"
" \"duration_days\": 60\n"
" },\n"
" {\n"
" \"event_number\": 15,\n"
" \"name\": \"temporary_truce\",\n"
" \"description\": \"A temporary truce is established\",\n"
" \"probability\": 30,\n"
" \"duration_days\": 30\n"
" }\n"
" ]\n"
" }\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n\n"
"Ensure the generated JSON is enclosed between `<json>` and `</json>` tags. For example:\n\n"
"<json>\n"
"{ \n"
" // Your generated JSON here \n"
"}\n"
"</json>\n\n"
"Now, generate a JSON with the before-mentioned schema, to reflect the potential future timeline with the following theme, responding only with the JSON enclosed within the `<json>` and `</json>` tags. Theme: "
)
def generate(event):
    combined_input = f"{prompt} {event}"
    prompt_msg = [{'role': 'user', 'content': combined_input}]
    inputs = tokenizer.apply_chat_template(
        prompt_msg,
        add_generation_prompt=True,
        return_tensors='pt'
    )
    tokens = model.generate(
        inputs.to(model.device),
        max_new_tokens=1024,
        temperature=0.5,
        do_sample=True
    )
    # Slice off the prompt using the actual length of the chat-templated input.
    # Re-encoding combined_input would give a different length, because the chat
    # template adds its own control tokens.
    input_length = inputs.shape[-1]
    output_text = tokenizer.decode(tokens[0][input_length:], skip_special_tokens=True)
    print(output_text)

    # Extract the JSON payload between the <json> and </json> tags
    json_start_index = output_text.find("<json>")
    json_end_index = output_text.find("</json>")
    if json_start_index != -1 and json_end_index != -1:
        json_string = output_text[json_start_index + len("<json>"):json_end_index].strip()
        # Debugging: print the extracted JSON string to check its contents
        print("Extracted JSON String:", json_string)
        # Parse and return the JSON data
        try:
            return json.loads(json_string)
        except json.JSONDecodeError as e:
            return f"Error: Invalid JSON - {e}"
    else:
        return "Error: <json> or </json> not found in generated output"