# Imports
import streamlit as st
import numpy as np
import torch
import random
from transformers import (
    GPT2LMHeadModel,
    GPT2Tokenizer,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset
from huggingface_hub import HfApi
import plotly.graph_objects as go
import time
from datetime import datetime


# Cyberpunk and Loading Animation Styling
def setup_cyberpunk_style():
    # NOTE: the custom CSS payload was empty in the source; the cyberpunk
    # styling and loading-animation rules would be injected here.
    st.markdown("""
    """, unsafe_allow_html=True)


# Prepare Dataset Function with Padding Token Fix
def prepare_dataset(data, tokenizer, block_size=128):
    # GPT-2 ships without a pad token; reuse the EOS token so padding works.
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(
            examples['text'],
            truncation=True,
            max_length=block_size,
            padding='max_length',
        )

    raw_dataset = Dataset.from_dict({'text': data})
    tokenized_dataset = raw_dataset.map(
        tokenize_function, batched=True, remove_columns=['text']
    )
    # Causal LM training: the labels are simply the input IDs.
    tokenized_dataset = tokenized_dataset.map(
        lambda examples: {'labels': examples['input_ids']}, batched=True
    )
    tokenized_dataset.set_format(
        type='torch', columns=['input_ids', 'attention_mask', 'labels']
    )
    return tokenized_dataset


# Training Dashboard Class with Enhanced Display
class TrainingDashboard:
    def __init__(self):
        self.metrics = {
            'current_loss': 0,
            'best_loss': float('inf'),
            'generation': 0,
            'individual': 0,
            'start_time': time.time(),
            'training_speed': 0,
        }
        self.history = []

    def update(self, loss, generation, individual):
        self.metrics['current_loss'] = loss
        self.metrics['generation'] = generation
        self.metrics['individual'] = individual
        if loss < self.metrics['best_loss']:
            self.metrics['best_loss'] = loss
        elapsed_time = time.time() - self.metrics['start_time']
        # Rough throughput: individuals processed per second since start.
        self.metrics['training_speed'] = (generation * individual) / elapsed_time
        self.history.append({
            'loss': loss,
            'timestamp': datetime.now().strftime('%H:%M:%S'),
        })


# Define Model Initialization
def initialize_model(model_name="gpt2"):
    model = GPT2LMHeadModel.from_pretrained(model_name)
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token
    return model, tokenizer


# Load Dataset Function
def load_dataset(data_source="demo", tokenizer=None):
    if data_source == "demo":
        data = ["Sample text data for model training. This can be replaced with actual data for better performance."]
    else:
        data = ["Loaded data from uploaded text file."]
    dataset = prepare_dataset(data, tokenizer)
    return dataset


# Train Model Function with Customized Progress Bar
def train_model(model, train_dataset, tokenizer, epochs=3, batch_size=4):
    training_args = TrainingArguments(
        output_dir="./results",
        overwrite_output_dir=True,
        num_train_epochs=epochs,
        per_device_train_batch_size=batch_size,
        save_steps=10_000,
        save_total_limit=2,
        logging_dir="./logs",
        logging_steps=100,
    )
    # mlm=False selects causal (GPT-style) language modeling rather than masked LM.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
    )
    trainer.train()


# Main App Logic
def main():
    setup_cyberpunk_style()
    # The header markup was truncated in the source; a minimal placeholder
    # banner is assumed here.
    st.markdown('<h1>GPT-2 Training Dashboard</h1>', unsafe_allow_html=True)


if __name__ == "__main__":
    main()
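

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): a minimal way to
# exercise the training pipeline above without the Streamlit UI, e.g. for a
# quick smoke test. `_smoke_test` is a hypothetical helper, and the
# one-epoch / batch-size-2 settings are assumptions chosen only to keep the
# run short; they are not the app's real configuration.
# ---------------------------------------------------------------------------
def _smoke_test():
    model, tokenizer = initialize_model("gpt2")    # pretrained GPT-2 + tokenizer
    dataset = load_dataset("demo", tokenizer)      # built-in single-sample demo text
    train_model(model, dataset, tokenizer, epochs=1, batch_size=2)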