import csv
import logging
import os

import pandas as pd

import src.envs as envs
import src.backend.util as util
from src.backend.model_operations import SummaryGenerator, EvaluationModel

logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
class Evaluator:
"""A class to evaluate summaries generated by a language model.
Attributes:
model (str): The name or path of the model.
revision (str): The model revision.
precision (str): The precision setting of the model.
batch_size (int): Batch size for processing.
device (str): The device to run the model on.
no_cache (bool): Flag to disable caching.
limit (int): Limit on the number of items to process.
write_out (bool): Whether to write results to a file.
output_base_path (str): Base path for output files.
summary_generator (SummaryGenerator): Instance for generating summaries.
eval_model (EvaluationModel): Instance for evaluating summaries.
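
    Example (illustrative sketch; the argument values below are placeholders,
    not settings required by this module):

        evaluator = Evaluator(model="org/model-name", revision="main",
                              precision="float16", batch_size=1,
                              device="cpu", no_cache=True, limit=None)
        results = evaluator.evaluate()
        evaluator.write_results()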
"""
def __init__(self, model, revision, precision, batch_size,
device, no_cache, limit, write_out=True,
output_base_path='logs'):
"""Initializes the Evaluator with the given model and settings.
Args:
model (str): The name or path of the model.
revision (str): The model revision.
precision (str): The precision setting of the model.
batch_size (int): Batch size for processing.
device (str): The device to run the model on.
no_cache (bool): Flag to disable caching.
limit (int): Limit on the number of items to process.
write_out (bool): Whether to write results to a file.
output_base_path (str): Base path for output files.
"""
self.model = model
self.revision = revision
self.precision = precision
self.batch_size = batch_size
self.device = device
self.no_cache = no_cache
self.limit = limit
self.write_out = write_out
self.output_base_path = output_base_path
try:
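            # SummaryGenerator drives the model under evaluation to produce summaries;
            # EvaluationModel loads the hallucination evaluation model referenced by
            # envs.HEM_PATH and scores those summaries.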
self.summary_generator = SummaryGenerator(model, revision, self.device)
self.eval_model = EvaluationModel(envs.HEM_PATH, self.device)
except Exception as e:
logging.error(f"Error initializing Evaluator: {e}")
raise
def evaluate(self):
"""
Performs the evaluation process by generating summaries
and computing metrics.
Returns:
dict: A dictionary containing evaluation results.
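                The dictionary is built by util.format_results from the model
                name, revision, precision, and the computed metrics (factual
                consistency rate, hallucination rate, answer rate, and average
                summary length).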
"""
try:
df = pd.read_csv(envs.DATASET_PATH)
            self.generated_summaries_df = self.summary_generator.generate_summaries(
                df, save_path=f"generation_results/{self.model}.csv")
avg_summary_len = self.summary_generator.avg_length
answer_rate = self.summary_generator.answer_rate
self.hallucination_scores, self.eval_results = self.eval_model.evaluate_hallucination(
self.generated_summaries_df)
factual_consistency_rate = self.eval_model.compute_factual_consistency_rate()
hallucination_rate = self.eval_model.hallucination_rate
results = util.format_results(model_name=self.model, revision=self.revision,
precision=self.precision,
factual_consistency_rate=factual_consistency_rate,
hallucination_rate=hallucination_rate,
answer_rate=answer_rate,
avg_summary_len=avg_summary_len)
return results
except FileNotFoundError:
logging.error(f"File not found: {envs.DATASET_PATH}")
raise
except Exception as e:
logging.error(f"Error during evaluation: {e}")
raise
    def write_results(self):
        """Appends the newly generated summaries to the shared leaderboard result files."""
        print('Updating result files')
        leaderboard_path = os.getcwd()  # the path of the leaderboard folder
        print(leaderboard_path)
        working_path = os.path.join(leaderboard_path, 'leaderboard_results')
        if not os.path.exists(working_path):
            logging.error("The results must first be downloaded from Google Drive into the leaderboard folder")
            raise FileNotFoundError(f"Results folder not found: {working_path}")
        source_summary_df = self.generated_summaries_df[["source", "summary"]]

        # Update leaderboard_summaries.csv: first remove previous results for the current model.
        existing_df = pd.read_csv(os.path.join(working_path, 'leaderboard_summaries.csv'), encoding='utf-8')
        mask = existing_df['model'] == self.model
        existing_df = existing_df[~mask]
        print(existing_df.shape)
        summary_doc = set(existing_df['model'].values.tolist())
        print(summary_doc)
        # Persist the filtered rows so the stale entries for this model are actually dropped.
        existing_df.to_csv(os.path.join(working_path, 'leaderboard_summaries.csv'), index=False)
        # Then append the new results for the current model.
        leaderboard_summaries_df = source_summary_df.copy()
        leaderboard_summaries_df.insert(2, "model", [self.model] * leaderboard_summaries_df.shape[0])
        leaderboard_summaries_df.to_csv(os.path.join(working_path, 'leaderboard_summaries.csv'), mode='a', index=False, header=False)
        print('leaderboard_summaries.csv has been updated')