diff --git a/README.md b/README.md
index f57d6d4fb..ab1c919df 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@ src="./docs/assets/logo.png">
-  SOTA metrics for evaluating Retrieval Augmented Generation (RAG)
+  Evaluation framework for your Retrieval Augmented Generation (RAG) pipelines
diff --git a/src/ragas/metrics/__init__.py b/src/ragas/metrics/__init__.py
index ce96f04ae..b54689e25 100644
--- a/src/ragas/metrics/__init__.py
+++ b/src/ragas/metrics/__init__.py
@@ -1,6 +1,6 @@
 from ragas.metrics.answer_relevance import AnswerRelevancy, answer_relevancy
 from ragas.metrics.context_relevance import ContextRelevancy, context_relevancy
-from ragas.metrics.factual import Faithfulness, faithfulness
+from ragas.metrics.faithfulnes import Faithfulness, faithfulness

 __all__ = [
     "Faithfulness",
diff --git a/src/ragas/metrics/context_relevance.py b/src/ragas/metrics/context_relevance.py
index 8560ed55d..93cc7acae 100644
--- a/src/ragas/metrics/context_relevance.py
+++ b/src/ragas/metrics/context_relevance.py
@@ -8,6 +8,7 @@
 import numpy as np
 from datasets import Dataset
 from sentence_transformers import CrossEncoder
+from tqdm import tqdm

 from ragas.metrics.base import Metric
 from ragas.metrics.llms import openai_completion
@@ -135,7 +136,7 @@ def score(self: t.Self, dataset: Dataset) -> Dataset:
             prompts.append(prompt)

         responses = []
-        for batch_idx in range(0, len(prompts), 20):
+        for batch_idx in tqdm(range(0, len(prompts), 20)):
             batch_responses = openai_completion(
                 prompts[batch_idx : batch_idx + 20], n=self.strictness
             )
diff --git a/src/ragas/metrics/factual.py b/src/ragas/metrics/faithfulnes.py
similarity index 100%
rename from src/ragas/metrics/factual.py
rename to src/ragas/metrics/faithfulnes.py
diff --git a/tests/benchmarks/benchmark_eval.py b/tests/benchmarks/benchmark_eval.py
index 5a09c18cd..7d5a30905 100644
--- a/tests/benchmarks/benchmark_eval.py
+++ b/tests/benchmarks/benchmark_eval.py
@@ -8,14 +8,8 @@
 DEVICE = "cuda" if is_available() else "cpu"

-PATH_TO_DATSET_GIT_REPO = "../../../datasets/fiqa/"
-dataset_dir = os.environ.get("DATASET_DIR", PATH_TO_DATSET_GIT_REPO)
-if os.path.isdir(dataset_dir):
-    ds = Dataset.from_csv(os.path.join(dataset_dir, "baseline.csv"))
-    assert isinstance(ds, Dataset)
-else:
-    # data
-    ds = load_dataset("explodinggradients/fiqa", "ragas_eval")["baseline"]
+# data
+ds = load_dataset("explodinggradients/fiqa", "ragas_eval")["baseline"]


 if __name__ == "__main__":
     result = evaluate(
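Note on the context_relevance.py change above: wrapping the batch iterator in tqdm gives one progress tick per batch of 20 prompts, without touching the batching logic itself. A minimal, self-contained sketch of the same pattern (the `complete` helper below is a hypothetical stand-in for ragas's internal `openai_completion`, not its real signature):

# Sketch: progress bar over batched LLM calls, mirroring the score() change.
from tqdm import tqdm

BATCH_SIZE = 20

def complete(batch: list[str], n: int = 1) -> list[str]:
    # Hypothetical stand-in for openai_completion; returns n dummy
    # responses per prompt so the loop below is runnable offline.
    return ["response"] * (len(batch) * n)

prompts = [f"prompt {i}" for i in range(100)]
responses: list[str] = []
# tqdm wraps the index range, so each tick is one batch of BATCH_SIZE prompts.
for batch_idx in tqdm(range(0, len(prompts), BATCH_SIZE)):
    responses.extend(complete(prompts[batch_idx : batch_idx + BATCH_SIZE]))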
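The benchmark_eval.py change drops the local-checkout fallback, so the benchmark always pulls the fiqa baseline from the Hugging Face Hub. A hedged sketch of the resulting flow, assuming the metric objects exported from ragas.metrics above and that `evaluate` accepts a `metrics` list (the exact signature may differ across ragas versions):

from datasets import load_dataset

from ragas import evaluate
from ragas.metrics import answer_relevancy, context_relevancy, faithfulness

# The "ragas_eval" config of explodinggradients/fiqa ships a precomputed
# "baseline" split, as referenced in tests/benchmarks/benchmark_eval.py.
ds = load_dataset("explodinggradients/fiqa", "ragas_eval")["baseline"]

if __name__ == "__main__":
    # Assumption: evaluate(dataset, metrics=[...]) as in early ragas releases.
    result = evaluate(ds, metrics=[answer_relevancy, context_relevancy, faithfulness])
    print(result)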