Skip to content

Commit

Permalink
docs: moved quickstart (#54)
Browse files Browse the repository at this point in the history
  • Loading branch information
jjmachan authored Jul 10, 2023
1 parent 7df8cbc commit fc09372
Show file tree
Hide file tree
Showing 5 changed files with 52 additions and 31 deletions.
6 changes: 3 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,14 @@ format: ## Running code formatter: black and isort
@echo "(isort) Ordering imports..."
@isort .
@echo "(black) Formatting codebase..."
@black --config pyproject.toml src tests examples experiments
@black --config pyproject.toml src tests docs experiments
@echo "(black) Formatting stubs..."
@find src -name "*.pyi" ! -name "*_pb2*" -exec black --pyi --config pyproject.toml {} \;
@echo "(ruff) Running fix only..."
@ruff check src examples tests --fix-only
@ruff check src docs tests --fix-only
lint: ## Running lint checker: ruff
@echo "(ruff) Linting development project..."
@ruff check src examples tests
@ruff check src docs tests
type: ## Running type checker: pyright
@echo "(pyright) Typechecking codebase..."
@pyright src
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
<a href="https://github.com/explodinggradients/ragas/blob/master/LICENSE">
<img alt="License" src="https://img.shields.io/github/license/explodinggradients/ragas.svg?color=green">
</a>
<a href="https://colab.research.google.com/github/explodinggradients/ragas/blob/main/examples/quickstart.ipynb">
<a href="https://colab.research.google.com/github/explodinggradients/ragas/blob/main/docs/quickstart.ipynb">
<img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg">
</a>
<a href="https://discord.gg/5djav8GGNZ">
Expand Down Expand Up @@ -77,7 +77,7 @@ results = evaluate(dataset)
# {'ragas_score': 0.860, 'context_relavency': 0.817,
# 'faithfulness': 0.892, 'answer_relevancy': 0.874}
```
If you want a more in-depth explanation of core components, check out our [quick-start notebook](./examples/quickstart.ipynb)
If you want a more in-depth explanation of core components, check out our [quick-start notebook](./docs/quickstart.ipynb)
## :luggage: Metrics

Ragas measures your pipeline's performance against two dimensions
Expand Down
File renamed without changes.
16 changes: 16 additions & 0 deletions examples/quickstart.ipynb → docs/quickstart.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,29 @@
"id": "2e63f667",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/explodinggradients/ragas/blob/main/docs/quickstart.ipynb\">\n",
" <img alt=\"Open In Colab\" \n",
" align=\"left\"\n",
" src=\"https://colab.research.google.com/assets/colab-badge.svg\">\n",
"</a>\n",
"# Quickstart\n",
"\n",
"Welcome to the ragas quickstart. We're going to get you up and running with ragas as quickly as possible so that you can go back to improving your Retrieval Augmented Generation pipelines while this library makes sure your changes are improving your entire pipeline.\n",
"\n",
"To kick things off, let's start with the data"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "18274e1f",
"metadata": {},
"outputs": [],
"source": [
"# if using colab uncomment this\n",
"#!pip install ragas"
]
},
{
"cell_type": "code",
"execution_count": 1,
Expand Down
57 changes: 31 additions & 26 deletions experiments/assesments/metrics_assesments.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -177,8 +177,8 @@
"outputs": [],
"source": [
"def get_corr(targets, predictions):\n",
" scores = [kendalltau(x, y).correlation for x, y in zip(targets, predictions)]\n",
" return [score if not np.isnan(score) else 0 for score in scores ]"
" scores = [kendalltau(x, y).correlation for x, y in zip(targets, predictions)]\n",
" return [score if not np.isnan(score) else 0 for score in scores]"
]
},
{
Expand Down Expand Up @@ -355,18 +355,25 @@
"metadata": {},
"outputs": [],
"source": [
"def gpt_faithfulness(question:list, context:list, answer:list):\n",
" prompt = [faithfulness.format(c,q, a) for c,q,a in zip(question,context,answer)]\n",
" output = [output for output in llm(prompt)['choices']]\n",
" scores = [(out[\"text\"].strip()) for out in output ]\n",
" scores = [int(score) if score in ['1','2','3','4','5'] else 1 for score in scores]\n",
"def gpt_faithfulness(question: list, context: list, answer: list):\n",
" prompt = [\n",
" faithfulness.format(c, q, a) for c, q, a in zip(question, context, answer)\n",
" ]\n",
" output = [output for output in llm(prompt)[\"choices\"]]\n",
" scores = [(out[\"text\"].strip()) for out in output]\n",
" scores = [\n",
" int(score) if score in [\"1\", \"2\", \"3\", \"4\", \"5\"] else 1 for score in scores\n",
" ]\n",
" return scores\n",
"\n",
"def gpt_relevance(question:list, answer:list):\n",
" prompt = [relevence.format(q,a) for q,a in zip(question,answer)]\n",
" output = [output for output in llm(prompt)['choices']]\n",
" scores = [(out[\"text\"].strip()) for out in output ]\n",
" scores = [int(score) if score in ['1','2','3','4','5'] else 1 for score in scores]\n",
"\n",
"def gpt_relevance(question: list, answer: list):\n",
" prompt = [relevence.format(q, a) for q, a in zip(question, answer)]\n",
" output = [output for output in llm(prompt)[\"choices\"]]\n",
" scores = [(out[\"text\"].strip()) for out in output]\n",
" scores = [\n",
" int(score) if score in [\"1\", \"2\", \"3\", \"4\", \"5\"] else 1 for score in scores\n",
" ]\n",
" return scores"
]
},
Expand Down Expand Up @@ -425,7 +432,11 @@
"metadata": {},
"outputs": [],
"source": [
"q,a,c = wikiqa_ragas['train'][0]['question'],wikiqa_ragas['train'][0]['generated_without_rag'],wikiqa_ragas['train'][0]['context']"
"q, a, c = (\n",
" wikiqa_ragas[\"train\"][0][\"question\"],\n",
" wikiqa_ragas[\"train\"][0][\"generated_without_rag\"],\n",
" wikiqa_ragas[\"train\"][0][\"context\"],\n",
")"
]
},
{
Expand All @@ -446,7 +457,7 @@
}
],
"source": [
"gpt_faithfulness([q],[c], [a])"
"gpt_faithfulness([q], [c], [a])"
]
},
{
Expand Down Expand Up @@ -517,12 +528,12 @@
"def predict_(examples):\n",
" scores = {}\n",
" questions = examples[\"question\"]\n",
" context = examples['context']\n",
" context = examples[\"context\"]\n",
" for col in COLUMNS:\n",
" passage = examples[col]\n",
" inputs = list(zip(questions, passage))\n",
" #scores[f\"{col}_relevance\"] = t5_qgen.predict(inputs, show_progress=False)\n",
" scores[f\"{col}_relevance\"] = gpt_faithfulness(questions,context,passage)\n",
" # scores[f\"{col}_relevance\"] = t5_qgen.predict(inputs, show_progress=False)\n",
" scores[f\"{col}_relevance\"] = gpt_faithfulness(questions, context, passage)\n",
" return scores"
]
},
Expand Down Expand Up @@ -553,10 +564,7 @@
},
"outputs": [],
"source": [
"output = (\n",
" wikiqa_ragas[\"train\"]\n",
" .map(predict_relevance, batched=True, batch_size=10)\n",
")"
"output = wikiqa_ragas[\"train\"].map(predict_relevance, batched=True, batch_size=10)"
]
},
{
Expand Down Expand Up @@ -622,10 +630,7 @@
}
],
"source": [
"output = (\n",
" wikiqa_ragas[\"train\"]\n",
" .map(predict_relevance, batched=True, batch_size=10)\n",
")"
"output = wikiqa_ragas[\"train\"].map(predict_relevance, batched=True, batch_size=10)"
]
},
{
Expand Down Expand Up @@ -877,7 +882,7 @@
"metadata": {},
"outputs": [],
"source": [
"def predict_faithfulness(examples,scoring_fun=NLI.score):\n",
"def predict_faithfulness(examples, scoring_fun=NLI.score):\n",
" scores = {}\n",
" questions = examples[\"question\"]\n",
" contexts = examples[\"answer_context\"]\n",
Expand Down

0 comments on commit fc09372

Please sign in to comment.