
Commit 68d52b9

feat: new Prompt object and faithfulness metric (#1232)
1 parent fa864a6 commit 68d52b9

7 files changed (+478 -10 lines)


.gitignore (+1 -1)

@@ -168,4 +168,4 @@ cython_debug/
 experiments/
 **/fil-result/
 src/ragas/_version.py
-.vscode/settings.json
+.vscode
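The change widens the ignore rule: `.vscode/settings.json` ignored a single file, while `.vscode` ignores the whole editor-settings directory.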
ragas_experimental/llms/prompt.py (new file; path inferred from the imports in this commit)

@@ -0,0 +1,141 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass
import json
import typing as t

from ragas.llms.output_parser import RagasoutputParser
from ragas.llms.prompt import PromptValue

# Check Pydantic version
from pydantic import BaseModel
import pydantic

if t.TYPE_CHECKING:
    from ragas.llms.base import BaseRagasLLM
    from langchain_core.callbacks import Callbacks

PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


class BasePrompt(ABC):
    def __init__(self, llm):
        self.llm: BaseRagasLLM = llm

    @abstractmethod
    async def generate(self, data: t.Any) -> t.Any:
        pass


def model_to_dict(
    model: BaseModel,
    by_alias: bool = False,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
) -> t.Dict[str, t.Any]:
    if PYDANTIC_V2:
        return model.model_dump(  # type: ignore
            by_alias=by_alias,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
        )
    else:
        return model.dict(
            by_alias=by_alias,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
        )


def to_json(model: t.Any, indent: int = 4) -> str:
    if PYDANTIC_V2:
        # Pydantic 2.x
        return model.model_dump_json(indent=indent)
    else:
        # Pydantic 1.x
        return model.json(indent=indent)


def model_to_json_schema(model: t.Type[BaseModel]) -> dict:
    if PYDANTIC_V2:
        return model.model_json_schema()
    else:
        return model.schema_json()
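
# NOTE (editorial observation, not a comment from the diff): the three
# helpers above shim the Pydantic v1/v2 API split so the prompt classes
# below run on either major version. One caveat: under v1, schema_json()
# returns a JSON string, while v2's model_json_schema() returns a dict,
# so model_to_json_schema() does not return a uniform type across versions.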

InputModel = t.TypeVar("InputModel", bound=BaseModel)
OutputModel = t.TypeVar("OutputModel", bound=BaseModel)


class StringIO(BaseModel):
    text: str


class PydanticPrompt(BasePrompt, t.Generic[InputModel, OutputModel]):
    input_model: t.Type[InputModel]
    output_model: t.Type[OutputModel]
    instruction: str
    examples: t.List[t.Tuple[InputModel, OutputModel]] = []

    def generate_instruction(self) -> str:
        return self.instruction

    def generate_output_signature(self, indent: int = 4) -> str:
        schema = model_to_json_schema(self.output_model)
        return (
            f"Please return the output in a JSON format that complies with the "
            f"following schema as specified in JSON Schema and OpenAPI specification:\n"
            f"{schema}"
        )

    def generate_examples(self):
        if self.examples:
            example_strings = []
            for e in self.examples:
                input_data, output_data = e
                example_strings.append(
                    self.instruction
                    + "\n"
                    + "input: " + to_json(input_data, indent=4)
                    + "\n"
                    + "output: " + to_json(output_data, indent=4)
                )

            return (
                "These are some examples to show how to perform the above instruction\n"
                + "\n\n".join(example_strings)
            )
        # if no examples are provided
        else:
            return ""

    def to_string(self, data: InputModel) -> str:
        # this needs a check
        return (
            self.generate_instruction()
            + "\n"
            + self.generate_output_signature()
            + "\n"
            + self.generate_examples()
            + "\nNow perform the above instruction with the following input\n"
            + "input: " + to_json(data, indent=4)
            + "\n"
            + "output: "
        )

    async def generate(self, data: InputModel, callbacks: Callbacks) -> OutputModel:
        prompt_value = PromptValue(prompt_str=self.to_string(data))
        resp = await self.llm.generate(prompt_value, callbacks=callbacks)
        resp_text = resp.generations[0][0].text
        parser = RagasoutputParser(pydantic_object=self.output_model)
        answer = await parser.aparse(resp_text, prompt_value, self.llm, max_retries=3)

        # TODO: make sure RagasOutputParser returns the same type as OutputModel
        return answer  # type: ignore


class StringPrompt(BasePrompt):
    async def generate(self, data: str) -> str:
        prompt_value = PromptValue(prompt_str=data)
        llm_result = await self.llm.agenerate_text(prompt_value)
        return llm_result.generations[0][0].text
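
To make the abstraction concrete, here is a minimal usage sketch. It assumes a configured BaseRagasLLM instance is available as `llm`; `GreetingInput`, `GreetingOutput`, and the instruction text are hypothetical illustrations, not part of this commit:

from pydantic import BaseModel

from ragas_experimental.llms.prompt import PydanticPrompt


class GreetingInput(BaseModel):  # hypothetical input model
    name: str


class GreetingOutput(BaseModel):  # hypothetical output model
    greeting: str


class GreetingPrompt(PydanticPrompt[GreetingInput, GreetingOutput]):
    instruction = "Write a one-line greeting for the given name."
    input_model = GreetingInput
    output_model = GreetingOutput
    examples = [(GreetingInput(name="Ada"), GreetingOutput(greeting="Hello, Ada!"))]


async def main(llm):  # llm: a configured BaseRagasLLM (assumption)
    prompt = GreetingPrompt(llm=llm)
    # to_string() renders instruction + output JSON schema + few-shot examples + input
    print(prompt.to_string(GreetingInput(name="Grace")))
    # generate() sends the rendered prompt to the LLM and parses the reply
    # back into a GreetingOutput (with up to 3 parse retries)
    result = await prompt.generate(GreetingInput(name="Grace"), callbacks=None)
    print(result.greeting)

# asyncio.run(main(llm)) would drive this end to end with a real LLM wrapper.

StringPrompt is the degenerate case: `await StringPrompt(llm=llm).generate("some text")` returns the raw completion with no schema or output parsing.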
ragas_experimental/metrics/__init__.py (new file; path inferred from the imports in this commit)

@@ -0,0 +1,3 @@
from ._faithfulness import FaithfulnessExperimental

__all__ = ["FaithfulnessExperimental"]
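With this re-export, downstream code can import from the package root, `from ragas_experimental.metrics import FaithfulnessExperimental`, rather than reaching into the private `_faithfulness` module.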
ragas_experimental/metrics/_faithfulness.py (new file; path inferred from the imports in this commit)

@@ -0,0 +1,226 @@
from __future__ import annotations

import typing as t
import logging
from dataclasses import dataclass

from pydantic import BaseModel, Field
import numpy as np

from ragas.metrics.base import EvaluationMode, MetricWithLLM, get_segmenter
from ragas_experimental.llms.prompt import PydanticPrompt

if t.TYPE_CHECKING:
    from langchain_core.callbacks import Callbacks
    from ragas.metrics._faithfulness import HasSegmentMethod


logger = logging.getLogger(__name__)


class FaithfulnessStatements(BaseModel):
    question: str = Field(description="The question to answer")
    answer: str = Field(description="The answer to the question")
    sentences: t.Dict[int, str] = Field(
        description="A mapping of sentence index to the sentence"
    )


class SentenceComponents(BaseModel):
    sentence_index: int = Field(description="The index of the sentence")
    simpler_statements: t.List[str] = Field(
        description="A list of simpler statements that can be directly inferred from the context"
    )


class SentencesSimplified(BaseModel):
    sentences: t.List[SentenceComponents] = Field(
        description="A list of sentences and their simpler versions"
    )


# examples
example_input_1 = FaithfulnessStatements(
    question="Who was Albert Einstein and what is he best known for?",
    answer="He was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time. He was best known for developing the theory of relativity, he also made important contributions to the development of the theory of quantum mechanics.",
    sentences={
        0: "He was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time.",
        1: "He was best known for developing the theory of relativity, he also made important contributions to the development of the theory of quantum mechanics.",
    },
)

example_output_1 = SentencesSimplified(
    sentences=[
        SentenceComponents(
            sentence_index=0,
            simpler_statements=[
                "Albert Einstein was a German-born theoretical physicist.",
                "Albert Einstein is recognized as one of the greatest and most influential physicists of all time.",
            ],
        ),
        SentenceComponents(
            sentence_index=1,
            simpler_statements=[
                "Albert Einstein was best known for developing the theory of relativity.",
                "Albert Einstein also made important contributions to the development of the theory of quantum mechanics.",
            ],
        ),
    ]
)


class LongFormAnswerPrompt(PydanticPrompt[FaithfulnessStatements, SentencesSimplified]):
    instruction = "Given a question, an answer, and sentences from the answer analyze the complexity of each sentence given under 'sentences' and break down each sentence into one or more fully understandable statements while also ensuring no pronouns are used in each statement. Format the outputs in JSON."
    input_model = FaithfulnessStatements
    output_model = SentencesSimplified
    examples = [(example_input_1, example_output_1)]


class StatementFaithfulnessAnswer(BaseModel):
    statement: str = Field(..., description="the original statement, word-by-word")
    reason: str = Field(..., description="the reason of the verdict")
    verdict: int = Field(..., description="the verdict(0/1) of the faithfulness.")


class NLIStatementOutput(BaseModel):
    statements: t.List[StatementFaithfulnessAnswer]


class NLIStatementInput(BaseModel):
    context: str = Field(..., description="The context of the question")
    statements: t.List[str] = Field(..., description="The statements to judge")


class NLIStatementPrompt(PydanticPrompt[NLIStatementInput, NLIStatementOutput]):
    instruction = "Your task is to judge the faithfulness of a series of statements based on a given context. For each statement you must return verdict as 1 if the statement can be directly inferred based on the context or 0 if the statement can not be directly inferred based on the context."
    input_model = NLIStatementInput
    output_model = NLIStatementOutput
    examples = [
        (
            NLIStatementInput(
                context="""John is a student at XYZ University. He is pursuing a degree in Computer Science. He is enrolled in several courses this semester, including Data Structures, Algorithms, and Database Management. John is a diligent student and spends a significant amount of time studying and completing assignments. He often stays late in the library to work on his projects.""",
                statements=[
                    "John is majoring in Biology.",
                    "John is taking a course on Artificial Intelligence.",
                    "John is a dedicated student.",
                    "John has a part-time job.",
                ],
            ),
            NLIStatementOutput(
                statements=[
                    StatementFaithfulnessAnswer(
                        statement="John is majoring in Biology.",
                        reason="John's major is explicitly mentioned as Computer Science. There is no information suggesting he is majoring in Biology.",
                        verdict=0,
                    ),
                    StatementFaithfulnessAnswer(
                        statement="John is taking a course on Artificial Intelligence.",
                        reason="The context mentions the courses John is currently enrolled in, and Artificial Intelligence is not mentioned. Therefore, it cannot be deduced that John is taking a course on AI.",
                        verdict=0,
                    ),
                    StatementFaithfulnessAnswer(
                        statement="John is a dedicated student.",
                        reason="The context states that he spends a significant amount of time studying and completing assignments. Additionally, it mentions that he often stays late in the library to work on his projects, which implies dedication.",
                        verdict=1,
                    ),
                    StatementFaithfulnessAnswer(
                        statement="John has a part-time job.",
                        reason="There is no information given in the context about John having a part-time job.",
                        verdict=0,
                    ),
                ]
            ),
        ),
        (
            NLIStatementInput(
                context="Photosynthesis is a process used by plants, algae, and certain bacteria to convert light energy into chemical energy.",
                statements=[
                    "Albert Einstein was a genius.",
                ],
            ),
            NLIStatementOutput(
                statements=[
                    StatementFaithfulnessAnswer(
                        statement="Albert Einstein was a genius.",
                        reason="The context and statement are unrelated",
                        verdict=0,
                    )
                ]
            ),
        ),
    ]


@dataclass
class FaithfulnessExperimental(MetricWithLLM):
    name: str = "faithfulness_experimental"  # type: ignore
    evaluation_mode: EvaluationMode = EvaluationMode.qac  # type: ignore
    sentence_segmenter: t.Optional[HasSegmentMethod] = None
    max_retries: int = 1
    _reproducibility: int = 1

    @property
    def reproducibility(self):
        return self._reproducibility

    @reproducibility.setter
    def reproducibility(self, value):
        if value < 1:
            logger.warning("reproducibility cannot be less than 1, setting to 1")
            value = 1
        elif value % 2 == 0:
            logger.warning(
                "reproducibility level cannot be set to even number, setting to odd"
            )
            value += 1
        self._reproducibility = value
178+
def __post_init__(self):
179+
self.long_form_answer_prompt = LongFormAnswerPrompt(llm=self.llm)
180+
self.nli_statement_prompt = NLIStatementPrompt(llm=self.llm)
181+
if self.sentence_segmenter is None:
182+
# TODO: make this dynamic, taking language from prompt
183+
language = "english"
184+
self.sentence_segmenter = get_segmenter(language=language, clean=False)
185+
186+
async def _ascore(self, row: t.Dict, callbacks: Callbacks) -> float:
187+
answer, question, contexts = row["answer"], row["question"], row["contexts"]
188+
189+
# get the sentences from the answer
190+
sentences = self.sentence_segmenter.segment(answer)
191+
# TODO: why do we do this?
192+
sentences = [
193+
sentence for sentence in sentences if sentence.strip().endswith(".")
194+
]
195+
sentence_components = await self.long_form_answer_prompt.generate(
196+
FaithfulnessStatements(
197+
question=question,
198+
answer=answer,
199+
sentences={i: sentence for i, sentence in enumerate(sentences)},
200+
),
201+
callbacks=callbacks
202+
)
203+
204+
statements = [
205+
statement
206+
for component in sentence_components.sentences
207+
for statement in component.simpler_statements
208+
]
209+
verdicts = await self.nli_statement_prompt.generate(
210+
NLIStatementInput(
211+
context="\n".join(contexts),
212+
statements=statements,
213+
),
214+
callbacks=callbacks
215+
)
216+
217+
# compute the score
218+
num_faithful_statements = sum(
219+
verdict.verdict for verdict in verdicts.statements
220+
)
221+
if len(statements):
222+
score = num_faithful_statements / len(statements)
223+
else:
224+
score = np.nan
225+
return score
226+
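
Taken together, `_ascore` segments the answer, decomposes the sentences into atomic statements via LongFormAnswerPrompt, judges each statement against the contexts via NLIStatementPrompt, and averages the verdicts. A minimal scoring sketch, assuming `llm` is a configured BaseRagasLLM and that MetricWithLLM exposes `llm` as a dataclass field (as elsewhere in ragas); the row values are illustrative:

from ragas_experimental.metrics import FaithfulnessExperimental


async def main(llm):  # llm: a configured BaseRagasLLM (assumption)
    metric = FaithfulnessExperimental(llm=llm)  # __post_init__ wires both prompts
    row = {
        "question": "Where does John study and what is his major?",
        "answer": "John studies Computer Science at XYZ University.",
        "contexts": [
            "John is a student at XYZ University pursuing a degree in Computer Science."
        ],
    }
    score = await metric._ascore(row, callbacks=None)
    print(score)

# asyncio.run(main(llm)) would drive this with a real LLM wrapper.

The score is simply the fraction of faithful statements: if the answer decomposes into 4 statements and 3 receive verdict 1, the score is 3/4 = 0.75; an answer that yields no statements scores nan.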
