
Commit c996061

added context precision
1 parent cb4e1d5 commit c996061

File tree

3 files changed: +233 −0 lines changed

src/ragas/metrics/collections/__init__.py
src/ragas/metrics/collections/_context_precision.py
src/ragas/prompt/metrics/context_precision.py


src/ragas/metrics/collections/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -3,6 +3,7 @@
 from ragas.metrics.collections._answer_relevancy import AnswerRelevancy
 from ragas.metrics.collections._answer_similarity import AnswerSimilarity
 from ragas.metrics.collections._bleu_score import BleuScore
+from ragas.metrics.collections._context_precision import ContextPrecision
 from ragas.metrics.collections._context_recall import ContextRecall
 from ragas.metrics.collections._rouge_score import RougeScore
 from ragas.metrics.collections._string import (
@@ -18,6 +19,7 @@
     "AnswerRelevancy",
     "AnswerSimilarity",
     "BleuScore",
+    "ContextPrecision",
     "ContextRecall",
     "DistanceMeasure",
     "ExactMatch",
src/ragas/metrics/collections/_context_precision.py

Lines changed: 163 additions & 0 deletions

"""Context Precision metric v2 - Modern implementation with instructor LLMs."""

import typing as t

import numpy as np
from pydantic import BaseModel, Field

from ragas.metrics.collections.base import BaseMetric
from ragas.metrics.result import MetricResult
from ragas.prompt.metrics.context_precision import context_precision_prompt

if t.TYPE_CHECKING:
    from ragas.llms.base import InstructorBaseRagasLLM


class ContextPrecisionVerification(BaseModel):
    """Structured output for context precision verification."""

    reason: str = Field(..., description="Reason for the verdict")
    verdict: int = Field(..., description="Binary verdict: 1 if useful, 0 if not")


class ContextPrecision(BaseMetric):
    """
    Evaluate context precision using the Average Precision metric.

    This metric checks whether the relevant retrieved contexts are ranked
    highly by verifying, for each context, whether it was useful in arriving
    at the given answer.

    This implementation uses modern instructor LLMs with structured output.
    Only modern components are supported; legacy wrappers are rejected with
    clear error messages.

    Usage:
        >>> from openai import AsyncOpenAI
        >>> from ragas.llms.base import instructor_llm_factory
        >>> from ragas.metrics.collections import ContextPrecision
        >>>
        >>> # Setup dependencies
        >>> client = AsyncOpenAI()
        >>> llm = instructor_llm_factory("openai", client=client, model="gpt-4o-mini")
        >>>
        >>> # Create metric instance
        >>> metric = ContextPrecision(llm=llm)
        >>>
        >>> # Single evaluation
        >>> result = await metric.ascore(
        ...     user_input="What is the capital of France?",
        ...     retrieved_contexts=["Paris is the capital of France.", "London is in England."],
        ...     reference="Paris"
        ... )
        >>> print(f"Score: {result.value}")
        >>>
        >>> # Batch evaluation
        >>> results = await metric.abatch_score([
        ...     {"user_input": "Q1", "retrieved_contexts": ["C1", "C2"], "reference": "A1"},
        ...     {"user_input": "Q2", "retrieved_contexts": ["C1", "C2"], "reference": "A2"},
        ... ])

    Attributes:
        llm: Modern instructor-based LLM for verification
        name: The metric name
        allowed_values: Score range (0.0 to 1.0)
    """

    # Type hints for linter (attributes are set in __init__)
    llm: "InstructorBaseRagasLLM"

    def __init__(
        self,
        llm: "InstructorBaseRagasLLM",
        name: str = "context_precision",
        **kwargs,
    ):
        """Initialize ContextPrecision metric with required components."""
        # Set attributes explicitly before calling super()
        self.llm = llm

        # Call super() for validation
        super().__init__(name=name, **kwargs)

    async def ascore(
        self,
        user_input: str,
        retrieved_contexts: t.List[str],
        reference: str,
    ) -> MetricResult:
        """
        Calculate context precision score asynchronously.

        The metric evaluates each retrieved context to determine whether it was
        useful for arriving at the reference answer, then calculates average
        precision over the ranked list.

        Args:
            user_input: The original question
            retrieved_contexts: List of retrieved context strings (in ranked order)
            reference: The reference answer to evaluate against

        Returns:
            MetricResult with average precision score (0.0-1.0)
        """
        # Handle edge cases
        if not retrieved_contexts:
            return MetricResult(value=0.0)

        if not reference or not user_input:
            return MetricResult(value=0.0)

        # Evaluate each context
        verdicts = []
        for context in retrieved_contexts:
            # Generate prompt for this context
            prompt = context_precision_prompt(
                question=user_input, context=context, answer=reference
            )

            # Get verification from LLM
            verification = await self.llm.agenerate(
                prompt, ContextPrecisionVerification
            )

            # Store binary verdict (1 if useful, 0 if not)
            verdicts.append(1 if verification.verdict else 0)

        # Calculate average precision
        score = self._calculate_average_precision(verdicts)

        return MetricResult(value=float(score))

    def _calculate_average_precision(self, verdict_list: t.List[int]) -> float:
        """
        Calculate average precision from a list of binary verdicts.

        Average Precision formula:
            AP = (sum of (precision@k * relevance@k)) / (total relevant items)

        Where:
            - precision@k = (relevant items in top k) / k
            - relevance@k = 1 if item k is relevant, 0 otherwise

        Args:
            verdict_list: List of binary verdicts (1 for relevant, 0 for not)

        Returns:
            Average precision score (0.0-1.0); effectively 0.0 when no item is
            relevant, since the epsilon in the denominator prevents division by zero
        """
        # Count total relevant items (epsilon avoids division by zero)
        denominator = sum(verdict_list) + 1e-10

        # Sum precision@k over the relevant positions only
        numerator = sum(
            [
                (sum(verdict_list[: i + 1]) / (i + 1)) * verdict_list[i]
                for i in range(len(verdict_list))
            ]
        )

        score = numerator / denominator

        # Defensive check: propagate nan if the score is somehow invalid
        if np.isnan(score):
            return np.nan

        return score
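
To make the average-precision arithmetic above concrete, here is a small standalone sketch of the same computation on a hypothetical verdict list (the helper name is illustrative, not part of the commit):

    def average_precision(verdicts: list[int]) -> float:
        """Standalone version of the AP computation used by the metric above."""
        relevant = sum(verdicts) + 1e-10  # epsilon, as in the metric
        hits = 0
        total = 0.0
        for k, v in enumerate(verdicts, start=1):
            hits += v
            total += (hits / k) * v  # precision@k, counted only at relevant positions
        return total / relevant

    # Three ranked contexts; the 1st and 3rd were judged useful:
    # precision@1 = 1/1, precision@3 = 2/3, two relevant items in total.
    print(average_precision([1, 0, 1]))  # ~0.833 = (1.0 + 0.6667) / 2

Note how a useful context ranked lower (here at position 3) contributes less than one ranked first, which is exactly the ordering sensitivity the metric is after.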
src/ragas/prompt/metrics/context_precision.py

Lines changed: 68 additions & 0 deletions

"""Context Precision prompt for verifying context usefulness."""

import json


def context_precision_prompt(question: str, context: str, answer: str) -> str:
    """
    Generate the prompt for context precision evaluation.

    This prompt asks the LLM to judge whether a given context was useful in
    arriving at the answer.

    Args:
        question: The original question
        context: A single retrieved context to evaluate
        answer: The reference answer to compare against

    Returns:
        Formatted prompt string for the LLM
    """
    # Use json.dumps() to safely escape the strings
    safe_question = json.dumps(question)
    safe_context = json.dumps(context)
    safe_answer = json.dumps(answer)

    return f"""Given a question, answer, and context, verify if the context was useful in arriving at the given answer. Give the verdict as "1" if useful and "0" if not, with JSON output.

--------EXAMPLES-----------
Example 1
Input: {{
    "question": "What can you tell me about Albert Einstein?",
    "context": "Albert Einstein (14 March 1879 – 18 April 1955) was a German-born theoretical physicist, widely held to be one of the greatest and most influential scientists of all time. Best known for developing the theory of relativity, he also made important contributions to quantum mechanics, and was thus a central figure in the revolutionary reshaping of the scientific understanding of nature that modern physics accomplished in the first decades of the twentieth century. His mass–energy equivalence formula E = mc2, which arises from relativity theory, has been called 'the world's most famous equation'. He received the 1921 Nobel Prize in Physics 'for his services to theoretical physics, and especially for his discovery of the law of the photoelectric effect', a pivotal step in the development of quantum theory. His work is also known for its influence on the philosophy of science. In a 1999 poll of 130 leading physicists worldwide by the British journal Physics World, Einstein was ranked the greatest physicist of all time. His intellectual achievements and originality have made Einstein synonymous with genius.",
    "answer": "Albert Einstein, born on 14 March 1879, was a German-born theoretical physicist, widely held to be one of the greatest and most influential scientists of all time. He received the 1921 Nobel Prize in Physics for his services to theoretical physics."
}}
Output: {{
    "reason": "The provided context was indeed useful in arriving at the given answer. The context includes key information about Albert Einstein's life and contributions, which are reflected in the answer.",
    "verdict": 1
}}

Example 2
Input: {{
    "question": "who won 2020 icc world cup?",
    "context": "The 2022 ICC Men's T20 World Cup, held from October 16 to November 13, 2022, in Australia, was the eighth edition of the tournament. Originally scheduled for 2020, it was postponed due to the COVID-19 pandemic. England emerged victorious, defeating Pakistan by five wickets in the final to clinch their second ICC Men's T20 World Cup title.",
    "answer": "England"
}}
Output: {{
    "reason": "the context was useful in clarifying the situation regarding the 2020 ICC World Cup and indicating that England was the winner of the tournament that was intended to be held in 2020 but actually took place in 2022.",
    "verdict": 1
}}

Example 3
Input: {{
    "question": "What is the tallest mountain in the world?",
    "context": "The Andes is the longest continental mountain range in the world, located in South America. It stretches across seven countries and features many of the highest peaks in the Western Hemisphere. The range is known for its diverse ecosystems, including the high-altitude Andean Plateau and the Amazon rainforest.",
    "answer": "Mount Everest."
}}
Output: {{
    "reason": "the provided context discusses the Andes mountain range, which, while impressive, does not include Mount Everest or directly relate to the question about the world's tallest mountain.",
    "verdict": 0
}}
-----------------------------

Now perform the same with the following input
Input: {{
    "question": {safe_question},
    "context": {safe_context},
    "answer": {safe_answer}
}}
Output: """
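
A quick rendering sketch with hypothetical inputs: because json.dumps() pre-quotes and escapes each value, the substituted strings drop into the template as valid JSON literals, so user-supplied quotes or braces cannot break the prompt structure.

    from ragas.prompt.metrics.context_precision import context_precision_prompt

    prompt = context_precision_prompt(
        question="What is the capital of France?",
        context="Paris is the capital of France.",
        answer="Paris",
    )
    print(prompt)
    # The rendered prompt ends with:
    # Input: {
    #     "question": "What is the capital of France?",
    #     "context": "Paris is the capital of France.",
    #     "answer": "Paris"
    # }
    # Output: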
