Answer_correctness and answer_semantic_similarity do not work with Bedrock embeddings: 'BedrockEmbeddings' object has no attribute 'embed_text'. Did you mean: 'embed_query'?
[ ] I have checked the documentation and related resources and couldn't resolve my bug.
Bedrock embeddings throw exception when used for answer_correctness and answer_semantic_similarity. See the code and error message below: AttributeError: 'BedrockEmbeddings' object has no attribute 'embed_text'. Did you mean: 'embed_query'?
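The mismatch is easy to confirm directly on the embeddings object (illustrative snippet; assumes AWS credentials and region are configured):

from langchain_community.embeddings import BedrockEmbeddings

emb = BedrockEmbeddings(region_name="us-east-1")
print(hasattr(emb, "embed_query"))  # True: the LangChain interface
print(hasattr(emb, "embed_text"))   # False: the method ragas awaits internally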
Ragas version: 0.1.6
Python version: 3.10
Code to Reproduce

import json
import os
import random

import boto3
import nest_asyncio
import pandas as pd
import tqdm
from datasets import Dataset, Features, Sequence, Value
from langchain.llms.bedrock import Bedrock
from langchain_community.chat_models import BedrockChat
from langchain_community.embeddings import BedrockEmbeddings
from ragas import evaluate
from ragas.embeddings.base import HuggingfaceEmbeddings
from ragas.llms import LangchainLLMWrapper
from ragas.llms.prompt import Prompt
from ragas.metrics import (
    faithfulness,
    answer_relevancy,
    context_recall,
    context_precision,
    context_utilization,
    AnswerCorrectness,
)
from ragas.metrics.critique import harmfulness
class ChatbotEvaluator:
    def __init__(self, llm_model, embeddings=None, context_retriever=None):
        self.llm_model = llm_model
        self.context_retriever = context_retriever
        self.embeddings = embeddings
    def get_evaluation_dataset(self, dataset, required_columns=None):
        if required_columns is None:
            required_columns = ['question', 'contexts', 'answer', 'ground_truths', 'ground_truth']
        # Filter dataset to include only required columns that are present
        available_columns = [col for col in required_columns if col in dataset.columns]
        filtered_dataset = dataset[available_columns]
        # Dynamically construct the features schema based on available columns
        features_schema = {}
        if 'question' in required_columns:
            features_schema['question'] = Value('string')
        if 'contexts' in required_columns:
            features_schema['contexts'] = Sequence(Value('string'))
        if 'answer' in required_columns:
            features_schema['answer'] = Value('string')
        if 'ground_truths' in required_columns:
            features_schema['ground_truths'] = Sequence(Value('string'))
        if 'ground_truth' in required_columns:
            features_schema['ground_truth'] = Value('string')
        features = Features(features_schema)
        eval_dataset = Dataset.from_pandas(filtered_dataset, features=features)
        return eval_dataset

    def evaluate_offline(self, dataset):
        """
        Evaluate a batch of queries offline from a dataset.
        The dataset is expected to contain columns for question, contexts, answer, and ground truth.
        """
        nest_asyncio.apply()
        metrics = []
        # I still need to implement a way to account for retrieving context if not available
        if "contexts" in dataset.column_names and "answer" in dataset.column_names:
            metrics.append(faithfulness)
        if "question" in dataset.column_names and "answer" in dataset.column_names:
            if self.embeddings is not None:
                answer_relevancy.embeddings = self.embeddings
            metrics.append(answer_relevancy)
        if "question" in dataset.column_names and "answer" in dataset.column_names and "ground_truth" in dataset.column_names:
            answer_correctness = AnswerCorrectness(weights=[0.75, 0.25])
            if self.embeddings is not None:
                answer_correctness.embeddings = self.embeddings
            metrics.append(answer_correctness)
        if "question" in dataset.column_names and "contexts" in dataset.column_names:
            metrics.append(context_utilization)
        if "ground_truths" in dataset.column_names and "contexts" in dataset.column_names:
            metrics.append(context_precision)
        if "contexts" in dataset.column_names and "ground_truths" in dataset.column_names:
            metrics.append(context_recall)
        for m in metrics:
            setattr(m, "llm", self.llm_model)
        result = evaluate(
            dataset,
            metrics=metrics
        )
        return result
config = {
    "region_name": "us-east-1",
    "model_id": 'anthropic.claude-3-sonnet-20240229-v1:0',
    "model_kwargs": {"temperature": 0},
}

bedrock_model = BedrockChat(
    region_name=config["region_name"],
    endpoint_url=f"https://bedrock-runtime.{config['region_name']}.amazonaws.com",
    model_id=config["model_id"],
    model_kwargs=config["model_kwargs"],
)

ragas_bedrock_model = LangchainLLMWrapper(bedrock_model)
bedrock_embeddings = BedrockEmbeddings(region_name=config["region_name"])

evaluator = ChatbotEvaluator(ragas_bedrock_model, bedrock_embeddings)

data = {
    "question": [
        "What is the capital of France?",
    ],
    "answer": [
        "The capital of France is Paris.",
    ],
    "contexts": [
        [
            "Pele is the most decorated soccer player in the world. Soccer is the most popular game in Latin America",
            "France is a country in Western Europe. The capital of France is Paris.",
        ],
    ],
    "ground_truths": [
        [
            "France, in Western Europe, encompasses medieval cities, alpine villages and Mediterranean beaches. Its capital is Paris. The country is also renowned for its wines and sophisticated cuisine. Lascaux’s ancient cave drawings, Lyon’s Roman theater and the vast Palace of Versailles attest to its rich history.'To Kill a Mockingbird' is a novel by Harper Lee, published in 1960.",
        ],
    ],
    "ground_truth": [
        "France, in Western Europe, encompasses medieval cities, alpine villages and Mediterranean beaches. Its capital is Paris. The country is also renowned for its wines and sophisticated cuisine. Lascaux’s ancient cave drawings, Lyon’s Roman theater and the vast Palace of Versailles attest to its rich history.'To Kill a Mockingbird' is a novel by Harper Lee, published in 1960.",
    ],
}

df = pd.DataFrame(data)
evaluation_dataset = evaluator.get_evaluation_dataset(dataset=df)
evaluation = evaluator.evaluate_offline(evaluation_dataset)
evaluation
Error trace

Exception in thread Thread-5:
Traceback (most recent call last):
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
    self.run()
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/executor.py", line 96, in run
    results = self.loop.run_until_complete(self._aresults())
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/nest_asyncio.py", line 98, in run_until_complete
    return f.result()
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/asyncio/futures.py", line 201, in result
    raise self._exception.with_traceback(self._exception_tb)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/asyncio/tasks.py", line 232, in __step
    result = coro.send(None)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/executor.py", line 84, in _aresults
    raise e
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/executor.py", line 79, in _aresults
    r = await future
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/asyncio/tasks.py", line 571, in _wait_for_one
    return f.result()  # May raise f.exception().
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/asyncio/futures.py", line 201, in result
    raise self._exception.with_traceback(self._exception_tb)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/asyncio/tasks.py", line 232, in __step
    result = coro.send(None)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/executor.py", line 38, in sema_coro
    return await coro
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/executor.py", line 112, in wrapped_callable_async
    return counter, await callable(*args, **kwargs)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/base.py", line 114, in ascore
    raise e
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/base.py", line 110, in ascore
    score = await self._ascore(row=row, callbacks=group_cm, is_async=is_async)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/_answer_correctness.py", line 165, in _ascore
    similarity_score = await self.answer_similarity.ascore(
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/base.py", line 114, in ascore
    raise e
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/base.py", line 110, in ascore
    score = await self._ascore(row=row, callbacks=group_cm, is_async=is_async)
  File "/home/ec2-user/anaconda3/envs/tensorflow2_p310/lib/python3.10/site-packages/ragas/metrics/_answer_similarity.py", line 65, in _ascore
    embedding_1 = np.array(await self.embeddings.embed_text(ground_truth))
AttributeError: 'BedrockEmbeddings' object has no attribute 'embed_text'. Did you mean: 'embed_query'?
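The last frame is the root cause: ragas' AnswerSimilarity awaits self.embeddings.embed_text(...), a method defined on ragas' own BaseRagasEmbeddings interface, while LangChain embeddings such as BedrockEmbeddings only expose embed_query/embed_documents. A simplified sketch of the two interfaces (abbreviated for illustration, not the exact library source):

from abc import ABC, abstractmethod

class BaseRagasEmbeddings(ABC):
    # What ragas 0.1.x metrics expect: an async embed_text method.
    @abstractmethod
    async def embed_text(self, text: str) -> list[float]: ...

class LangChainStyleEmbeddings:
    # What BedrockEmbeddings actually implements: sync query/document methods.
    def embed_query(self, text: str) -> list[float]: ...
    def embed_documents(self, texts: list[str]) -> list[list[float]]: ...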
Expected behavior

I want to be able to run answer_correctness and answer_semantic_similarity when question, answer, contexts, and ground truth are provided in the dataset.
@MikhailKozineHBS try wrapping your embeddings in a LangchainEmbeddingsWrapper:

from langchain_community.embeddings import SentenceTransformerEmbeddings
from ragas.embeddings import LangchainEmbeddingsWrapper

embeddings = SentenceTransformerEmbeddings(model_name="maidalun1020/bce-embedding-base_v1",
                                           model_kwargs={"device": torch_device()})  # torch_device() is a local helper
embeddings = LangchainEmbeddingsWrapper(embeddings)
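Applied to the Bedrock setup from the reproduction above, that would look something like this (untested sketch, using the same region as the issue's config):

from langchain_community.embeddings import BedrockEmbeddings
from ragas.embeddings import LangchainEmbeddingsWrapper

bedrock_embeddings = BedrockEmbeddings(region_name="us-east-1")
# Wrap the LangChain embeddings so ragas gets the async embed_text it expects
bedrock_embeddings = LangchainEmbeddingsWrapper(bedrock_embeddings)
evaluator = ChatbotEvaluator(ragas_bedrock_model, bedrock_embeddings)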