
Ragas testset generation throws error "TypeError: AsyncMessages.create() got an unexpected keyword argument 'n'" with LlamaIndex and Anthropic

Open ahgraber opened this issue 1 year ago • 2 comments

[x] I have checked the documentation and related resources and couldn't resolve my bug.

Describe the bug I am using Ragas to generate a test dataset, using LlamaIndex with Anthropic for both the generator and critic LLMs. When I add documents/nodes to the docstore, I receive the error `TypeError: AsyncMessages.create() got an unexpected keyword argument 'n'`.

Ragas version: 0.1.21
Python version: 3.11.9 (CPython)
LlamaIndex version: 0.11.16
Anthropic version: 0.36.0

Code to Reproduce

from langchain.text_splitter import TokenTextSplitter
from llama_index.core import Document as LlamaDoc
from llama_index.core.node_parser import MarkdownNodeParser
from llama_index.embeddings.voyageai import VoyageEmbedding
from llama_index.llms.anthropic import Anthropic
from ragas.embeddings.base import LlamaIndexEmbeddingsWrapper
from ragas.llms import LlamaIndexLLMWrapper
from ragas.run_config import RunConfig
from ragas.testset.docstore import Document as RagasDoc, InMemoryDocumentStore, Node as RagasNode
from ragas.testset.evolutions import multi_context, reasoning, simple
from ragas.testset.extractor import KeyphraseExtractor
from ragas.testset.generator import TestsetGenerator

llm = Anthropic(
    api_key=ANTHROPIC_API_KEY,
    model="claude-3-5-sonnet-20240620",
    temperature=0,
    max_tokens=1024,
    max_retries=10,
    timeout=60,
)
em = VoyageEmbedding(
    voyage_api_key=VOYAGE_API_KEY,
    model_name="voyage-3-lite",
)

run_config = RunConfig()

docstore = InMemoryDocumentStore(
    splitter=TokenTextSplitter(chunk_size=8000, chunk_overlap=0),
    embeddings=LlamaIndexEmbeddingsWrapper(em),
    extractor=KeyphraseExtractor(LlamaIndexLLMWrapper(llm)),
    run_config=run_config,
)

# for *reasons* I have already chunked my LlamaIndex documents into nodes
documents = [LlamaDoc(text=t, metadata=m) for t, m in ...]

parser = MarkdownNodeParser()
nodes = parser.get_nodes_from_documents(documents)

# so I load them into the docstore directly
# This runs the embedding and keyword extraction pipeline
docstore.add_nodes([RagasNode.from_llamaindex_document(node) for node in nodes])
# I get the error `TypeError: AsyncMessages.create() got an unexpected keyword argument 'n'`

Error trace

TypeError Traceback (most recent call last) Cell In[28], line 8 1 # %% 2 # Manually fill docstore with existing nodes from llamaindex 3 (...) 6 7 # NOTE: this will run embeddings and keyphrase extraction for all nodes! ----> 8 docstore.add_nodes([RagasNode.from_llamaindex_document(node) for node in nodes])

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/testset/docstore.py:251, in InMemoryDocumentStore.add_nodes(self, nodes, show_progress) 244 executor.submit( 245 self.extractor.extract, 246 n, 247 name=f"keyphrase-extraction[{i}]", 248 ) 249 result_idx += 1 --> 251 results = executor.results() 252 if not results: 253 raise ExceptionInRunner()

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/executor.py:116, in Executor.results(self) 112 results.append(r) 114 return results --> 116 results = asyncio.run(_aresults()) 117 sorted_results = sorted(results, key=lambda x: x[0]) 118 return [r[1] for r in sorted_results]

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/nest_asyncio.py:30, in _patch_asyncio.<locals>.run(main, debug) 28 task = asyncio.ensure_future(main) 29 try: ---> 30 return loop.run_until_complete(task) 31 finally: 32 if not task.done():

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/nest_asyncio.py:98, in _patch_loop.<locals>.run_until_complete(self, future) 95 if not f.done(): 96 raise RuntimeError( 97 'Event loop stopped before Future completed.') ---> 98 return f.result()

File ~/micromamba/envs/ragas/lib/python3.11/asyncio/futures.py:203, in Future.result(self) 201 self.__log_traceback = False 202 if self._exception is not None: --> 203 raise self._exception.with_traceback(self._exception_tb) 204 return self._result

File ~/micromamba/envs/ragas/lib/python3.11/asyncio/tasks.py:277, in Task.__step(failed resolving arguments) 273 try: 274 if exc is None: 275 # We use the send method directly, because coroutines 276 # don't have __iter__ and __next__ methods. --> 277 result = coro.send(None) 278 else: 279 result = coro.throw(exc)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/executor.py:111, in Executor.results.<locals>._aresults() 103 results = [] 104 for future in tqdm( 105 futures_as_they_finish, 106 desc=self.desc, (...) 109 leave=self.keep_progress_bar, 110 ): --> 111 r = await future 112 results.append(r) 114 return results

File ~/micromamba/envs/ragas/lib/python3.11/asyncio/tasks.py:615, in as_completed.<locals>._wait_for_one() 612 if f is None: 613 # Dummy value from _on_timeout(). 614 raise exceptions.TimeoutError --> 615 return f.result()

File ~/micromamba/envs/ragas/lib/python3.11/asyncio/futures.py:203, in Future.result(self) 201 self.__log_traceback = False 202 if self._exception is not None: --> 203 raise self._exception.with_traceback(self._exception_tb) 204 return self._result

File ~/micromamba/envs/ragas/lib/python3.11/asyncio/tasks.py:277, in Task.__step(failed resolving arguments) 273 try: 274 if exc is None: 275 # We use the send method directly, because coroutines 276 # don't have __iter__ and __next__ methods. --> 277 result = coro.send(None) 278 else: 279 result = coro.throw(exc)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/executor.py:34, in as_completed.<locals>.sema_coro(coro) 32 async def sema_coro(coro): 33 async with semaphore: ---> 34 return await coro

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/executor.py:60, in Executor.wrap_callable_with_index.<locals>.wrapped_callable_async(*args, **kwargs) 58 except Exception as e: 59 if self.raise_exceptions: ---> 60 raise e 61 else: 62 exec_name = type(e).__name__

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/executor.py:54, in Executor.wrap_callable_with_index.<locals>.wrapped_callable_async(*args, **kwargs) 52 result = np.nan 53 try: ---> 54 result = await callable(*args, **kwargs) 55 except MaxRetriesExceeded as e: 56 # this only for testset generation v2 57 logger.warning(f"max retries exceeded for {e.evolution}")

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/testset/extractor.py:49, in KeyphraseExtractor.extract(self, node, is_async) 47 async def extract(self, node: Node, is_async: bool = True) -> t.List[str]: 48 prompt = self.extractor_prompt.format(text=node.page_content) ---> 49 results = await self.llm.generate(prompt=prompt, is_async=is_async) 50 keyphrases = await json_loader.safe_load( 51 results.generations[0][0].text.strip(), llm=self.llm, is_async=is_async 52 ) 53 keyphrases = keyphrases if isinstance(keyphrases, dict) else {}

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/llms/base.py:98, in BaseRagasLLM.generate(self, prompt, n, temperature, stop, callbacks, is_async) 94 if is_async: 95 agenerate_text_with_retry = add_async_retry( 96 self.agenerate_text, self.run_config 97 ) ---> 98 return await agenerate_text_with_retry( 99 prompt=prompt, 100 n=n, 101 temperature=temperature, 102 stop=stop, 103 callbacks=callbacks, 104 ) 105 else: 106 loop = asyncio.get_event_loop()

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/asyncio/__init__.py:189, in AsyncRetrying.wraps.<locals>.async_wrapped(*args, **kwargs) 187 copy = self.copy() 188 async_wrapped.statistics = copy.statistics # type: ignore[attr-defined] --> 189 return await copy(fn, *args, **kwargs)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/asyncio/__init__.py:111, in AsyncRetrying.__call__(self, fn, *args, **kwargs) 109 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) 110 while True: --> 111 do = await self.iter(retry_state=retry_state) 112 if isinstance(do, DoAttempt): 113 try:

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/asyncio/__init__.py:153, in AsyncRetrying.iter(self, retry_state) 151 result = None 152 for action in self.iter_state.actions: --> 153 result = await action(retry_state) 154 return result

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/_utils.py:99, in wrap_to_async_func.<locals>.inner(*args, **kwargs) 98 async def inner(*args: typing.Any, **kwargs: typing.Any) -> typing.Any: ---> 99 return call(*args, **kwargs)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/__init__.py:418, in BaseRetrying._post_stop_check_actions.<locals>.exc_check(rs) 416 retry_exc = self.retry_error_cls(fut) 417 if self.reraise: --> 418 raise retry_exc.reraise() 419 raise retry_exc from fut.exception()

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/__init__.py:185, in RetryError.reraise(self) 183 def reraise(self) -> t.NoReturn: 184 if self.last_attempt.failed: --> 185 raise self.last_attempt.result() 186 raise self

File ~/micromamba/envs/ragas/lib/python3.11/concurrent/futures/_base.py:449, in Future.result(self, timeout) 447 raise CancelledError() 448 elif self._state == FINISHED: --> 449 return self.__get_result() 451 self._condition.wait(timeout) 453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~/micromamba/envs/ragas/lib/python3.11/concurrent/futures/_base.py:401, in Future.__get_result(self) 399 if self._exception: 400 try: --> 401 raise self._exception 402 finally: 403 # Break a reference cycle with the exception in self._exception 404 self = None

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/tenacity/asyncio/__init__.py:114, in AsyncRetrying.__call__(self, fn, *args, **kwargs) 112 if isinstance(do, DoAttempt): 113 try: --> 114 result = await fn(*args, **kwargs) 115 except BaseException: # noqa: B902 116 retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/ragas/llms/base.py:287, in LlamaIndexLLMWrapper.agenerate_text(self, prompt, n, temperature, stop, callbacks) 284 temperature = 1e-8 286 kwargs = self.check_args(n, temperature, stop, callbacks) --> 287 li_response = await self.llm.acomplete(prompt.to_string(), **kwargs) 289 return LLMResult(generations=[[Generation(text=li_response.text)]])

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/core/instrumentation/dispatcher.py:353, in Dispatcher.span.<locals>.async_wrapper(func, instance, args, kwargs) 345 self.span_enter( 346 id_=id_, 347 bound_args=bound_args, (...) 350 tags=tags, 351 ) 352 try: --> 353 result = await func(*args, **kwargs) 354 except BaseException as e: 355 self.event(SpanDropEvent(span_id=id_, err_str=str(e)))

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/core/llms/callbacks.py:334, in llm_completion_callback.<locals>.wrap.<locals>.wrapped_async_llm_predict(_self, *args, **kwargs) 324 event_id = callback_manager.on_event_start( 325 CBEventType.LLM, 326 payload={ (...) 330 }, 331 ) 333 try: --> 334 f_return_val = await f(_self, *args, **kwargs) 335 except BaseException as e: 336 callback_manager.on_event_end( 337 CBEventType.LLM, 338 payload={EventPayload.EXCEPTION: e}, 339 event_id=event_id, 340 )

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/llms/anthropic/base.py:358, in Anthropic.acomplete(self, prompt, formatted, **kwargs) 353 @llm_completion_callback() 354 async def acomplete( 355 self, prompt: str, formatted: bool = False, **kwargs: Any 356 ) -> CompletionResponse: 357 acomplete_fn = achat_to_completion_decorator(self.achat) --> 358 return await acomplete_fn(prompt, **kwargs)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/core/base/llms/generic_utils.py:221, in achat_to_completion_decorator.<locals>.wrapper(prompt, **kwargs) 218 async def wrapper(prompt: str, **kwargs: Any) -> CompletionResponse: 219 # normalize input 220 messages = prompt_to_messages(prompt) --> 221 chat_response = await func(messages, **kwargs) 222 # normalize output 223 return chat_response_to_completion_response(chat_response)

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/core/instrumentation/dispatcher.py:353, in Dispatcher.span.<locals>.async_wrapper(func, instance, args, kwargs) 345 self.span_enter( 346 id_=id_, 347 bound_args=bound_args, (...) 350 tags=tags, 351 ) 352 try: --> 353 result = await func(*args, **kwargs) 354 except BaseException as e: 355 self.event(SpanDropEvent(span_id=id_, err_str=str(e)))

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/core/llms/callbacks.py:75, in llm_chat_callback.<locals>.wrap.<locals>.wrapped_async_llm_chat(_self, messages, **kwargs) 66 event_id = callback_manager.on_event_start( 67 CBEventType.LLM, 68 payload={ (...) 72 }, 73 ) 74 try: ---> 75 f_return_val = await f(_self, messages, **kwargs) 76 except BaseException as e: 77 callback_manager.on_event_end( 78 CBEventType.LLM, 79 payload={EventPayload.EXCEPTION: e}, 80 event_id=event_id, 81 )

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/llama_index/llms/anthropic/base.py:335, in Anthropic.achat(self, messages, **kwargs) 332 anthropic_messages, system_prompt = messages_to_anthropic_messages(messages) 333 all_kwargs = self._get_all_kwargs(**kwargs) --> 335 response = await self._aclient.messages.create( 336 messages=anthropic_messages, 337 system=system_prompt, 338 stream=False, 339 **all_kwargs, 340 ) 342 content, tool_calls = self._get_content_and_tool_calls(response) 344 return ChatResponse( 345 message=ChatMessage( 346 role=MessageRole.ASSISTANT, (...) 350 raw=dict(response), 351 )

File ~/micromamba/envs/ragas/lib/python3.11/site-packages/anthropic/_utils/_utils.py:274, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs) 272 msg = f"Missing required argument: {quote(missing[0])}" 273 raise TypeError(msg) --> 274 return func(*args, **kwargs)

TypeError: AsyncMessages.create() got an unexpected keyword argument 'n'

Expected behavior Ragas should not throw an error

Additional context

This seems to be a combined issue between Anthropic, LlamaIndex, and Ragas:

  • I have successfully used LlamaIndex's OpenAI and Together wrappers with this same code
  • I have successfully run the equivalent code with LangChain using Anthropic

My guess is that the problem lies at the intersection of Ragas' LlamaIndex LLM wrapper and the fact that Anthropic's client strictly validates the kwargs it is passed.

Specifically, the code here indicates that `n` isn't really supported for LlamaIndex LLMs, yet it is still passed along in kwargs by the generate_text and agenerate_text methods.
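
A minimal sketch (not part of the original report) of why the kwarg is fatal: the Anthropic SDK's messages.create() has no n parameter, so Python raises the TypeError while binding the call's arguments, before any request is sent. The API key below is a placeholder for illustration only; no network call is made.

import anthropic

# Placeholder key (assumption): nothing is sent over the network, because the
# TypeError is raised during argument binding.
client = anthropic.AsyncAnthropic(api_key="sk-placeholder")

try:
    client.messages.create(
        model="claude-3-5-sonnet-20240620",
        max_tokens=16,
        messages=[{"role": "user", "content": "hello"}],
        n=1,  # not a parameter accepted by the Anthropic SDK
    )
except TypeError as e:
    print(e)  # AsyncMessages.create() got an unexpected keyword argument 'n'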

ahgraber, Oct 10 '24 15:10

Hey @ahgraber, how's it going? I'm here to help you out with any bugs, questions, or contributions you might have. Let's dive in and get that issue sorted!

To resolve the "TypeError: AsyncMessages.create() got an unexpected keyword argument 'n'" error, you need to modify the check_args method in the LlamaIndexLLMWrapper class. The acomplete method of the LLM does not expect an n argument, so it should be removed when the LLM is not of type "bedrock".

Here's how you can adjust the check_args method:

def check_args(
    self,
    n: int,
    temperature: float,
    stop: t.Optional[t.List[str]],
    callbacks: Callbacks,
) -> dict[str, t.Any]:
    if n != 1:
        logger.warning("n values greater than 1 not supported for LlamaIndex LLMs")
    if temperature != 1e-8:
        logger.info("temperature kwarg passed to LlamaIndex LLM")
    if stop is not None:
        logger.info("stop kwarg passed to LlamaIndex LLM")
    if callbacks is not None:
        logger.info(
            "callbacks not supported for LlamaIndex LLMs, ignoring callbacks"
        )
    if self._signature == "bedrock":
        return {"temperature": temperature}
    else:
        return {
            "temperature": temperature,
            "stop": stop,
        }

By ensuring the n argument is not included in the dictionary returned by check_args when the LLM is not "bedrock", you should avoid the TypeError [1].

To continue talking to Dosu, mention @dosu.

dosubot[bot], Oct 10 '24 15:10

I can confirm that hacking the check_args method allows the Anthropic LLM call to work:

    def check_args(
        self,
        n: int,
        temperature: float,
        stop: t.Optional[t.List[str]],
        callbacks: Callbacks,
    ) -> dict[str, t.Any]:
        if n != 1:
            logger.warning("n values greater than 1 not support for LlamaIndex LLMs")
        if temperature != 1e-8:
            logger.info("temperature kwarg passed to LlamaIndex LLM")
        if stop is not None:
            logger.info("stop kwarg passed to LlamaIndex LLM")
        if callbacks is not None:
            logger.info(
                "callbacks not supported for LlamaIndex LLMs, ignoring callbacks"
            )
        if (
            self._signature == "bedrock"
            or type(self.llm).__name__.lower() == "anthropic"
        ):
            # ref: https://github.com/explodinggradients/ragas/issues/1468
            return {"temperature": temperature}
        else:
            return {
                "n": n,
                "temperature": temperature,
                "stop": stop,
            }
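
An alternative that avoids editing the installed package is to patch the wrapper at runtime. This is only a sketch, assuming check_args keeps the signature shown above; _check_args_drop_n is a hypothetical helper name, not part of ragas:

from ragas.llms import LlamaIndexLLMWrapper

_orig_check_args = LlamaIndexLLMWrapper.check_args

def _check_args_drop_n(self, n, temperature, stop, callbacks):
    # Defer to the original implementation, then drop the 'n' kwarg that
    # Anthropic's client rejects as unexpected.
    kwargs = _orig_check_args(self, n, temperature, stop, callbacks)
    kwargs.pop("n", None)
    return kwargs

# Apply the patch before constructing the docstore / TestsetGenerator.
LlamaIndexLLMWrapper.check_args = _check_args_drop_n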

ahgraber, Oct 10 '24 16:10