MetaGPT icon indicating copy to clipboard operation
MetaGPT copied to clipboard

openai.error.RateLimitError: Rate limit reached for 10KTPM-200RPM in organization org-mSnj0BKpxchO5RMRJ0Q7zro0 on tokens per min. Limit: 10000 / min. Please try again in 6ms.

Open gphett opened this issue 2 years ago • 5 comments
trafficstars

We need to insert a pause so we do not get this error.

gphett avatar Aug 21 '23 19:08 gphett

from openai.error import RateLimitError

async def _achat_completion_stream(self, messages: list[dict]) -> str:
    """Stream a chat completion from OpenAI, retrying on rate-limit errors.

    Args:
        messages: Chat messages in the OpenAI ``{"role": ..., "content": ...}``
            dict format.

    Returns:
        The full assistant reply assembled from the streamed delta chunks.

    Raises:
        RateLimitError: If the request still hits the rate limit after
            ``max_retries`` attempts.
    """
    retry_delay = 60  # Seconds to wait before retrying after a rate-limit error.
    max_retries = 10  # Give up after this many rate-limited attempts.

    for attempt in range(max_retries):
        try:
            response = await openai.ChatCompletion.acreate(
                **self._cons_kwargs(messages), stream=True
            )

            # Collect the streamed delta messages, echoing content as it arrives.
            collected_messages = []
            async for chunk in response:
                chunk_message = chunk["choices"][0]["delta"]  # Extract the delta
                collected_messages.append(chunk_message)
                if "content" in chunk_message:
                    print(chunk_message["content"], end="")
            print()

            full_reply_content = "".join(m.get("content", "") for m in collected_messages)
            usage = self._calc_usage(messages, full_reply_content)
            self._update_costs(usage)
            return full_reply_content

        except RateLimitError as e:
            # Wait out the rate-limit window before the next attempt.
            logger.warning(f"Rate limit error: {e}. Retrying in {retry_delay} seconds...")
            await asyncio.sleep(retry_delay)

    logger.error(f"Reached maximum retries ({max_retries}) for rate limit error.")
    raise RateLimitError("Maximum retries reached for rate limit error.")

This seems to have fixed it for me

gphett avatar Aug 21 '23 20:08 gphett

I think tenacity is supposed to be the one that handles the retries, but this is so far the only solution that worked for me. Thanks

dvizcaya91 avatar Aug 24 '23 21:08 dvizcaya91

Perhaps you could provide a Pull Request?

pablospe avatar Sep 03 '23 22:09 pablospe

check the RPM parameter in https://github.com/geekan/MetaGPT/blob/main/config/config.yaml

geekan avatar Sep 09 '23 03:09 geekan

T

from openai.error import RateLimitError

async def _achat_completion_stream(self, messages: list[dict]) -> str:
    """Request a streamed chat completion, retrying when rate-limited.

    Args:
        messages: OpenAI-style chat messages (``{"role": ..., "content": ...}``).

    Returns:
        The complete reply text joined from all streamed deltas.

    Raises:
        RateLimitError: When every one of ``max_retries`` attempts was
            rate-limited.
    """
    retry_delay = 60  # Seconds to sleep between rate-limited attempts.
    max_retries = 10  # Maximum number of rate-limited attempts before giving up.

    for _attempt in range(max_retries):
        try:
            response = await openai.ChatCompletion.acreate(
                **self._cons_kwargs(messages), stream=True
            )

            # Accumulate the streamed deltas; print content pieces as they arrive.
            deltas = []
            async for chunk in response:
                delta = chunk["choices"][0]["delta"]  # Message fragment for this event
                deltas.append(delta)
                if "content" in delta:
                    print(delta["content"], end="")
            print()

            full_reply_content = "".join(d.get("content", "") for d in deltas)
            usage = self._calc_usage(messages, full_reply_content)
            self._update_costs(usage)
            return full_reply_content

        except RateLimitError as e:
            # Back off for the rate-limit window, then retry.
            logger.warning(f"Rate limit error: {e}. Retrying in {retry_delay} seconds...")
            await asyncio.sleep(retry_delay)

    logger.error(f"Reached maximum retries ({max_retries}) for rate limit error.")
    raise RateLimitError("Maximum retries reached for rate limit error.")

This seems to have fixed it for me

This worked for me as well, thank you!

thatiodeguy avatar Sep 27 '23 19:09 thatiodeguy