JinaChat Authentication
System Info
langchain 0.0.229, Python 3.10
Who can help?
@delgermurun
Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
Reproduction
import os
from langchain.chat_models import JinaChat
from langchain.schema import HumanMessage
os.environ["JINACHAT_API_KEY"] = "..." # from https://cloud.jina.ai/settings/tokens
chat = JinaChat(temperature=0)
messages = [
    HumanMessage(
        content="Translate this sentence from English to French: I love you!"
    )
]
print(chat(messages))
Expected behavior
Expected output: Je t'aime
Actual output:
---------------------------------------------------------------------------
AuthenticationError Traceback (most recent call last)
Cell In[7], line 10
3 chat = JinaChat(temperature=0)
5 messages = [
6 HumanMessage(
7 content="Translate this sentence from English to French: I love generative AI!"
8 )
9 ]
---> 10 chat(messages)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/base.py:349, in BaseChatModel.__call__(self, messages, stop, callbacks, **kwargs)
342 def __call__(
343 self,
344 messages: List[BaseMessage],
(...)
347 **kwargs: Any,
348 ) -> BaseMessage:
--> 349 generation = self.generate(
350 [messages], stop=stop, callbacks=callbacks, **kwargs
351 ).generations[0][0]
352 if isinstance(generation, ChatGeneration):
353 return generation.message
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/base.py:125, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, **kwargs)
123 if run_managers:
124 run_managers[i].on_llm_error(e)
--> 125 raise e
126 flattened_outputs = [
127 LLMResult(generations=[res.generations], llm_output=res.llm_output)
128 for res in results
129 ]
130 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/base.py:115, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, **kwargs)
112 for i, m in enumerate(messages):
113 try:
114 results.append(
--> 115 self._generate_with_cache(
116 m,
117 stop=stop,
118 run_manager=run_managers[i] if run_managers else None,
119 **kwargs,
120 )
121 )
122 except (KeyboardInterrupt, Exception) as e:
123 if run_managers:
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/base.py:262, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
258 raise ValueError(
259 "Asked to cache, but no cache found at `langchain.cache`."
260 )
261 if new_arg_supported:
--> 262 return self._generate(
263 messages, stop=stop, run_manager=run_manager, **kwargs
264 )
265 else:
266 return self._generate(messages, stop=stop, **kwargs)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/jinachat.py:288, in JinaChat._generate(self, messages, stop, run_manager, **kwargs)
281 message = _convert_dict_to_message(
282 {
283 "content": inner_completion,
284 "role": role,
285 }
286 )
287 return ChatResult(generations=[ChatGeneration(message=message)])
--> 288 response = self.completion_with_retry(messages=message_dicts, **params)
289 return self._create_chat_result(response)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/jinachat.py:244, in JinaChat.completion_with_retry(self, **kwargs)
240 @retry_decorator
241 def _completion_with_retry(**kwargs: Any) -> Any:
242 return self.client.create(**kwargs)
--> 244 return _completion_with_retry(**kwargs)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/tenacity/__init__.py:289, in BaseRetrying.wraps.<locals>.wrapped_f(*args, **kw)
287 @functools.wraps(f)
288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
--> 289 return self(f, *args, **kw)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/tenacity/__init__.py:379, in Retrying.__call__(self, fn, *args, **kwargs)
377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
378 while True:
--> 379 do = self.iter(retry_state=retry_state)
380 if isinstance(do, DoAttempt):
381 try:
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/tenacity/__init__.py:314, in BaseRetrying.iter(self, retry_state)
312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
313 if not (is_explicit_retry or self.retry(retry_state)):
--> 314 return fut.result()
316 if self.after is not None:
317 self.after(retry_state)
File /opt/anaconda3/envs/langchain/lib/python3.10/concurrent/futures/_base.py:451, in Future.result(self, timeout)
449 raise CancelledError()
450 elif self._state == FINISHED:
--> 451 return self.__get_result()
453 self._condition.wait(timeout)
455 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
File /opt/anaconda3/envs/langchain/lib/python3.10/concurrent/futures/_base.py:403, in Future.__get_result(self)
401 if self._exception:
402 try:
--> 403 raise self._exception
404 finally:
405 # Break a reference cycle with the exception in self._exception
406 self = None
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/tenacity/__init__.py:382, in Retrying.__call__(self, fn, *args, **kwargs)
380 if isinstance(do, DoAttempt):
381 try:
--> 382 result = fn(*args, **kwargs)
383 except BaseException: # noqa: B902
384 retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain/chat_models/jinachat.py:242, in JinaChat.completion_with_retry.<locals>._completion_with_retry(**kwargs)
240 @retry_decorator
241 def _completion_with_retry(**kwargs: Any) -> Any:
--> 242 return self.client.create(**kwargs)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/openai/api_resources/chat_completion.py:25, in ChatCompletion.create(cls, *args, **kwargs)
23 while True:
24 try:
---> 25 return super().create(*args, **kwargs)
26 except TryAgain as e:
27 if timeout is not None and time.time() > start + timeout:
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/openai/api_resources/abstract/engine_api_resource.py:153, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
127 @classmethod
128 def create(
129 cls,
(...)
136 **params,
137 ):
138 (
139 deployment_id,
140 engine,
(...)
150 api_key, api_base, api_type, api_version, organization, **params
151 )
--> 153 response, _, api_key = requestor.request(
154 "post",
155 url,
156 params=params,
157 headers=headers,
158 stream=stream,
159 request_id=request_id,
160 request_timeout=request_timeout,
161 )
163 if stream:
164 # must be an iterator
165 assert not isinstance(response, OpenAIResponse)
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/openai/api_requestor.py:298, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
277 def request(
278 self,
279 method,
(...)
286 request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
287 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
288 result = self.request_raw(
289 method.lower(),
290 url,
(...)
296 request_timeout=request_timeout,
297 )
--> 298 resp, got_stream = self._interpret_response(result, stream)
299 return resp, got_stream, self.api_key
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/openai/api_requestor.py:700, in APIRequestor._interpret_response(self, result, stream)
692 return (
693 self._interpret_response_line(
694 line, result.status_code, result.headers, stream=True
695 )
696 for line in parse_stream(result.iter_lines())
697 ), True
698 else:
699 return (
--> 700 self._interpret_response_line(
701 result.content.decode("utf-8"),
702 result.status_code,
703 result.headers,
704 stream=False,
705 ),
706 False,
707 )
File /opt/anaconda3/envs/langchain/lib/python3.10/site-packages/openai/api_requestor.py:763, in APIRequestor._interpret_response_line(self, rbody, rcode, rheaders, stream)
761 stream_error = stream and "error" in resp.data
762 if stream_error or not 200 <= rcode < 300:
--> 763 raise self.handle_error_response(
764 rbody, rcode, resp.data, rheaders, stream_error=stream_error
765 )
766 return resp
AuthenticationError: Invalid token
Answer generated by a 🤖
Answer
From your description, it appears that you're encountering an "AuthenticationError: Invalid token" error when trying to use the JinaChat service in the LangChain framework. This error typically arises when the API key used for authentication is invalid.
To address this issue, I would suggest verifying the API key you're using. You've set the API key in the environment variable JINACHAT_API_KEY as shown in your code:
os.environ["JINACHAT_API_KEY"] = "..." # from https://cloud.jina.ai/settings/tokens
Please ensure that the API key you're using is valid and correctly copied from your JinaChat account. You can usually find this in your account settings on the JinaChat website or service.
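For example, a quick sanity check is to confirm the token is actually set before constructing the model. This is only a sketch and assumes the token was exported under JINACHAT_API_KEY as in your snippet:

import os

from langchain.chat_models import JinaChat

# Fail fast if the token is missing or still the "..." placeholder.
token = os.environ.get("JINACHAT_API_KEY", "")
if not token or token == "...":
    raise RuntimeError("JINACHAT_API_KEY is not set to a real token")

chat = JinaChat(temperature=0)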
However, please note that this is just one potential solution based on the information provided. If you've verified the API key and are still encountering the same issue, it's possible that there might be a problem with the JinaChat service itself, or the key might have been revoked or expired.
If you believe this is a bug that could impact other users, feel free to make a pull request with a proposed fix. If the issue persists after trying these steps, I would recommend checking if you're using the latest version of LangChain, as the issue might have been resolved in a more recent update.
@benman1 did you get an API key from https://chat.jina.ai/api and set the JINACHAT_API_KEY environment variable?
I tried replicating this and can confirm I'm getting a similar error, even after setting JINA_API_KEY. I assume the model itself can't be implemented wrongly, but as a check I passed the API key directly in my code - can that cause an error? (If not, then @benman1 has a genuine issue.)
@delgermurun thanks for clarifying where to get the API keys for the chat. I found https://cloud.jina.ai/settings/tokens mentioned elsewhere, hence the confusion. I can confirm it works with an API key from the right place.
I've created a mini PR #7540 to clarify this in the Jina chat docstring.
@benman1 thanks a lot.
@dudesparsh just in case you missed it, the environment variable is JINACHAT_API_KEY, not JINA_API_KEY.
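For completeness, here is a minimal sketch of both ways to supply the token. The jinachat_api_key constructor field name is taken from the 0.0.229 source, so treat it as an assumption and check it against your installed version:

import os

from langchain.chat_models import JinaChat

# Option 1: environment variable (note the name: JINACHAT_API_KEY, not JINA_API_KEY).
os.environ["JINACHAT_API_KEY"] = "your-jinachat-token"  # token from https://chat.jina.ai/api
chat = JinaChat(temperature=0)

# Option 2: pass the token explicitly (field name assumed from the 0.0.229 source).
chat = JinaChat(temperature=0, jinachat_api_key="your-jinachat-token")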
Hi, @benman1! I'm Dosu, and I'm helping the LangChain team manage our backlog. I wanted to let you know that we are marking this issue as stale.
From what I understand, the issue you reported was related to authentication in the JinaChat module. You encountered an "AuthenticationError: Invalid token" when running the code. However, it seems that the issue has been resolved. Another user named dosu-beta suggested verifying the API key used for authentication, and you confirmed that using the correct API key resolved the issue. In fact, you have even created a pull request to update the Jina chat docstring to clarify this solution.
Before we close this issue, we wanted to check with you if it is still relevant to the latest version of the LangChain repository. If it is, please let us know by commenting on this issue. Otherwise, feel free to close the issue yourself, or it will be automatically closed in 7 days.
Thank you for your contribution and for helping us improve LangChain!
I am trying to use the Jina embedding model and am facing an authentication issue:
The given Jina auth token is invalid. Please check your Jina auth token. (type=value_error)
The sample Python code given on the Jina Embeddings page is:
import requests

url = 'https://api.jina.ai/v1/embeddings'
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer jina_xxxxxxxx-xx'
}
data = {
    'input': ["Your text string goes here", "You can send multiple texts"],
    'model': 'jina-embeddings-v2-base-en'
}
response = requests.post(url, headers=headers, json=data)
It's using a Bearer token there. On the other hand, the sample code given in the LangChain docs is:
from langchain.embeddings import JinaEmbeddings

jina_auth_token = 'Bearer jina_xxxxxxxx-xx'
# jina_auth_token = 'jina_xxxxxxxx-xx'
embeddings = JinaEmbeddings(
    jina_auth_token=jina_auth_token, model_name="ViT-B-32::openai"
)
I used both variations of the token there, i.e. with and without the Bearer prefix, but it still gives me the authentication error.
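One way to narrow this down is to call the REST endpoint from the Jina sample directly, outside LangChain: if that request also fails to authenticate, the token itself is the problem rather than the JinaEmbeddings wrapper. A minimal sketch (the token value is a placeholder):

import requests

# Endpoint, headers, and payload copied from the Jina Embeddings sample above.
url = "https://api.jina.ai/v1/embeddings"
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer jina_xxxxxxxx-xx",  # replace with your real token
}
data = {
    "input": ["Your text string goes here"],
    "model": "jina-embeddings-v2-base-en",
}

response = requests.post(url, headers=headers, json=data)
print(response.status_code, response.json())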
Hey @hammad26,
I am going to fix the implementation so that JinaEmbeddings can be used properly after the new platform release.