
Error when running `python cli_demo1.py`

Open · heiyexingguang opened this issue 1 year ago · 6 comments

```
Traceback (most recent call last):
  File "cli_demo1.py", line 16, in <module>
    local_doc_qa.init_cfg(llm_model=LLM_MODEL,
  File "/app/_old/langchain-ChatGLM-master/chains/local_doc_qa.py", line 47, in init_cfg
    self.llm.load_model(model_name_or_path=llm_model_dict[llm_model],
TypeError: load_model() got an unexpected keyword argument 'use_ptuning_v2'
```

heiyexingguang avatar Apr 20 '23 07:04 heiyexingguang
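
For anyone else hitting this trace: the TypeError means `cli_demo1.py` passes a `use_ptuning_v2` keyword that `load_model()` in that checkout does not declare. A minimal sketch of the mismatch, with class and signature names that are illustrative only, not the project's actual code:

```python
# Minimal reproduction of the mismatch: the caller passes a keyword the
# callee does not declare, so Python raises TypeError at call time.
class ChatGLMWrapper:  # hypothetical stand-in for the project's LLM wrapper
    def load_model(self, model_name_or_path):
        print(f"loading {model_name_or_path}")

llm = ChatGLMWrapper()
# llm.load_model(model_name_or_path="THUDM/chatglm-6b", use_ptuning_v2=False)
# -> TypeError: load_model() got an unexpected keyword argument 'use_ptuning_v2'

# A tolerant variant (a workaround sketch, not the project's actual fix):
# declare the new keyword with a default and swallow unknown extras, so
# older and newer callers can coexist.
class TolerantWrapper:
    def load_model(self, model_name_or_path, use_ptuning_v2=False, **kwargs):
        print(f"loading {model_name_or_path}, ptuning_v2={use_ptuning_v2}")

TolerantWrapper().load_model(
    model_name_or_path="THUDM/chatglm-6b", use_ptuning_v2=False
)
```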

The master branch was updated at noon and should already fix this problem. Please pull the latest code and test again.


imClumsyPanda avatar Apr 20 '23 07:04 imClumsyPanda

```
Input your local knowledge file path 请输入本地知识文件路径:/content/langchain-ChatGLM/content
langchain-ChatGLM_README.md 已成功加载
test.txt 已成功加载
Invalid file /content/langchain-ChatGLM/content/.ipynb_checkpoints. The FileType.UNK file type is not supported in partition.
.ipynb_checkpoints 未能成功加载
test2.txt 已成功加载
Input your question 请输入问题:lang chain
The dtype of attention mask (torch.int64) is not bool
Traceback (most recent call last):
  File "/content/langchain-ChatGLM/cli_demo.py", line 31, in <module>
    resp, history = local_doc_qa.get_knowledge_based_answer(query=query,
  File "/content/langchain-ChatGLM/chains/local_doc_qa.py", line 137, in get_knowledge_based_answer
    result = knowledge_chain({"query": query})
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 116, in __call__
    raise e
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 113, in __call__
    outputs = self._call(inputs)
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/retrieval_qa/base.py", line 110, in _call
    answer = self.combine_documents_chain.run(
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 216, in run
    return self(kwargs)[self.output_keys[0]]
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 116, in __call__
    raise e
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 113, in __call__
    outputs = self._call(inputs)
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/combine_documents/base.py", line 75, in _call
    output, extra_return_dict = self.combine_docs(docs, **other_keys)
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/combine_documents/stuff.py", line 83, in combine_docs
    return self.llm_chain.predict(**inputs), {}
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py", line 151, in predict
    return self(kwargs)[self.output_key]
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 116, in __call__
    raise e
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/base.py", line 113, in __call__
    outputs = self._call(inputs)
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py", line 57, in _call
    return self.apply([inputs])[0]
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py", line 118, in apply
    response = self.generate(input_list)
  File "/usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py", line 62, in generate
    return self.llm.generate_prompt(prompts, stop)
  File "/usr/local/lib/python3.9/dist-packages/langchain/llms/base.py", line 107, in generate_prompt
    return self.generate(prompt_strings, stop=stop)
  File "/usr/local/lib/python3.9/dist-packages/langchain/llms/base.py", line 140, in generate
    raise e
  File "/usr/local/lib/python3.9/dist-packages/langchain/llms/base.py", line 137, in generate
    output = self._generate(prompts, stop=stop)
  File "/usr/local/lib/python3.9/dist-packages/langchain/llms/base.py", line 324, in _generate
    text = self._call(prompt, stop=stop)
  File "/content/langchain-ChatGLM/models/chatglm_llm.py", line 72, in _call
    response, _ = self.model.chat(
  File "/usr/local/lib/python3.9/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/root/.cache/huggingface/modules/transformers_modules/THUDM/chatglm-6b-int4/e02ba894cf18f3fd9b2526c795f983683c4ec732/modeling_chatglm.py", line 1288, in chat
    response = tokenizer.decode(outputs)
  File "/usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base.py", line 3474, in decode
    token_ids = to_py_obj(token_ids)
  File "/usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py", line 174, in to_py_obj
    return [to_py_obj(o) for o in obj]
  File "/usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py", line 174, in <listcomp>
    return [to_py_obj(o) for o in obj]
  File "/usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py", line 175, in to_py_obj
    elif is_tf_tensor(obj):
  File "/usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py", line 151, in is_tf_tensor
    return False if not is_tf_available() else _is_tensorflow(x)
  File "/usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py", line 142, in _is_tensorflow
    import tensorflow as tf
  File "/usr/local/lib/python3.9/dist-packages/tensorflow/__init__.py", line 37, in <module>
    from tensorflow.python.tools import module_util as _module_util
  File "/usr/local/lib/python3.9/dist-packages/tensorflow/python/__init__.py", line 37, in <module>
    from tensorflow.python.eager import context
  File "/usr/local/lib/python3.9/dist-packages/tensorflow/python/eager/context.py", line 28, in <module>
    from tensorflow.core.framework import function_pb2
  File "/usr/local/lib/python3.9/dist-packages/tensorflow/core/framework/function_pb2.py", line 5, in <module>
    from google.protobuf.internal import builder as _builder
ImportError: cannot import name 'builder' from 'google.protobuf.internal' (/usr/local/lib/python3.9/dist-packages/google/protobuf/internal/__init__.py)
```

mikewu0511 avatar Apr 23 '23 09:04 mikewu0511
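
The final ImportError in that trace points at the environment rather than at langchain-ChatGLM itself: TensorFlow's generated `*_pb2` modules import `google.protobuf.internal.builder`, a module that only ships with newer protobuf releases (3.20 and later, to the best of my knowledge). A quick diagnostic sketch to see what the Colab environment actually has:

```python
# Check the installed protobuf version and whether the `builder` module
# that TensorFlow's generated code imports is actually present.
import google.protobuf

print("protobuf version:", google.protobuf.__version__)
try:
    from google.protobuf.internal import builder  # needed by *_pb2 modules
    print("builder import OK")
except ImportError as exc:
    print("builder import failed:", exc)
```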

Running cli_demo.py in Colab raises this error.

mikewu0511 avatar Apr 23 '23 09:04 mikewu0511

Does the same error occur when you load only a single .md or .txt file?

imClumsyPanda avatar Apr 23 '23 11:04 imClumsyPanda

Yes, the same error occurs:

```
Input your local knowledge file path 请输入本地知识文件路径:/content/langchain-ChatGLM/content
langchain-ChatGLM_README.md 已成功加载
Invalid file /content/langchain-ChatGLM/content/.ipynb_checkpoints. The FileType.UNK file type is not supported in partition.
.ipynb_checkpoints 未能成功加载
Input your question 请输入问题:langchain是什么
The dtype of attention mask (torch.int64) is not bool
Traceback (most recent call last):
  File "/content/langchain-ChatGLM/cli_demo.py", line 31, in <module>
    resp, history = local_doc_qa.get_knowledge_based_answer(query=query,
  File "/content/langchain-ChatGLM/chains/local_doc_qa.py", line 139, in get_knowledge_based_answer
    result = knowledge_chain({"query": query})
  ... (same langchain → chatglm_llm → transformers → tensorflow call chain as in the traceback above) ...
  File "/usr/local/lib/python3.9/dist-packages/tensorflow/core/framework/function_pb2.py", line 5, in <module>
    from google.protobuf.internal import builder as _builder
ImportError: cannot import name 'builder' from 'google.protobuf.internal' (/usr/local/lib/python3.9/dist-packages/google/protobuf/internal/__init__.py)
```

mikewu0511 avatar Apr 24 '23 04:04 mikewu0511

Updating protobuf solves it: `!pip install protobuf==3.20.0`

RayYe586 avatar Jun 15 '23 06:06 RayYe586
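
If you apply this in Colab, restart the runtime first, since an already-imported `google.protobuf` will keep masking the newly installed version. A quick post-install sanity check:

```python
# Run in a fresh kernel after `pip install protobuf==3.20.0`;
# both statements should now succeed without an ImportError.
from google.protobuf.internal import builder  # noqa: F401
import google.protobuf

print(google.protobuf.__version__)  # expect 3.20.0
```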

After installing protobuf 3.20.0, pip reports:

```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
icetk 0.0.7 requires protobuf<3.19, but you have protobuf 3.20.0 which is incompatible.
```

l5276261 avatar Jun 26 '23 12:06 l5276261
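
Note that this pip message is a resolver warning, not a hard install failure: protobuf 3.20.0 does get installed, it just violates icetk's declared pin. To see every installed package that constrains protobuf before deciding which version to keep, here is a small sketch using `pkg_resources` (still available on this Python 3.9 image, though deprecated in newer setuptools):

```python
# List installed distributions that declare a protobuf requirement, making
# conflicts like icetk's protobuf<3.19 pin visible at a glance.
import pkg_resources

for dist in pkg_resources.working_set:
    for req in dist.requires():
        if req.project_name.lower() == "protobuf":
            print(f"{dist.project_name} {dist.version} requires {req}")
```

Whether icetk actually misbehaves under protobuf 3.20.0 depends on which of its code paths you exercise, so the warning may or may not matter in practice.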