Langchain-Chatchat
Running python cli_demo1.py
Traceback (most recent call last):
  File "cli_demo1.py", line 16, in <module>
    local_doc_qa.init_cfg(llm_model=LLM_MODEL,
  File "/app/_old/langchain-ChatGLM-master/chains/local_doc_qa.py", line 47, in init_cfg
    self.llm.load_model(model_name_or_path=llm_model_dict[llm_model],
TypeError: load_model() got an unexpected keyword argument 'use_ptuning_v2'
The master branch was updated at noon and should already fix this issue. Please pull the latest code and test again.
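For anyone who hits the same TypeError before pulling the fix: init_cfg() in chains/local_doc_qa.py passes a use_ptuning_v2 keyword that the older wrapper's load_model() did not declare. A minimal sketch of the mismatch, using stand-in classes rather than the project's actual code:

```python
# Illustrative only: reproduces the TypeError reported above with stand-in classes.
class OldChatGLM:
    def load_model(self, model_name_or_path: str):
        """Older wrapper signature: no `use_ptuning_v2` keyword."""
        print(f"loading {model_name_or_path}")

class NewChatGLM:
    def load_model(self, model_name_or_path: str, use_ptuning_v2: bool = False):
        """Updated wrapper signature: the keyword is accepted, so the caller works."""
        print(f"loading {model_name_or_path}, ptuning_v2={use_ptuning_v2}")

llm = OldChatGLM()
try:
    # Mirrors the call made by init_cfg() in chains/local_doc_qa.py.
    llm.load_model(model_name_or_path="THUDM/chatglm-6b", use_ptuning_v2=False)
except TypeError as e:
    print(e)  # load_model() got an unexpected keyword argument 'use_ptuning_v2'

NewChatGLM().load_model(model_name_or_path="THUDM/chatglm-6b", use_ptuning_v2=False)
```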
Input your local knowledge file path 请输入本地知识文件路径:/content/langchain-ChatGLM/content
langchain-ChatGLM_README.md 已成功加载
test.txt 已成功加载
Invalid file /content/langchain-ChatGLM/content/.ipynb_checkpoints. The FileType.UNK file type is not supported in partition.
.ipynb_checkpoints 未能成功加载
test2.txt 已成功加载
Input your question 请输入问题:lang chain
The dtype of attention mask (torch.int64) is not bool
Traceback (most recent call last):
File "/content/langchain-ChatGLM/cli_demo.py", line 31, in
Running cli_demo.py in Colab reports an error
Does the same error occur when only a single .md or .txt file is provided?
Yes, the same error occurs.
Input your local knowledge file path 请输入本地知识文件路径:/content/langchain-ChatGLM/content
langchain-ChatGLM_README.md 已成功加载
Invalid file /content/langchain-ChatGLM/content/.ipynb_checkpoints. The FileType.UNK file type is not supported in partition.
.ipynb_checkpoints 未能成功加载
Input your question 请输入问题:langchain是什么
The dtype of attention mask (torch.int64) is not bool
╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /content/langchain-ChatGLM/cli_demo.py:31 in run
supported with either positional arguments or keyw │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/base.py:116 in │
│ __call__ │
│ │
│ 113 │ │ │ outputs = self._call(inputs) │
│ 114 │ │ except (KeyboardInterrupt, Exception) as e: │
│ 115 │ │ │ self.callback_manager.on_chain_error(e, verbose=self.verbo │
│ ❱ 116 │ │ │ raise e │
│ 117 │ │ self.callback_manager.on_chain_end(outputs, verbose=self.verbo │
│ 118 │ │ return self.prep_outputs(inputs, outputs, return_only_outputs) │
│ 119 │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/base.py:113 in │
│ __call__ │
│ │
│ 110 │ │ │ verbose=self.verbose, │
│ 111 │ │ ) │
│ 112 │ │ try: │
│ ❱ 113 │ │ │ outputs = self._call(inputs) │
│ 114 │ │ except (KeyboardInterrupt, Exception) as e: │
│ 115 │ │ │ self.callback_manager.on_chain_error(e, verbose=self.verbo │
│ 116 │ │ │ raise e │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/combine_documents/ba │
│ se.py:75 in _call │
│ │
│ 72 │ │ docs = inputs[self.input_key] │
│ 73 │ │ # Other keys are assumed to be needed for LLM prediction │
│ 74 │ │ other_keys = {k: v for k, v in inputs.items() if k != self.inp │
│ ❱ 75 │ │ output, extra_return_dict = self.combine_docs(docs, **other_ke │
│ 76 │ │ extra_return_dict[self.output_key] = output │
│ 77 │ │ return extra_return_dict │
│ 78 │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/combine_documents/st │
│ uff.py:83 in combine_docs │
│ │
│ 80 │ │ """Stuff all documents into one prompt and pass to LLM.""" │
│ 81 │ │ inputs = self._get_inputs(docs, **kwargs) │
│ 82 │ │ # Call predict on the LLM. │
│ ❱ 83 │ │ return self.llm_chain.predict(**inputs), {} │
│ 84 │ │
│ 85 │ async def acombine_docs( │
│ 86 │ │ self, docs: List[Document], **kwargs: Any │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py:151 in │
│ predict │
│ │
│ 148 │ │ │ │ │
│ 149 │ │ │ │ completion = llm.predict(adjective="funny") │
│ 150 │ │ """ │
│ ❱ 151 │ │ return self(kwargs)[self.output_key] │
│ 152 │ │
│ 153 │ async def apredict(self, **kwargs: Any) -> str: │
│ 154 │ │ """Format prompt with kwargs and pass to LLM. │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/base.py:116 in │
│ __call__ │
│ │
│ 113 │ │ │ outputs = self._call(inputs) │
│ 114 │ │ except (KeyboardInterrupt, Exception) as e: │
│ 115 │ │ │ self.callback_manager.on_chain_error(e, verbose=self.verbo │
│ ❱ 116 │ │ │ raise e │
│ 117 │ │ self.callback_manager.on_chain_end(outputs, verbose=self.verbo │
│ 118 │ │ return self.prep_outputs(inputs, outputs, return_only_outputs) │
│ 119 │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/base.py:113 in │
│ __call__ │
│ │
│ 110 │ │ │ verbose=self.verbose, │
│ 111 │ │ ) │
│ 112 │ │ try: │
│ ❱ 113 │ │ │ outputs = self._call(inputs) │
│ 114 │ │ except (KeyboardInterrupt, Exception) as e: │
│ 115 │ │ │ self.callback_manager.on_chain_error(e, verbose=self.verbo │
│ 116 │ │ │ raise e │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py:57 in _call │
│ │
│ 54 │ │ return [self.output_key] │
│ 55 │ │
│ 56 │ def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]: │
│ ❱ 57 │ │ return self.apply([inputs])[0] │
│ 58 │ │
│ 59 │ def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult: │
│ 60 │ │ """Generate LLM result from inputs.""" │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py:118 in apply │
│ │
│ 115 │ │
│ 116 │ def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str │
│ 117 │ │ """Utilize the LLM generate method for speed gains.""" │
│ ❱ 118 │ │ response = self.generate(input_list) │
│ 119 │ │ return self.create_outputs(response) │
│ 120 │ │
│ 121 │ async def aapply(self, input_list: List[Dict[str, Any]]) -> List[D │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/chains/llm.py:62 in │
│ generate │
│ │
│ 59 │ def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult: │
│ 60 │ │ """Generate LLM result from inputs.""" │
│ 61 │ │ prompts, stop = self.prep_prompts(input_list) │
│ ❱ 62 │ │ return self.llm.generate_prompt(prompts, stop) │
│ 63 │ │
│ 64 │ async def agenerate(self, input_list: List[Dict[str, Any]]) -> LLM │
│ 65 │ │ """Generate LLM result from inputs.""" │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/llms/base.py:107 in │
│ generate_prompt │
│ │
│ 104 │ │ self, prompts: List[PromptValue], stop: Optional[List[str]] = │
│ 105 │ ) -> LLMResult: │
│ 106 │ │ prompt_strings = [p.to_string() for p in prompts] │
│ ❱ 107 │ │ return self.generate(prompt_strings, stop=stop) │
│ 108 │ │
│ 109 │ async def agenerate_prompt( │
│ 110 │ │ self, prompts: List[PromptValue], stop: Optional[List[str]] = │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/llms/base.py:140 in │
│ generate │
│ │
│ 137 │ │ │ │ output = self._generate(prompts, stop=stop) │
│ 138 │ │ │ except (KeyboardInterrupt, Exception) as e: │
│ 139 │ │ │ │ self.callback_manager.on_llm_error(e, verbose=self.ver │
│ ❱ 140 │ │ │ │ raise e │
│ 141 │ │ │ self.callback_manager.on_llm_end(output, verbose=self.verb │
│ 142 │ │ │ return output │
│ 143 │ │ params = self.dict() │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/llms/base.py:137 in │
│ generate │
│ │
│ 134 │ │ │ │ {"name": self.__class__.__name__}, prompts, verbose=se │
│ 135 │ │ │ ) │
│ 136 │ │ │ try: │
│ ❱ 137 │ │ │ │ output = self._generate(prompts, stop=stop) │
│ 138 │ │ │ except (KeyboardInterrupt, Exception) as e: │
│ 139 │ │ │ │ self.callback_manager.on_llm_error(e, verbose=self.ver │
│ 140 │ │ │ │ raise e │
│ │
│ /usr/local/lib/python3.9/dist-packages/langchain/llms/base.py:324 in │
│ _generate │
│ │
│ 321 │ │ # TODO: add caching here. │
│ 322 │ │ generations = [] │
│ 323 │ │ for prompt in prompts: │
│ ❱ 324 │ │ │ text = self._call(prompt, stop=stop) │
│ 325 │ │ │ generations.append([Generation(text=text)]) │
│ 326 │ │ return LLMResult(generations=generations) │
│ 327 │
│ │
│ /content/langchain-ChatGLM/models/chatglm_llm.py:72 in _call │
│ │
│ 69 │ def _call(self, │
│ 70 │ │ │ prompt: str, │
│ 71 │ │ │ stop: Optional[List[str]] = None) -> str: │
│ ❱ 72 │ │ response, _ = self.model.chat( │
│ 73 │ │ │ self.tokenizer, │
│ 74 │ │ │ prompt, │
│ 75 │ │ │ history=self.history[-self.history_len:] if self.history_l │
│ │
│ /usr/local/lib/python3.9/dist-packages/torch/utils/_contextlib.py:115 in │
│ decorate_context │
│ │
│ 112 │ @functools.wraps(func) │
│ 113 │ def decorate_context(*args, **kwargs): │
│ 114 │ │ with ctx_factory(): │
│ ❱ 115 │ │ │ return func(*args, **kwargs) │
│ 116 │ │
│ 117 │ return decorate_context │
│ 118 │
│ │
│ /root/.cache/huggingface/modules/transformers_modules/THUDM/chatglm-6b-int4/ │
│ e02ba894cf18f3fd9b2526c795f983683c4ec732/modeling_chatglm.py:1288 in chat │
│ │
│ 1285 │ │ inputs = inputs.to(self.device) │
│ 1286 │ │ outputs = self.generate(**inputs, **gen_kwargs) │
│ 1287 │ │ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] │
│ ❱ 1288 │ │ response = tokenizer.decode(outputs) │
│ 1289 │ │ response = self.process_response(response) │
│ 1290 │ │ history = history + [(query, response)] │
│ 1291 │ │ return response, history │
│ │
│ /usr/local/lib/python3.9/dist-packages/transformers/tokenization_utils_base. │
│ py:3474 in decode │
│ │
│ 3471 │ │ │ `str`: The decoded sentence. │
│ 3472 │ │ """ │
│ 3473 │ │ # Convert inputs to python lists │
│ ❱ 3474 │ │ token_ids = to_py_obj(token_ids) │
│ 3475 │ │ │
│ 3476 │ │ return self._decode( │
│ 3477 │ │ │ token_ids=token_ids, │
│ │
│ /usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py:174 in │
│ to_py_obj │
│ │
│ 171 │ if isinstance(obj, (dict, UserDict)): │
│ 172 │ │ return {k: to_py_obj(v) for k, v in obj.items()} │
│ 173 │ elif isinstance(obj, (list, tuple)): │
│ ❱ 174 │ │ return [to_py_obj(o) for o in obj] │
│ 175 │ elif is_tf_tensor(obj): │
│ 176 │ │ return obj.numpy().tolist() │
│ 177 │ elif is_torch_tensor(obj): │
│ │
│ /usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py:174 in │
│ <listcomp> │
│ │
│ /usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py:151 in │
│ is_tf_tensor │
│ │
│ 149 │ Tests if `x` is a tensorflow tensor or not. Safe to call even if t │
│ 150 │ """ │
│ ❱ 151 │ return False if not is_tf_available() else _is_tensorflow(x) │
│ 152 │
│ 153 │
│ 154 def _is_jax(x): │
│ │
│ /usr/local/lib/python3.9/dist-packages/transformers/utils/generic.py:142 in │
│ _is_tensorflow │
│ │
│ 139 │
│ 140 │
│ 141 def _is_tensorflow(x): │
│ ❱ 142 │ import tensorflow as tf │
│ 143 │ │
│ 144 │ return isinstance(x, tf.Tensor) │
│ 145 │
│ │
│ /usr/local/lib/python3.9/dist-packages/tensorflow/__init__.py:37 in <module>
Updating protobuf fixes this: !pip install protobuf==3.20.0
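Some context on why a protobuf change matters for a ChatGLM demo at all: the traceback shows tokenizer.decode() → to_py_obj() → is_tf_tensor(), and that probe imports TensorFlow lazily, which is exactly the import that fails in the Colab runtime. A simplified sketch of the probe logic (the real code lives in transformers/utils/generic.py and may differ slightly by version):

```python
import importlib.util

def is_tf_available() -> bool:
    # transformers only attempts the TensorFlow probe when TF is installed at all
    return importlib.util.find_spec("tensorflow") is not None

def _is_tensorflow(x) -> bool:
    import tensorflow as tf  # lazy import: this is the line the traceback ends in
    return isinstance(x, tf.Tensor)

def is_tf_tensor(x) -> bool:
    # short-circuits to False, without importing TF, when TF is absent
    return False if not is_tf_available() else _is_tensorflow(x)
```

So pinning protobuf makes the installed TensorFlow importable again; an alternative workaround, inferred from this code path rather than verified in this thread, would be removing TensorFlow from the Colab runtime entirely, since this project does not use it and the probe then short-circuits.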
After installing protobuf 3.20.0, pip reports: ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. icetk 0.0.7 requires protobuf<3.19, but you have protobuf 3.20.0 which is incompatible.
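That pip message is a resolver warning rather than an installation failure: protobuf 3.20.0 is installed anyway, it just violates the version pin declared by icetk. After restarting the Colab runtime, a quick, purely diagnostic check of which version the interpreter actually sees:

```python
# Sanity check only; it does not resolve the icetk `protobuf<3.19` pin conflict.
from importlib.metadata import version
import google.protobuf

print(version("protobuf"))           # version reported by package metadata
print(google.protobuf.__version__)   # version the running interpreter imported
```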