
Not loading on final step

VanShaman opened this issue 1 year ago · 6 comments

Describe the bug

I'm not very tech-savvy, but I know how to run this AI. The problem is that I keep getting this error on the last step. Does anyone know if this can be fixed?

(The full traceback is reproduced in the Logs section below.)

Is there an existing issue for this?

  • [X] I have searched the existing issues

Reproduction

https://colab.research.google.com/github/oobabooga/AI-Notebooks/blob/main/Colab-TextGen-GPU.ipynb#scrollTo=hKuocueuXnm5

Open the GPU notebook, go through steps 1-2, then try step 3. I've tried this on three Google accounts.

Screenshot

No response

Logs

Traceback (most recent call last):
  File "/content/text-generation-webui/download-model.py", line 169, in <module>
    links, is_lora = get_download_links_from_huggingface(model, branch)
  File "/content/text-generation-webui/download-model.py", line 113, in get_download_links_from_huggingface
    fname = dict[i]['path']
KeyError: 0
python server.py --share --model pygmalion-6b_sharded --settings settings-colab.json --cai-chat --no-stream --extensions gallery
2023-05-03 01:48:26.327126: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
Loading settings from settings-colab.json...
Loading pygmalion-6b_sharded...
╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:259 │
│ in hf_raise_for_status                                                       │
│                                                                              │
│   256 │   </Tip>                                                             │
│   257 │   """                                                                │
│   258 │   try:                                                               │
│ ❱ 259 │   │   response.raise_for_status()                                    │
│   260 │   except HTTPError as e:                                             │
│   261 │   │   error_code = response.headers.get("X-Error-Code")              │
│   262                                                                        │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/requests/models.py:960 in            │
│ raise_for_status                                                             │
│                                                                              │
│   957 │   │   │   http_error_msg = u'%s Server Error: %s for url: %s' % (sel │
│   958 │   │                                                                  │
│   959 │   │   if http_error_msg:                                             │
│ ❱ 960 │   │   │   raise HTTPError(http_error_msg, response=self)             │
│   961 │                                                                      │
│   962 │   def close(self):                                                   │
│   963 │   │   """Releases the connection back to the pool. Once this method  │
╰──────────────────────────────────────────────────────────────────────────────╯
HTTPError: 401 Client Error: Unauthorized for url: 
https://huggingface.co/models/pygmalion-6b_sharded/resolve/main/config.json

The above exception was the direct cause of the following exception:

╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py:409 in     │
│ cached_file                                                                  │
│                                                                              │
│    406 │   user_agent = http_user_agent(user_agent)                          │
│    407 │   try:                                                              │
│    408 │   │   # Load from URL or cache if already cached                    │
│ ❱  409 │   │   resolved_file = hf_hub_download(                              │
│    410 │   │   │   path_or_repo_id,                                          │
│    411 │   │   │   filename,                                                 │
│    412 │   │   │   subfolder=None if len(subfolder) == 0 else subfolder,     │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py │
│ :120 in _inner_fn                                                            │
│                                                                              │
│   117 │   │   if check_use_auth_token:                                       │
│   118 │   │   │   kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__na │
│   119 │   │                                                                  │
│ ❱ 120 │   │   return fn(*args, **kwargs)                                     │
│   121 │                                                                      │
│   122 │   return _inner_fn  # type: ignore                                   │
│   123                                                                        │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:119 │
│ 5 in hf_hub_download                                                         │
│                                                                              │
│   1192 │   if not local_files_only:                                          │
│   1193 │   │   try:                                                          │
│   1194 │   │   │   try:                                                      │
│ ❱ 1195 │   │   │   │   metadata = get_hf_file_metadata(                      │
│   1196 │   │   │   │   │   url=url,                                          │
│   1197 │   │   │   │   │   token=token,                                      │
│   1198 │   │   │   │   │   proxies=proxies,                                  │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py │
│ :120 in _inner_fn                                                            │
│                                                                              │
│   117 │   │   if check_use_auth_token:                                       │
│   118 │   │   │   kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__na │
│   119 │   │                                                                  │
│ ❱ 120 │   │   return fn(*args, **kwargs)                                     │
│   121 │                                                                      │
│   122 │   return _inner_fn  # type: ignore                                   │
│   123                                                                        │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:154 │
│ 1 in get_hf_file_metadata                                                    │
│                                                                              │
│   1538 │   │   proxies=proxies,                                              │
│   1539 │   │   timeout=timeout,                                              │
│   1540 │   )                                                                 │
│ ❱ 1541 │   hf_raise_for_status(r)                                            │
│   1542 │                                                                     │
│   1543 │   # Return                                                          │
│   1544 │   return HfFileMetadata(                                            │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py:291 │
│ in hf_raise_for_status                                                       │
│                                                                              │
│   288 │   │   │   │   " `repo_type`.\nIf you are trying to access a private  │
│   289 │   │   │   │   " make sure you are authenticated."                    │
│   290 │   │   │   )                                                          │
│ ❱ 291 │   │   │   raise RepositoryNotFoundError(message, response) from e    │
│   292 │   │                                                                  │
│   293 │   │   elif response.status_code == 400:                              │
│   294 │   │   │   message = (                                                │
╰──────────────────────────────────────────────────────────────────────────────╯
RepositoryNotFoundError: 401 Client Error. (Request ID: 
Root=1-6451bd6b-3d0c360173d1f9602aabb487)

Repository Not Found for url: 
https://huggingface.co/models/pygmalion-6b_sharded/resolve/main/config.json.
Please make sure you specified the correct `repo_id` and `repo_type`.
If you are trying to access a private or gated repo, make sure you are 
authenticated.
Invalid username or password.

During handling of the above exception, another exception occurred:

╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /content/text-generation-webui/server.py:234 in <module>                     │
│                                                                              │
│   231 │   │   i = int(input())-1                                             │
│   232 │   │   print()                                                        │
│   233 │   shared.model_name = available_models[i]                            │
│ ❱ 234 shared.model, shared.tokenizer = load_model(shared.model_name)         │
│   235 if shared.args.lora:                                                   │
│   236 │   add_lora_to_model(shared.args.lora)                                │
│   237                                                                        │
│                                                                              │
│ /content/text-generation-webui/modules/models.py:51 in load_model            │
│                                                                              │
│    48 │   │   if any(size in shared.model_name.lower() for size in ('13b', ' │
│    49 │   │   │   model = AutoModelForCausalLM.from_pretrained(Path(f"models │
│    50 │   │   else:                                                          │
│ ❱  51 │   │   │   model = AutoModelForCausalLM.from_pretrained(Path(f"models │
│    52 │   │   │   if torch.has_mps:                                          │
│    53 │   │   │   │   device = torch.device('mps')                           │
│    54 │   │   │   │   model = model.to(device)                               │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factor │
│ y.py:445 in from_pretrained                                                  │
│                                                                              │
│   442 │   │   │   if kwargs_copy.get("torch_dtype", None) == "auto":         │
│   443 │   │   │   │   _ = kwargs_copy.pop("torch_dtype")                     │
│   444 │   │   │                                                              │
│ ❱ 445 │   │   │   config, kwargs = AutoConfig.from_pretrained(               │
│   446 │   │   │   │   pretrained_model_name_or_path,                         │
│   447 │   │   │   │   return_unused_kwargs=True,                             │
│   448 │   │   │   │   trust_remote_code=trust_remote_code,                   │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/transformers/models/auto/configurati │
│ on_auto.py:925 in from_pretrained                                            │
│                                                                              │
│   922 │   │   kwargs["_from_auto"] = True                                    │
│   923 │   │   kwargs["name_or_path"] = pretrained_model_name_or_path         │
│   924 │   │   trust_remote_code = kwargs.pop("trust_remote_code", False)     │
│ ❱ 925 │   │   config_dict, unused_kwargs = PretrainedConfig.get_config_dict( │
│   926 │   │   if "auto_map" in config_dict and "AutoConfig" in config_dict[" │
│   927 │   │   │   if not trust_remote_code:                                  │
│   928 │   │   │   │   raise ValueError(                                      │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py: │
│ 574 in get_config_dict                                                       │
│                                                                              │
│   571 │   │   """                                                            │
│   572 │   │   original_kwargs = copy.deepcopy(kwargs)                        │
│   573 │   │   # Get config dict associated with the base config file         │
│ ❱ 574 │   │   config_dict, kwargs = cls._get_config_dict(pretrained_model_na │
│   575 │   │   if "_commit_hash" in config_dict:                              │
│   576 │   │   │   original_kwargs["_commit_hash"] = config_dict["_commit_has │
│   577                                                                        │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py: │
│ 629 in _get_config_dict                                                      │
│                                                                              │
│   626 │   │   │                                                              │
│   627 │   │   │   try:                                                       │
│   628 │   │   │   │   # Load from local folder or from cache or download fro │
│ ❱ 629 │   │   │   │   resolved_config_file = cached_file(                    │
│   630 │   │   │   │   │   pretrained_model_name_or_path,                     │
│   631 │   │   │   │   │   configuration_file,                                │
│   632 │   │   │   │   │   cache_dir=cache_dir,                               │
│                                                                              │
│ /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py:424 in     │
│ cached_file                                                                  │
│                                                                              │
│    421 │   │   )                                                             │
│    422 │                                                                     │
│    423 │   except RepositoryNotFoundError:                                   │
│ ❱  424 │   │   raise EnvironmentError(                                       │
│    425 │   │   │   f"{path_or_repo_id} is not a local folder and is not a va │
│    426 │   │   │   "listed on 'https://huggingface.co/models'\nIf this is a  │
│    427 │   │   │   "pass a token having permission to this repo with `use_au │
╰──────────────────────────────────────────────────────────────────────────────╯
OSError: models/pygmalion-6b_sharded is not a local folder and is not a valid 
model identifier listed on 'https://huggingface.co/models'
If this is a private repository, make sure to pass a token having permission to 
this repo with `use_auth_token` or log in with `huggingface-cli login` and pass 
`use_auth_token=True`.

System Info

Google Colab Notebook
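
Both errors in the log most likely trace back to the same root cause. download-model.py queries the Hugging Face API for the repo's file list; when the repo name is wrong or unreachable, the API returns an error object rather than a list, so indexing it with an integer (`dict[i]['path']`) raises KeyError: 0 and nothing is downloaded. server.py then finds no populated local folder, and transformers falls back to treating the string models/pygmalion-6b_sharded as a Hub repo id, which does not exist, hence the 401. A minimal sketch that reproduces the fallback, using only names visible in the traceback:

from transformers import AutoConfig

# "models/pygmalion-6b_sharded" is a relative path; when no such local
# folder exists, transformers retries it as a Hub repo id. No repo with
# that id exists, so this raises the same 401-backed OSError as above.
AutoConfig.from_pretrained("models/pygmalion-6b_sharded")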

VanShaman · May 03 '23

Hmm, my guess is that it can't find the model you're specifying. Make sure it exists in the text-generation-webui/models directory and that the model's folder name matches the name you specify when running. I've never tried to run this in Google Colab, though.
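
A quick way to verify that from a Colab cell before launching the server; a minimal sketch assuming the paths shown in the traceback:

from pathlib import Path

model_dir = Path("/content/text-generation-webui/models/pygmalion-6b_sharded")
print("folder exists:   ", model_dir.is_dir())
print("config.json here:", (model_dir / "config.json").exists())
if model_dir.is_dir():
    print("contents:", sorted(p.name for p in model_dir.iterdir()))

If the folder or its config.json is missing, from_pretrained cannot load it locally and you end up with errors like the ones above.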

xNul · May 03 '23

Same issue.

AsakaJX · May 03 '23

I fixed the issue. Just replace this line in the 3rd step:

![[ ! -f models/$model_name/config.json ]] && python download-model.py $huggingface_org/$huggingface_repo --branch $huggingface_branch

with this one:

![[ ! -f models/$model_name/config.json ]] && git clone https://huggingface.co/$huggingface_org/$huggingface_repo --branch $huggingface_branch models/$model_name
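
One caveat with the git clone approach (an assumption worth checking, not something confirmed in this thread): Hugging Face repos keep the large weight files in Git LFS, so cloning without LFS active can leave small pointer files or missing shards instead of real weights. A hedged variant of the replacement line that sets up LFS first:

![[ ! -f models/$model_name/config.json ]] && git lfs install && git clone https://huggingface.co/$huggingface_org/$huggingface_repo --branch $huggingface_branch models/$model_name

If `git lfs install` itself fails, the git-lfs package may need installing first (!apt-get install git-lfs on Colab).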

AsakaJX · May 03 '23

Be careful with disk space on Colab, though, and don't download a lot of models. Or you could just add a code block that removes everything from the models folder:

#@title 4. Delete previously installed models
!rm -rf models/*
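
To keep an eye on the remaining disk space before and after deleting models (standard Linux, nothing notebook-specific):

!df -h /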

AsakaJX · May 03 '23

> I fixed the issue. Just replace this line in the 3rd step:
>
> ![[ ! -f models/$model_name/config.json ]] && python download-model.py $huggingface_org/$huggingface_repo --branch $huggingface_branch
>
> with this one:
>
> ![[ ! -f models/$model_name/config.json ]] && git clone https://huggingface.co/$huggingface_org/$huggingface_repo --branch $huggingface_branch models/$model_name

I tried this and it worked once, but trying it again didn't work.

VanShaman · May 04 '23

Now it shows this

python server.py --share --model pygmalion-6b_sharded --settings settings-colab.json --no-stream --extensions gallery --cai-chat
2023-05-04 14:52:35.175157: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
Loading settings from settings-colab.json...
Loading pygmalion-6b_sharded...
Loading checkpoint shards: 0% 0/7 [00:00<?, ?it/s]
Traceback (most recent call last):
  /content/text-generation-webui/server.py:234 in <module>
    shared.model, shared.tokenizer = load_model(shared.model_name)
  /content/text-generation-webui/modules/models.py:51 in load_model
    model = AutoModelForCausalLM.from_pretrained(Path(f"models
  /usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py:468 in from_pretrained
    return model_class.from_pretrained(
  /usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py:2777 in from_pretrained
    ) = cls._load_pretrained_model(
  /usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py:3104 in _load_pretrained_model
    state_dict = load_state_dict(shard_file)
  /usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py:433 in load_state_dict
    with safe_open(checkpoint_file, framework="pt") as f:
FileNotFoundError: No such file or directory: "models/pygmalion-6b_sharded/model-00001-of-00007.safetensors"
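
That FileNotFoundError means the folder now exists and has a config plus a safetensors index, but at least one of the seven weight shards the index points to never made it to disk, which would be consistent with an interrupted or LFS-less clone. A small check to run in a Colab cell, assuming the standard transformers index file name:

import json, os

d = "models/pygmalion-6b_sharded"
index = json.load(open(os.path.join(d, "model.safetensors.index.json")))
expected = set(index["weight_map"].values())  # shard files the index needs
present = {f for f in os.listdir(d) if f.endswith(".safetensors")}
print("missing shards:", sorted(expected - present))

Any shard reported missing has to be re-fetched; and if the .safetensors files are present but only a few hundred bytes each, they are LFS pointer files rather than real weights.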

VanShaman · May 04 '23

This issue has been closed due to inactivity for 6 weeks. If you believe it is still relevant, please leave a comment below. You can tag a developer in your comment.

github-actions[bot] · Aug 24 '23