Potential enhancement: collect downloaded model info before checking for updates
I hacked together a quick and dirty proof of concept for what I think is an improvement to the current model update check process: collect model and version IDs for all downloaded models of a given type, then check for new versions once per model ID. It seems to work well, but for simplicity it integrates everything except `get_model_id_from_model_path` and `get_model_info_by_id` into `check_models_new_version_by_model_types`.
This approach minimizes CivitAI API calls (one per model ID) and should likewise only scan the directory tree once per model folder. It also makes it more likely that already-downloaded newer versions get found: e.g. I have SD1.5 and SDXL LoRAs separated into different folders, and many LoRAs/LyCORISs list versions for both "ecosystems" under the same model ID, which breaks under the old system of only checking for updates within the same folder.
This is intended as an interest check/request for feedback on the approach, in case there's a reason the current method is preferable - hence opening an issue rather than submitting a draft PR.
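For reference, the core of the change is that each model folder is walked once to build a per-model-ID map of downloaded versions before any API calls are made. The intermediate structure looks roughly like this (model IDs, version IDs and paths below are made up purely for illustration):

```python
# Illustrative only: what downloaded_models holds for one model folder after scanning.
# Keys are CivitAI model IDs; values map local version IDs (None if unparseable) to file paths.
downloaded_models = {
    12345: {
        100001: "/path/to/models/Lora/sd15/example_v1.safetensors",
        100777: "/path/to/models/Lora/sdxl/example_xl.safetensors",
    },
    67890: {None: "/path/to/models/Lora/another_lora.safetensors"},
}
```

The full proof-of-concept function: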
```python
def check_models_new_version_by_model_types(model_types: list, delay: float = 0.2) -> list:
    """
    Check all models of model_types for new versions.

    parameter: delay - float, how many seconds to delay between each request to civitai
    return: new_versions
        a list of all new versions, each one is
        (model_path, model_id, model_name, new_version_id,
        new_version_name, description, download_url, img_url, model_type)
    """

    util.printD("Checking models' new version")

    if not model_types:
        return []

    # check model types, which could be a string for a single type
    mts = []
    if isinstance(model_types, str):
        mts.append(model_types)
    elif isinstance(model_types, list):
        mts = model_types
    else:
        util.printD("Unknown model types:")
        util.printD(model_types)
        return []

    # new version list
    new_versions = []

    # walk all models
    for model_type, model_folder in model.folders.items():
        if model_type not in mts:
            continue
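        # First pass: map each CivitAI model ID to {local version ID: file path}
        # for every model file found under this folder (one directory walk per folder).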
        downloaded_models = defaultdict(dict)

        util.printD(f"Scanning path: {model_folder}")
        for root, _, files in os.walk(model_folder, followlinks=True):
            for filename in files:
                #region check_single_model_new_version
                # check ext
                item = os.path.join(root, filename)
                _, ext = os.path.splitext(item)
                if ext not in model.EXTS:
                    continue
                #endregion

                #region check_model_new_version_by_path
                if not (item and os.path.isfile(item)):
                    util.printD(f"model_path is not a file: {item}")
                    continue

                result = get_model_id_from_model_path(item)
                if not result:
                    continue

                model_id, local_version_id = result
                #endregion

                # int(None) raises TypeError and int("") raises ValueError - treat both as missing
                try:
                    model_id = int(model_id)
                except (ValueError, TypeError) as e:
                    util.printD(util.indented_msg(
                        f"""
                        Bad or missing model_id for local model, skipping.
                        Path: {item}
                        Model ID: {model_id}
                        Version ID: {local_version_id}
                        Error: {str(e)}
                        """
                    ))
                    continue

                try:
                    local_version_id = int(local_version_id)
                except (ValueError, TypeError) as e:
                    util.printD(util.indented_msg(
                        f"""
                        Bad or missing version id for local model, setting to None.
                        Path: {item}
                        Model ID: {model_id}
                        Version ID: {local_version_id}
                        Error: {str(e)}
                        """
                    ))
                    local_version_id = None

                downloaded_models[model_id][local_version_id] = item

        #util.printD(f"Downloaded models: {downloaded_models}")
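        # Second pass: one CivitAI API request per unique model ID collected above.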
        for model_id in sorted(downloaded_models.keys()):
            #region check_model_new_version_by_path
            # get model info by id from civitai
            model_info = get_model_info_by_id(model_id)
            util.delay(delay)

            if not model_info:
                util.printD("Failed to retrieve info from CivitAI.")
                continue

            model_versions = model_info.get("modelVersions", [])
            if len(model_versions) == 0:
                util.printD("No model versions listed.")
                continue
            current_version = model_versions[0]
            if not current_version:
                util.printD("Failed getting current version from version list.")
                continue

            current_version_id = current_version.get("id", False)

            # Sort with None (unparseable local version id) first so that [-1] picks the
            # highest known version id; a plain sorted() would raise TypeError when a
            # model has both a known and an unknown local version id.
            local_version_ids = sorted(
                downloaded_models[model_id].keys(),
                key=lambda vid: -1 if vid is None else vid
            )
            util.printD(f"Compare version ids, remote: {current_version_id}, local: {local_version_ids}")
            if current_version_id and current_version_id in local_version_ids:
                continue

            # use the path of the highest local version id - probably the newest downloaded version
            local_path = downloaded_models[model_id][local_version_ids[-1]]

            model_name = model_info.get("name", "")
            new_version_name = current_version.get("name", "")
            description = current_version.get("description", "")
            download_url = current_version.get("downloadUrl", "")

            # get 1 preview image
            try:
                img_url = current_version["images"][0]["url"]
            except (IndexError, KeyError):
                img_url = ""
            #endregion

            # add to list
            new_versions.append((
                local_path, model_id, model_name, current_version_id,
                new_version_name, description, download_url, img_url, model_type
            ))

    return sorted(new_versions, key=lambda version: Path(version[0]))
```
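For context, this is meant as a drop-in replacement for the existing function, so callers shouldn't need to change. A minimal invocation would look something like this (the model type names below are placeholders and may not match the actual keys in `model.folders`):

```python
# Hypothetical call site; type names are illustrative, not necessarily the real keys.
new_versions = check_models_new_version_by_model_types(["lora", "lycoris"], delay=0.2)
for model_path, model_id, model_name, *rest in new_versions:
    print(f"Update available for {model_name} (model {model_id}): {model_path}")
```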
This won't handle weird edge cases like LEOSAM's FilmGirl Ultra, where completely different types of models share a model ID, but the more common case of mixed LoRA and LyCORIS under a single model ID would be fine for anyone who uses the same path for both model types (probably the majority now?).