# Pipeline support for loading from deployment directories with config.json

## PR description
This PR has two main goals:
- Make pipelines read the `config.json` file from the `deployment` directory.
- Make pipelines accept the `deployment` directory as a `model_path` argument (see the usage sketch below).
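For illustration, a minimal usage sketch of the second goal, mirroring the test script further down. Whether `class_names` actually gets populated depends on the `config.json` shipped in the stub's deployment directory:

```python
from deepsparse import Pipeline
from sparsezoo import Model

# any SparseZoo stub works here; this one is taken from the test plan below
stub = "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_quant-none"
model = Model(stub)

# after this PR, the deployment directory (a sparsezoo Directory instance
# containing model.onnx and config.json) can be passed as model_path directly
pipeline = Pipeline.create("image_classification", model.deployment)

# populated from config.json when the file provides a label mapping
print(pipeline.class_names)
```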
Detailed changes:
- Refactor the pipeline abstract method `setup_onnx_file_path` to `setup_from_model`, so the name signals to the user that it does more than resolve an ONNX path (e.g. it also loads the `config.json` file).
- Refactor the utility function `model_to_path` to `model_to_path_and_config`, so it additionally supports loading the `config.json` file.
- For vision pipelines, add a setter for the `class_names` attribute. This ensures that if `class_names` was populated from `config.json`, the user is warned when attempting to overwrite it (see the sketch after this list).
- Make pipelines accept `deployment` directories (`Directory` class instances) as a `model_path` argument.
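As a rough illustration of the setter guard described above — a sketch only; the class name, the `_class_names` attribute, and the warning message are assumptions, not the PR's actual implementation:

```python
import warnings


class VisionPipelineSketch:
    """Illustrative stand-in for a vision pipeline; not the real class."""

    def __init__(self):
        # populated from config.json during pipeline setup, when available
        self._class_names = None

    @property
    def class_names(self):
        return self._class_names

    @class_names.setter
    def class_names(self, value):
        # warn before overwriting a mapping that was loaded from config.json
        if self._class_names is not None:
            warnings.warn(
                "class_names was already set (e.g. from config.json) "
                "and is being overwritten"
            )
        self._class_names = value
```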
## Manual testing plan

Make sure that vision pipelines accept a deployment directory, as well as a `Model`, as input, and properly read `class_names` (a mapping from labels to class names, e.g. `'11' -> 'goldfish'`) from `config.json`:
```python
## Manual Testing
import json
import os

from deepsparse import Pipeline
from deepsparse.pipelines.helpers import DeploymentFiles
from sparsezoo import Model


def create_config_file_in_directory(deployment_dir_path, no_classes):
    # write a mock config.json whose labels all map to a dummy class name
    dummy_class = "dummy_class"
    config_dict = {}
    config_dict[DeploymentFiles.ONNX_MODEL_FILE] = {
        label: dummy_class for label in range(no_classes)
    }
    with open(os.path.join(deployment_dir_path, DeploymentFiles.CONFIG_FILE), "w") as w:
        json.dump(config_dict, w)


def manual_testing(stub, num_classes, task_name):
    model = Model(stub)
    # create sample input
    pipeline_input = model.sample_batch(batch_size=1)["sample_inputs"]
    args = {"images": pipeline_input[0]}
    # add mock config file to the deployment directory
    path = model._path
    create_config_file_in_directory(model.deployment.path, num_classes)
    # create model with deployment directory...
    model = Model(path)
    # ...and feed it to the pipeline
    deployment = model.deployment
    # pipeline = Pipeline.create(task_name, model) also works
    pipeline = Pipeline.create(task_name, deployment)
    out = pipeline(**args)
    if task_name == "image_classification":
        assert out.labels[0] == "dummy_class"
    elif task_name == "yolact":
        assert out.classes[0][0] == "dummy_class"
    else:
        # yolo
        assert out.labels[0][0] == "dummy_class"
    # json.dump stringifies integer keys, hence the string label '0'
    assert list(pipeline.class_names.keys())[0] == "0"
    assert list(pipeline.class_names.values())[0] == "dummy_class"


test_cases = {
    "image_classification": (
        "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_quant-none",
        1000,
    ),
    "yolact": (
        "zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned82_quant-none",
        80,
    ),
    "yolo": (
        "zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned_quant-aggressive_94",
        80,
    ),
}

for task_name, (stub, num_classes) in test_cases.items():
    manual_testing(stub, num_classes, task_name)

print("done")
```
## Testing annotate pipelines

Successful manual tests for YOLO, YOLACT, and image classification (IC).