mmpose
How to use GPU on embedded devices
I tried to run the demo on a Jetson Nano using the GPU. I set CUDA_VISIBLE_DEVICES=0, but found that the GPU was not used when the demo ran.
Most of our demos use 'cuda:0' as the default device for model inference. Could you please specify the demo script and arguments you used when encountering this issue?
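For context, a minimal sketch of how a device string is typically passed to an MMPose model with the 1.x Python API (the config and checkpoint paths below are placeholders, not taken from this issue):

import torch
from mmpose.apis import inference_topdown, init_model

# 'cuda:0' selects the first visible GPU; fall back to CPU if CUDA is unavailable.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Placeholder config/checkpoint paths, for illustration only.
model = init_model(
    'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',
    'td-hm_hrnet-w32_8xb64-210e_coco-256x192.pth',
    device=device)
results = inference_topdown(model, 'demo.jpg')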
webcam_demo.py:
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from argparse import ArgumentParser

from mmengine import Config, DictAction

from mmpose.apis.webcam import WebcamExecutor
from mmpose.apis.webcam.nodes import model_nodes


def parse_args():
    parser = ArgumentParser('Webcam executor configs')
    parser.add_argument(
        '--config',
        type=str,
        default='./demo/webcam_cfg/pose_estimation.py')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='Override settings in the config. The key-value pair '
        'in xxx=yyy format will be merged into config file. For example, '
        "'--cfg-options executor_cfg.camera_id=1'")
    parser.add_argument(
        '--debug', action='store_true', help='Show debug information.')
    parser.add_argument(
        '--cpu', action='store_true', help='Use CPU for model inference.')
    parser.add_argument(
        '--cuda',
        action='store_true',
        default=True,
        help='Use GPU for model inference.')
    return parser.parse_args()
def set_device(cfg: Config, device: str):
    """Set model device in config.

    Args:
        cfg (Config): Webcam config
        device (str): device indicator like "cpu" or "cuda:0"
    """
    device = device.lower()
    assert device == 'cpu' or device.startswith('cuda:')

    for node_cfg in cfg.executor_cfg.nodes:
        if node_cfg.type in model_nodes.__all__:
            node_cfg.update(device=device)

    return cfg
def run():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    cfg.merge_from_dict(args.cfg_options)

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    if args.cpu:
        cfg = set_device(cfg, 'cpu')

    if args.cuda:
        cfg = set_device(cfg, 'cuda:0')

    webcam_exe = WebcamExecutor(**cfg.executor_cfg)
    webcam_exe.run()


if __name__ == '__main__':
    run()
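For reference, hypothetical invocations of the script above (not part of the issue):

# Model nodes default to 'cuda:0' because --cuda defaults to True.
python demo/webcam_demo.py

# Override config values from the command line, e.g. the camera id.
python demo/webcam_demo.py --cfg-options executor_cfg.camera_id=0

Note that in this run() the 'if args.cuda' branch executes after the 'if args.cpu' branch and '--cuda' has default=True, so the device is always reset to 'cuda:0' even when '--cpu' is passed.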
pose_estimation.py:
# Copyright (c) OpenMMLab. All rights reserved.
executor_cfg = dict(
# Basic configurations of the executor
name='Pose Estimation',
camera_id='http://admin:[email protected]:8081/',
# Define nodes.
# The configuration of a node usually includes:
# 1. 'type': Node class name
# 2. 'name': Node name
# 3. I/O buffers (e.g. 'input_buffer', 'output_buffer'): specify the
# input and output buffer names. This may depend on the node class.
# 4. 'enable_key': assign a hot-key to toggle enable/disable this node.
# This may depend on the node class.
# 5. Other class-specific arguments
nodes=[
# 'DetectorNode':
# This node performs object detection from the frame image using an
# MMDetection model.
dict(
type='DetectorNode',
name='detector',
model_config='./demo/mmdetection_cfg/'
'ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',
model_checkpoint='./demo/webcam_cfg/'
'ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',
            input_buffer='_input_',  # '_input_' is an executor-reserved buffer
            output_buffer='det_result'),
# 'TopDownPoseEstimatorNode':
# This node performs keypoint detection from the frame image using an
        # MMPose top-down model. Detection results are needed.
dict(
type='TopDownPoseEstimatorNode',
name='human pose estimator',
model_config='./configs/body_2d_keypoint/topdown_heatmap/coco/mobilevit_coco-256x192.py',
model_checkpoint="./work_dirs/AP_epoch_300.pth",
labels=['person'],
input_buffer='det_result',
output_buffer='human_pose'),
# 'ObjectAssignerNode':
# This node binds the latest model inference result with the current
# frame. (This means the frame image and inference result may be
# asynchronous).
dict(
type='ObjectAssignerNode',
name='object assigner',
            frame_buffer='_frame_',  # '_frame_' is an executor-reserved buffer
object_buffer='human_pose',
output_buffer='frame'),
# 'ObjectVisualizerNode':
        # This node draws the pose visualization result on the frame image.
        # Pose results are needed.
dict(
type='ObjectVisualizerNode',
name='object visualizer',
enable_key='v',
enable=True,
show_bbox=True,
must_have_keypoint=False,
show_keypoint=True,
input_buffer='frame',
output_buffer='vis'),
# 'SunglassesNode':
        # This node draws the sunglasses effect on the frame image.
        # Pose results are needed.
dict(
type='SunglassesEffectNode',
name='sunglasses',
enable_key='s',
enable=False,
input_buffer='vis',
output_buffer='vis_sunglasses'),
        # 'BigeyeEffectNode':
        # This node draws the big-eye effect on the frame image.
        # Pose results are needed.
dict(
type='BigeyeEffectNode',
name='big-eye',
enable_key='b',
enable=False,
input_buffer='vis_sunglasses',
output_buffer='vis_bigeye'),
# 'NoticeBoardNode':
        # This node shows a notice board with given content, e.g. help
# information.
dict(
type='NoticeBoardNode',
name='instruction',
enable_key='h',
enable=False,
input_buffer='vis_bigeye',
output_buffer='vis_notice',
content_lines=[
'This is a demo for pose visualization and simple image '
'effects. Have fun!', '', 'Hot-keys:',
'"v": Pose estimation result visualization',
'"s": Sunglasses effect B-)', '"b": Big-eye effect 0_0',
'"h": Show help information',
'"m": Show diagnostic information', '"q": Exit'
],
),
# 'MonitorNode':
        # This node shows diagnostic information in the frame image. It can
# be used for debugging or monitoring system resource status.
dict(
type='MonitorNode',
name='monitor',
enable_key='m',
enable=False,
input_buffer='vis_notice',
output_buffer='display'),
# 'RecorderNode':
        # This node saves the output video into a file.
dict(
type='RecorderNode',
name='recorder',
out_video_file='webcam_demo.mp4',
input_buffer='display',
            output_buffer='_display_'
            # '_display_' is an executor-reserved buffer
)
])
The command is: CUDA_VISIBLE_DEVICES=0 python demo/webcam_demo.py
But I found that the GPU (NVIDIA tegra210-gm20b) did not work.
Could you please check GPU availability with the following command?
python -c "import torch; print(torch.cuda.is_available())"