mediapipe: unable to use all CPU cores
import os
import sys
import time
import cv2
import mediapipe as mp
from numpy import size
class PoseEstimation:
    """Run MediaPipe pose detection over a video and write an annotated copy.

    Reads ``<cwd>/downloaded_video/<name>`` and writes the landmark-annotated
    result to ``<cwd>/output_vid/<name>``.
    """

    def __init__(self):
        # Base directory under which the input/output folders live.
        self.base_path = os.getcwd()
        # NOTE(review): MediaPipe's CPU inference (XNNPACK delegate) runs on a
        # small internal thread pool; it will not saturate all cores on its
        # own, which matches the ~2-core usage observed in the logs.
        self.pose = mp.solutions.pose.Pose(
            min_detection_confidence=0.7,
            min_tracking_confidence=0.7,
        )
        self.drawing_pose = mp.solutions.drawing_utils

    def get_pose(self, video_name="20250129T105442Z-20250129T110442Z.mp4"):
        """Process one video frame-by-frame, drawing pose landmarks.

        Args:
            video_name: File name inside ``downloaded_video``; the annotated
                output is written under the same name in ``output_vid``.
                Defaults to the original hard-coded clip for backward
                compatibility.

        Side effects:
            Creates the input/output directories if missing, writes the
            output video, prints the elapsed time, and calls ``sys.exit()``
            if the input cannot be opened.
        """
        input_vid_dir = os.path.join(self.base_path, "downloaded_video")
        output_vid_dir = os.path.join(self.base_path, "output_vid")
        os.makedirs(input_vid_dir, exist_ok=True)
        os.makedirs(output_vid_dir, exist_ok=True)
        input_vid_file = os.path.join(input_vid_dir, video_name)
        output_vid_file = os.path.join(output_vid_dir, video_name)

        vid_cap = cv2.VideoCapture(input_vid_file)
        if not vid_cap.isOpened():
            print("Unable to open video file")  # was: "open open" typo
            sys.exit()

        fps = int(vid_cap.get(cv2.CAP_PROP_FPS))
        width = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Reusing the source's raw 'h264' fourcc in an .mp4 container made
        # FFmpeg warn and fall back (see logs); 'mp4v' is a tag the MP4
        # muxer accepts directly.
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out_vid = cv2.VideoWriter(
            filename=output_vid_file,
            fourcc=fourcc,
            fps=fps,
            frameSize=(width, height),
        )

        starttime = time.perf_counter()
        try:
            while vid_cap.isOpened():
                suc, frame = vid_cap.read()
                if not suc:  # end of stream or read error
                    break
                out_vid.write(self.process_frame(frame))
        finally:
            # Release handles even if processing raises mid-video.
            vid_cap.release()
            out_vid.release()
            cv2.destroyAllWindows()

        elapsed = time.perf_counter() - starttime
        if elapsed < 60:
            print(f"time taken to process the video: {elapsed} seconds.")
        else:
            print(f"time taken to process the video: {elapsed / 60} minutes.")

    def process_frame(self, frame):
        """Detect pose landmarks on one BGR frame and draw them in place.

        Args:
            frame: A BGR image array as returned by ``cv2.VideoCapture.read``.

        Returns:
            The same frame with landmarks drawn on it (unchanged when no
            person is detected).
        """
        # MediaPipe expects RGB input; OpenCV delivers BGR. Feeding BGR
        # directly (as the original did) degrades detection accuracy.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.pose.process(rgb)
        if results.pose_landmarks:  # None when no person is found in frame
            self.drawing_pose.draw_landmarks(
                frame,
                results.pose_landmarks,
                mp.solutions.pose.POSE_CONNECTIONS,
                self.drawing_pose.DrawingSpec(color=(255, 0, 0), thickness=4, circle_radius=2),
                self.drawing_pose.DrawingSpec(color=(255, 0, 0), thickness=6, circle_radius=2),
            )
        return frame
if __name__ == "__main__":
    # Script entry point: run the full video annotation pipeline once.
    PoseEstimation().get_pose()
LOGS:
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1759228428.788627 4195492 gl_context.cc:369] GL version: 2.1 (2.1 Metal - 89.3), renderer: Apple M1 Pro INFO: Created TensorFlow Lite XNNPACK delegate for CPU. OpenCV: FFMPEG: tag 0x34363268/'h264' is not supported with codec id 27 and format 'mp4 / MP4 (MPEG-4 Part 14)' OpenCV: FFMPEG: fallback to use tag 0x31637661/'avc1' W0000 00:00:1759228428.883094 4195650 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors. W0000 00:00:1759228428.892257 4195650 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors. W0000 00:00:1759228430.307321 4195656 landmark_projection_calculator.cc:186] Using NORM_RECT without IMAGE_DIMENSIONS is only supported for the square ROI. Provide IMAGE_DIMENSIONS or use PROJECTION_MATRIX. time taken to process the video: 54.76855908401194 seconds.
In the above logs, note this line: INFO: Created TensorFlow Lite XNNPACK delegate for CPU
It is using a single core. How can I use all CPU cores?
I am running the above code on my MacBook with 10 cores, but when the video processing starts it uses only around 2 cores. How can I fix this so that it uses all available CPUs?