멀티캠퍼스 AI과정/06 Deep Learning
윈도우 환경 mediapipe hand tracking
jhk828
2020. 11. 25. 10:38
20201115 윈도우 환경 mediapipe hand tracking
import os
import cv2
import mediapipe as mp
# Module-level MediaPipe handles: drawing utilities for rendering landmarks,
# and the Hands solution used to run the tracking model.
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
def track(input_data_path):
    """Run MediaPipe hand tracking over one video and display annotated frames.

    Opens the video at *input_data_path* (pass 0 instead of a path for the
    default webcam), draws detected hand landmarks on each frame, and shows
    the result in an OpenCV window. Playback stops at end-of-stream or when
    the user presses ESC.

    Args:
        input_data_path: Path to a video file readable by OpenCV VideoCapture.
    """
    hands = mp_hands.Hands(
        min_detection_confidence=0.7, min_tracking_confidence=0.5)
    cap = cv2.VideoCapture(input_data_path)
    try:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                # End of file (or camera read failure) — stop cleanly.
                break
            # Flip horizontally for a later selfie-view display, and convert
            # BGR (OpenCV) to RGB (what MediaPipe expects).
            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
            # To improve performance, mark the image as not writeable so it
            # can be passed by reference into the graph.
            image.flags.writeable = False
            results = hands.process(image)
            # Draw the hand annotations back onto a writeable BGR image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    mp_drawing.draw_landmarks(
                        image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            cv2.imshow('MediaPipe Hands', image)
            if cv2.waitKey(5) & 0xFF == 27:  # ESC aborts playback
                break
    finally:
        # Release the model and the capture device even if an exception
        # interrupts the loop (original leaked both on error).
        hands.close()
        cap.release()
    print("success")
def main(input_data_path):
    """Track hands in every video under *input_data_path*.

    Expects a two-level layout: input_data_path/<word-folder>/<video-file>.
    Each video file is passed to ``track`` in turn.

    Args:
        input_data_path: Root directory containing one subfolder per word.
    """
    for folder in os.listdir(input_data_path):
        # Build paths portably instead of hand-concatenating '/' separators.
        word_dir = os.path.join(input_data_path, folder)
        for file_name in os.listdir(word_dir):
            track(os.path.join(word_dir, file_name))
if __name__ == "__main__":
    # Process every word folder/video under the local 'input_video' directory.
    main('input_video')
코드 참고
google.github.io/mediapipe/getting_started/install.html#installing-on-windows