本文基于前面的手部跟踪功能做一个手势音量控制功能，代码用到了前面手部跟踪封装的 HandDetector。这篇文章在这里：
Python Opencv实践 - 手部跟踪-CSDN博客（使用 mediapipe 库做手部的实时跟踪，关于 mediapipe 的介绍请自行查阅）：https://blog.csdn.net/vivo01/article/details/135071340?spm=1001.2014.3001.5502
本文使用了 pycaw 来做音量控制，pycaw 的安装直接使用 pip install pycaw 即可。代码如下：
import cv2 as cv
import math
import mediapipe as mp
import time
from ctypes import cast,POINTER
from comtypes import CLSCTX_ALL
#使用pycaw来控制音量pip install pycaw
# Uses pycaw to control the system master volume: pip install pycaw
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume


class HandDetector():
    """Thin wrapper around mediapipe's Hands solution.

    Detects hands in a BGR frame, optionally draws the landmarks, and
    converts normalized landmark coordinates to pixel coordinates.
    """

    def __init__(self, mode=False, maxNumHands=2, modelComplexity=1,
                 minDetectionConfidence=0.5, minTrackingConfidence=0.5):
        self.mode = mode
        self.maxNumHands = maxNumHands
        self.modelComplexity = modelComplexity
        self.minDetectionConfidence = minDetectionConfidence
        self.minTrackingConfidence = minTrackingConfidence
        # Create the mediapipe solutions.hands detector.
        self.mpHands = mp.solutions.hands
        self.handsDetector = self.mpHands.Hands(
            self.mode, self.maxNumHands, self.modelComplexity,
            self.minDetectionConfidence, self.minTrackingConfidence)
        # mediapipe drawing helper used to render landmarks onto frames.
        self.mpDrawUtils = mp.solutions.drawing_utils

    def findHands(self, img, drawOnImage=True):
        """Run hand detection on a BGR frame; optionally draw landmarks.

        Stores the raw detection result on self.results for later use by
        findHandPositions(). Returns the (possibly annotated) frame.
        """
        # mediapipe expects RGB input; OpenCV frames are BGR.
        imgRGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        self.results = self.handsDetector.process(imgRGB)
        # multi_hand_landmarks is non-None only when at least one hand was found.
        if self.results.multi_hand_landmarks:
            for handLandmarks in self.results.multi_hand_landmarks:
                if drawOnImage:
                    self.mpDrawUtils.draw_landmarks(
                        img, handLandmarks, self.mpHands.HAND_CONNECTIONS)
        return img

    def findHandPositions(self, img, handID=0, drawOnImage=True):
        """Return [[id, x_px, y_px], ...] for one detected hand.

        handID selects which detected hand to read. Requires findHands()
        to have been called on this frame first (uses self.results).
        Returns an empty list when no hand was detected.
        """
        landmarkList = []
        if self.results.multi_hand_landmarks:
            handLandmarks = self.results.multi_hand_landmarks[handID]
            for id, landmark in enumerate(handLandmarks.landmark):
                # Landmarks are normalized [0,1]; scale to pixel coordinates.
                h, w, c = img.shape
                centerX, centerY = int(landmark.x * w), int(landmark.y * h)
                landmarkList.append([id, centerX, centerY])
                if drawOnImage:
                    # Mark each landmark with a small circle.
                    cv.circle(img, (centerX, centerY), 8, (0, 255, 0))
        return landmarkList


def DisplayFPS(img, preTime):
    """Draw the instantaneous FPS onto img and return the current timestamp.

    preTime is the timestamp returned by the previous call; pass 0 on the
    first frame.
    """
    curTime = time.time()
    # Guard against division by zero on back-to-back calls.
    if (curTime - preTime) <= 0:
        return curTime
    fps = 1 / (curTime - preTime)
    cv.putText(img, "FPS: " + str(int(fps)), (10, 70),
               cv.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)
    return curTime


def AudioEndpointGet():
    """Open the default speaker endpoint via pycaw.

    Returns (volume, vol_range) where volume is an IAudioEndpointVolume
    pointer and vol_range is (min_dB, max_dB, step_dB) from GetVolumeRange().
    """
    devices = AudioUtilities.GetSpeakers()
    interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
    volume = cast(interface, POINTER(IAudioEndpointVolume))
    vol_range = volume.GetVolumeRange()
    return volume, vol_range


def AudioVolumeLevelSet(volume, vol_range, value):
    """Set the master volume level (in dB), ignoring out-of-range values."""
    if volume:
        # Silently drop values outside [min_dB, max_dB].
        if (value < vol_range[0]) or (value > vol_range[1]):
            return
        volume.SetMasterVolumeLevel(value, None)


def main():
    """Map the thumb-to-index-fingertip distance in a video to system volume."""
    video = cv.VideoCapture("../../SampleVideos/handVolumeControl.mp4")
    # Timestamp for the FPS overlay.
    preTime = 0
    handDetector = HandDetector(minDetectionConfidence=0.7)
    volume, volumeRange = AudioEndpointGet()
    print(volumeRange)
    # AudioVolumeLevelSet(volume, volumeRange, volumeRange[0])
    # Track the observed min/max fingertip distance for calibration info.
    minFingerDistance = 1000
    maxFingerDistance = 0
    while True:
        ret, frame = video.read()
        if ret == False:
            break
        frame = handDetector.findHands(frame)
        hand0Landmarks = handDetector.findHandPositions(frame)
        if len(hand0Landmarks) != 0:
            # Landmark 4 is the thumb tip, landmark 8 the index fingertip.
            thumbX, thumbY = hand0Landmarks[4][1], hand0Landmarks[4][2]
            indexFingerX, indexFingerY = hand0Landmarks[8][1], hand0Landmarks[8][2]
            # Midpoint between the two fingertips.
            cx, cy = (thumbX + indexFingerX) / 2, (thumbY + indexFingerY) / 2
            # Highlight both fingertips with filled circles.
            cv.circle(frame, (thumbX, thumbY), 18, (90, 220, 180), cv.FILLED)
            cv.circle(frame, (indexFingerX, indexFingerY), 18, (0, 120, 255), cv.FILLED)
            # Line connecting the two fingertips.
            cv.line(frame, (thumbX, thumbY), (indexFingerX, indexFingerY), (255, 60, 60), 3)
            # Euclidean distance between thumb tip and index fingertip.
            distance = math.hypot(thumbX - indexFingerX, thumbY - indexFingerY)
            # Record the min/max distance seen; a better approach would be a
            # live camera calibration pass. In the test video the observed
            # range is roughly 30 - 425 pixels.
            if distance < minFingerDistance:
                minFingerDistance = distance
            if distance > maxFingerDistance:
                maxFingerDistance = distance
            # print(distance)
            if distance < 40:
                # Fingers pinched: show a green midpoint and mute to minimum.
                cv.circle(frame, (int(cx), int(cy)), 18, (0, 255, 0), cv.FILLED)
                AudioVolumeLevelSet(volume, volumeRange, volumeRange[0])
            else:
                cv.circle(frame, (int(cx), int(cy)), 18, (0, 0, 255), cv.FILLED)
                # Linearly map distance (0..425, the max seen in this video)
                # to dB: volumeRange on this machine is -63.5..0 dB, step 0.5.
                value = volumeRange[0] * (1 - (distance / 425))
                print(value)
                AudioVolumeLevelSet(volume, volumeRange, value)
        preTime = DisplayFPS(frame, preTime)
        cv.imshow("Real Time Hand Detection", frame)
        if cv.waitKey(30) & 0xFF == ord('q'):
            break
    print("Min Max distance between thumb and index finger tips: ",
          minFingerDistance, maxFingerDistance)
    video.release()
    cv.destroyAllWindows()


if __name__ == "__main__":
    main()

# 效果可以参考我的B站视频（demo video）:
# https://www.bilibili.com/video/BV1Ej411H79q/