保姆级教程:用YOLOv8-Pose和OpenCV实现跌倒检测(附完整Python代码)
从零实现基于YOLOv8-Pose的智能跌倒检测系统。在养老监护、工地安全等场景中,跌倒检测技术正成为计算机视觉落地的典型应用。本文将手把手带您用Python构建完整的跌倒检测系统。相比传统方案,YOLOv8-Pose提供的17个关键点检测能力让算法实现更加优雅。

## 1. 环境配置与模型准备

首先需要搭建支持CUDA的Python环境。建议使用conda创建独立环境:

```bash
conda create -n fall_detection python=3.8
conda activate fall_detection
pip install ultralytics opencv-python numpy
```

YOLOv8-Pose模型可通过Ultralytics库直接加载:

```python
from ultralytics import YOLO

# 加载预训练姿态估计模型
pose_model = YOLO("yolov8n-pose.pt")  # 也可选择yolov8s/l/x-pose等不同规模
```

提示:首次运行会自动下载模型权重;建议在GPU环境下使用以获得实时检测性能。

## 2. 人体关键点解析原理

YOLOv8-Pose输出的17个关键点对应人体不同部位(节选):

| 关键点索引 | 对应部位 | 关键点索引 | 对应部位 |
| --- | --- | --- | --- |
| 0 | 鼻子 | 9 | 右腕 |
| 1 | 左眼 | 10 | 左髋 |
| 5 | 左肩 | 11 | 左膝 |
| 6 | 右肩 | 12 | 右膝 |

跌倒检测主要关注四个核心点:pt5/pt6(左右肩)的中点作为上基准点,pt11/pt12(左右膝)的中点作为下基准点。

## 3. 跌倒判定算法实现

核心算法通过计算躯干倾斜角度进行判断:

```python
import math

def is_falling(keypoints, bbox):
    # 获取关键点坐标
    pt5 = keypoints[5][:2]    # 左肩
    pt6 = keypoints[6][:2]    # 右肩
    pt11 = keypoints[11][:2]  # 左膝
    pt12 = keypoints[12][:2]  # 右膝

    # 计算上下中心点
    center_up = ((pt5[0] + pt6[0]) / 2, (pt5[1] + pt6[1]) / 2)
    center_down = ((pt11[0] + pt12[0]) / 2, (pt11[1] + pt12[1]) / 2)

    # 构建直角三角形计算角度
    right_angle_x = center_down[0]
    right_angle_y = center_up[1]
    a = abs(right_angle_x - center_up[0])
    b = abs(center_down[1] - right_angle_y)
    # 用 atan2 代替 atan(a/b),躯干完全水平(b=0)时不会除零,直接得到90度
    angle = math.degrees(math.atan2(a, b))

    # 计算宽高比
    width = bbox[2]
    height = bbox[3]
    aspect_ratio = width / height

    # 跌倒判定条件
    fall_conditions = [
        angle > 60,                     # 躯干倾斜角度过大
        center_down[1] < center_up[1],  # 膝盖高于肩膀(图像坐标y越小越靠上)
        aspect_ratio > 5 / 3,           # 检测框宽高比异常
    ]
    return any(fall_conditions)
```

## 4.
完整系统集成与可视化

将各模块整合为端到端解决方案:

```python
def visualize_detection(frame, results):
    for result in results:
        # 绘制检测框(xywh为中心点坐标+宽高,需换算为左上/右下角)
        bbox = result.boxes.xywh[0].cpu().numpy()
        cv2.rectangle(frame,
                      (int(bbox[0] - bbox[2] / 2), int(bbox[1] - bbox[3] / 2)),
                      (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2)),
                      (0, 0, 255), 2)

        # 绘制关键点连线
        keypoints = result.keypoints.xy[0].cpu().numpy()
        skeleton = [[5, 6], [5, 11], [6, 12]]  # 肩线、左躯干、右躯干
        for i, j in skeleton:
            cv2.line(frame,
                     (int(keypoints[i][0]), int(keypoints[i][1])),
                     (int(keypoints[j][0]), int(keypoints[j][1])),
                     (255, 0, 0), 2)

        # 跌倒状态显示
        if is_falling(keypoints, bbox):
            cv2.putText(frame, "FALL DETECTED!", (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

# 视频流处理主循环
cap = cv2.VideoCapture(0)  # 0为默认摄像头
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    results = pose_model(frame)
    visualize_detection(frame, results)
    cv2.imshow("Fall Detection", frame)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```

## 5. 性能优化与部署建议

实际部署时需要考虑以下优化点:

- 模型选择:室内场景可用yolov8n-pose(约2.6ms/帧,RTX 3060);复杂场景可用yolov8x-pose,精度更高但速度较慢。
- 多目标处理:逐个遍历检测到的人体,而不是只取第一个:

```python
for result in results:
    if len(result.boxes) == 0:
        continue
    for i in range(len(result.boxes)):
        bbox = result.boxes.xywh[i].cpu().numpy()
        keypoints = result.keypoints.xy[i].cpu().numpy()
        # 处理每个检测到的人体
```

- 误报过滤:添加时间持续判断(连续5帧检测到跌倒才触发警报);设置ROI区域限制检测范围。
- 在树莓派等边缘设备部署时:建议使用TensorRT加速,降低输入分辨率到320x320,并开启Half-precision(FP16)推理。