[V1.0] Raspberry Pi + OpenCV camera face tracking system.
The current system combines a Raspberry Pi with a 51-series (8051-family) microcontroller:
the Raspberry Pi runs the Python face-tracking program and signals the direction over its GPIO pins,
while the microcontroller drives a 42-type stepper-motor linear rail left and right.
Resources:
https://download.csdn.net/download/weixin_53403301/80270053
Video:
[V1.0] Raspberry Pi OpenCV-Python camera face tracking system — Bilibili: https://b23.tv/gk7YVaO
Previous articles:
Face tracking part:
https://blog.csdn.net/weixin_53403301/article/details/120497427
MCU control of the 42-type stepper-motor linear rail:
https://blog.csdn.net/weixin_53403301/article/details/122658780
The Raspberry Pi Python code is as follows:
import cv2
import threading
import RPi.GPIO as GPIO
# import time

# Configure the two GPIO pins used to signal the MCU.
# BCM numbering; pins 23 and 24 are outputs, both idle HIGH
# (HIGH/HIGH is the "stop" state — see STOP() below).
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.output(23, GPIO.HIGH)
GPIO.output(24, GPIO.HIGH)
def LEFT():
    """Signal the MCU to move the rail left: pin 23 LOW, pin 24 HIGH.

    NOTE(review): the LOW/HIGH-to-direction mapping is decoded by the
    MCU firmware — confirm against the stepper controller code.
    """
    GPIO.output(23, GPIO.LOW)
    GPIO.output(24, GPIO.HIGH)
def RIGHT():
    """Signal the MCU to move the rail right: pin 23 HIGH, pin 24 LOW.

    NOTE(review): direction mapping is decoded by the MCU firmware —
    confirm against the stepper controller code.
    """
    GPIO.output(23, GPIO.HIGH)
    GPIO.output(24, GPIO.LOW)
def STOP():
    """Signal the MCU to stop the rail: both pins HIGH (the idle state)."""
    GPIO.output(23, GPIO.HIGH)
    GPIO.output(24, GPIO.HIGH)
# Open the default camera (device 0) and load the Haar face cascade.
# The cascade XML must sit in the working directory, or detectMultiScale
# will silently find nothing.
cap = cv2.VideoCapture(0)  # Turn on camera
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def track():
    """Detect faces in the current frame and issue a rail command.

    Reads the module-level ``gray``, ``faceImg`` and ``width`` that the
    capture loop sets before starting this function in a thread.  Each
    detected face is boxed on ``faceImg``; its horizontal centre is then
    compared against the frame centre with a +/-50 px dead zone and one
    of LEFT()/RIGHT()/STOP() is called.
    """
    faceRects = classifier.detectMultiScale(
        gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    for (x, y, w, h) in faceRects:
        # Box the face; the last argument (2) is the border thickness.
        cv2.rectangle(faceImg, (x, y), (x + w, y + h), (0, 255, 0), 2)
        center = x + w / 2  # horizontal centre of the detected face
        if center > width / 2 + 50:
            print("Left")
            LEFT()
            # time.sleep(0.1)
        elif center < width / 2 - 50:
            print("Right")
            RIGHT()
            # time.sleep(0.1)
        else:
            # Bug fix: the original used a third strict inequality here, so a
            # centre landing exactly on width/2 +/- 50 matched NO branch and
            # the motor kept its last command.  'else' closes that gap.
            print("Central")
            STOP()
# Capture loop: grab a frame, display it, and run face detection in a
# background thread so the preview stays responsive.  Exit on Esc.
thread = None
while True:
    ok, faceImg = cap.read()  # Read the camera image; ok is False on failure
    if not ok:
        print(' Unable to read camera !')
        break
    high = faceImg.shape[0]   # frame height (kept for reference)
    width = faceImg.shape[1]  # frame width, read by track()
    gray = cv2.cvtColor(faceImg, cv2.COLOR_BGR2GRAY)
    cv2.imshow("faceImg", faceImg)
    # Bug fix: the original spawned a NEW thread on every frame, piling up
    # unbounded concurrent detections that all race on the shared
    # gray/faceImg/width globals.  Only start a detection once the
    # previous one has finished.
    if thread is None or not thread.is_alive():
        thread = threading.Thread(target=track)
        thread.start()
    if cv2.waitKey(10) == 27:  # Esc key exits the preview
        break
# Turn off camera and close the preview window.
cap.release()
cv2.destroyAllWindows()
The pure Python version (GPIO calls disabled, for testing on a PC) is as follows:
import cv2
import threading
# GPIO support is disabled in this pure-Python (PC) version; the commented
# lines below document the Raspberry Pi wiring for reference.
#import RPi.GPIO as GPIO
# import time
#GPIO.setwarnings(False)
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(23, GPIO.OUT)
#GPIO.setup(24, GPIO.OUT)
#GPIO.output(23, GPIO.HIGH)
#GPIO.output(24, GPIO.HIGH)
#
#def LEFT():
#    GPIO.output(23, GPIO.LOW)
#    GPIO.output(24, GPIO.HIGH)
#
#def RIGHT():
#    GPIO.output(23, GPIO.HIGH)
#    GPIO.output(24, GPIO.LOW)
#
#def STOP():
#    GPIO.output(23, GPIO.HIGH)
#    GPIO.output(24, GPIO.HIGH)

# Open the default camera and load the Haar face cascade (the XML file
# must be in the working directory).
cap = cv2.VideoCapture(0)  # Turn on camera
classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def track():
    """Detect faces in the current frame and print the steering decision.

    PC version of the Raspberry Pi tracker: identical detection and
    dead-zone logic, but the GPIO motor commands are left commented out.
    Reads the module-level ``gray``, ``faceImg`` and ``width`` that the
    capture loop sets before starting this function in a thread.
    """
    faceRects = classifier.detectMultiScale(
        gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    for (x, y, w, h) in faceRects:
        # Box the face; the last argument (2) is the border thickness.
        cv2.rectangle(faceImg, (x, y), (x + w, y + h), (0, 255, 0), 2)
        center = x + w / 2  # horizontal centre of the detected face
        if center > width / 2 + 50:
            print("Left")
            # LEFT()
            # time.sleep(0.1)
        elif center < width / 2 - 50:
            print("Right")
            # RIGHT()
            # time.sleep(0.1)
        else:
            # Bug fix: the original used a third strict inequality here, so a
            # centre landing exactly on width/2 +/- 50 matched NO branch and
            # printed nothing.  'else' closes that gap.
            print("Central")
            # STOP()
# Capture loop: grab a frame, display it, and run face detection in a
# background thread so the preview stays responsive.  Exit on Esc.
thread = None
while True:
    ok, faceImg = cap.read()  # Read the camera image; ok is False on failure
    if not ok:
        print(' Unable to read camera !')
        break
    high = faceImg.shape[0]   # frame height (kept for reference)
    width = faceImg.shape[1]  # frame width, read by track()
    gray = cv2.cvtColor(faceImg, cv2.COLOR_BGR2GRAY)
    cv2.imshow("faceImg", faceImg)
    # Bug fix: the original spawned a NEW thread on every frame, piling up
    # unbounded concurrent detections that all race on the shared
    # gray/faceImg/width globals.  Only start a detection once the
    # previous one has finished.
    if thread is None or not thread.is_alive():
        thread = threading.Thread(target=track)
        thread.start()
    if cv2.waitKey(10) == 27:  # Esc key exits the preview
        break
# Turn off camera and close the preview window.
cap.release()
cv2.destroyAllWindows()