Adapted from https://blog.csdn.net/chen801090/article/details/95072592. The original article recognizes Huang Jiaju and Huang Jiaqiang; the only differences here are that I changed the two people to Wanxi and Jiang Shuying (why them... no particular reason) and added a download link for the recognizer (the resource has not been approved yet). Nothing else is different, I am just keeping a copy on my own blog. The comments under the original post complain that the recognition results are wrong, probably because the training set is too small; I hit the same problem with the photos of the two goddesses, so I swapped in different pictures and used more of them. The script I ended up with is below.
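Since too few training photos seems to be the main cause of the misrecognition, it is worth checking how many images each person actually has before training. Below is a small sketch of such a check, assuming the layout the script below expects (one sub-folder per person inside training_data, named with the integer label, e.g. training_data/0 and training_data/1, which is what prepare_training_data() assumes); the min_images threshold of 10 is just an arbitrary starting point, not a value from the original post.

```python
import os

# Count how many images each label folder in training_data contains.
# A handful of photos per person is usually not enough for LBPH.
def count_training_images(data_folder_path="training_data", min_images=10):
    for dir_name in sorted(os.listdir(data_folder_path)):
        subject_dir_path = os.path.join(data_folder_path, dir_name)
        if not os.path.isdir(subject_dir_path):
            continue
        n = len(os.listdir(subject_dir_path))
        note = "" if n >= min_images else "  <-- consider adding more photos"
        print("label %s: %d images%s" % (dir_name, n, note))

if __name__ == "__main__":
    count_training_images()
```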
```python
# -*- coding:utf-8 -*-
import cv2
import os
import numpy as np

# Mapping from integer labels to person names (the recognizer's labels must be integers)
subjects = ["jiangshuying", "wanxi"]


# Face detection
def detect_face(img):
    # Convert the image to grayscale, because the OpenCV face detector needs grayscale input
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Load the OpenCV Haar face detection classifier
    face_cascade = cv2.CascadeClassifier('/home/menglingwei/Desktop/study_1/haarcascade_frontalface_default.xml')

    # Detect faces at multiple scales; the return value is a list of face regions (x, y, width, height)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)

    # If no face is detected, return nothing
    if len(faces) == 0:
        return None, None

    # Assume for now there is only one face: (x, y) is the top-left corner, (w, h) the width and height
    (x, y, w, h) = faces[0]

    # Return the face region of the grayscale image together with the rectangle
    return gray[y:y + h, x:x + w], faces[0]


# Read all training images, detect the face in each one, and return two lists of the same size:
# the face regions and their labels
def prepare_training_data(data_folder_path):
    # Get the directories in the data folder (one per person)
    dirs = os.listdir(data_folder_path)

    # Two lists that hold all faces and labels
    faces = []
    labels = []

    # Go through each directory and read the images in it
    for dir_name in dirs:
        # The directory name (a string) is the label, so it must be an integer such as "0" or "1"
        label = int(dir_name)

        # Build the path of the directory that contains the images of the current person
        subject_dir_path = data_folder_path + "/" + dir_name

        # Get the image names in that directory
        subject_images_names = os.listdir(subject_dir_path)

        # Go through each image, detect the face, and add it to the faces list
        for image_name in subject_images_names:
            # Build the image path
            image_path = subject_dir_path + "/" + image_name

            # Read the image
            image = cv2.imread(image_path)

            # Show the image for 0.1 s
            cv2.imshow("Training on image...", image)
            cv2.waitKey(100)

            # Face detection
            face, rect = detect_face(image)

            # Ignore images where no face was detected
            if face is not None:
                # Add the face to the faces list together with its label
                faces.append(face)
                labels.append(label)

    cv2.waitKey(1)
    cv2.destroyAllWindows()  # Close the window after collecting the training data

    # Return the face and label lists
    return faces, labels


# Draw a rectangle on the image at the given (x, y) coordinates with the given width and height
def draw_rectangle(img, rect):
    (x, y, w, h) = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (128, 128, 0), 2)


# Write the person's name at the given (x, y) coordinates
def draw_text(img, text, x, y):
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (128, 128, 0), 2)


# Recognize the person in the given image and draw a rectangle and the name around the detected face
def predict(test_img):
    # Make a copy so the original image is kept
    img = test_img.copy()

    # Face detection
    face, rect = detect_face(img)

    # Predict the face; predict() returns a (label, confidence) pair
    label = face_recognizer.predict(face)

    # Get the name that corresponds to the label returned by the recognizer
    label_text = subjects[label[0]]

    # Draw a rectangle around the detected face
    draw_rectangle(img, rect)

    # Write the predicted name
    draw_text(img, label_text, rect[0], rect[1] - 5)

    # Return the annotated image
    return img
```
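Before preparing the whole training set, it can help to sanity-check detect_face() on a single photo, because a wrong path to haarcascade_frontalface_default.xml or a photo where the cascade finds nothing will otherwise only fail later inside the training loop. This is just a quick sketch that reuses detect_face() and draw_rectangle() from the script above; "some_photo.jpg" is a placeholder path.

```python
# Quick sanity check (not part of the original post): run the detector on one
# photo before training. "some_photo.jpg" is a placeholder path.
img = cv2.imread("some_photo.jpg")
if img is None:
    print("Could not read the image - check the path")
else:
    face, rect = detect_face(img)
    if face is None:
        print("No face detected - check the photo and the cascade .xml path")
    else:
        draw_rectangle(img, rect)
        cv2.imshow("detect_face check", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
```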
With the helper functions in place, the main part of the script trains the LBPH recognizer and runs it on the two test images:

```python
# Call prepare_training_data() to collect the training faces and labels
faces, labels = prepare_training_data("training_data")

# Create the LBPH recognizer and start training (an Eigen or Fisher recognizer can be used instead).
# cv2.face is provided by the opencv-contrib-python package.
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(labels))

# Load the test images
test_img1 = cv2.imread("test_data/test1.jpg")
test_img2 = cv2.imread("test_data/test2.jpg")

# Run the prediction
predicted_img1 = predict(test_img1)
predicted_img2 = predict(test_img2)

# Show both images
cv2.imshow(subjects[0], predicted_img1)
cv2.imshow(subjects[1], predicted_img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
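Two small extensions that are not in the original post but may help with the misrecognition complaints mentioned above. LBPH's predict() returns a (label, confidence) pair, where a lower confidence means a closer match, so a distant match can be reported as "unknown" instead of being forced onto one of the two names; the threshold of 80 below is an arbitrary starting value that needs tuning on your own data. The trained model can also be saved with write() and loaded with read() so it does not have to be retrained on every run. This sketch reuses detect_face(), draw_rectangle(), draw_text(), subjects, and face_recognizer from the script above.

```python
# Sketch of a prediction that uses the LBPH confidence (lower = closer match).
# The threshold 80.0 is a guess and should be tuned for your own data.
def predict_with_confidence(test_img, threshold=80.0):
    img = test_img.copy()
    face, rect = detect_face(img)
    if face is None:
        return img, None
    label, confidence = face_recognizer.predict(face)
    label_text = subjects[label] if confidence < threshold else "unknown"
    draw_rectangle(img, rect)
    draw_text(img, "%s (%.1f)" % (label_text, confidence), rect[0], rect[1] - 5)
    return img, confidence

# Save the trained model so it can be reloaded later without retraining
face_recognizer.write("lbph_model.yml")
# ...and in a later run:
# face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# face_recognizer.read("lbph_model.yml")
```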
With the replaced pictures both faces are recognized correctly. Readers who follow this post later should keep the points above (training-set size, integer-named folders, and the cascade file path) in mind.