diff --git a/computer_vision/facemaskdetectorproject.py b/computer_vision/facemaskdetectorproject.py
new file mode 100644
index 000000000000..ad7a91084883
--- /dev/null
+++ b/computer_vision/facemaskdetectorproject.py
@@ -0,0 +1,117 @@
+# Face Mask Detector project in Python
+
+
+## Part-01: Training the model
+
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
+from keras.callbacks import ModelCheckpoint
+
+# Two convolution/pooling blocks followed by a small fully connected classifier.
+model = Sequential([
+    Conv2D(100, (3, 3), activation='relu', input_shape=(150, 150, 3)),
+    MaxPooling2D(2, 2),
+
+    Conv2D(100, (3, 3), activation='relu'),
+    MaxPooling2D(2, 2),
+
+    Flatten(),
+    Dropout(0.5),
+    Dense(50, activation='relu'),
+    Dense(2, activation='softmax'),
+])
+# flow_from_directory defaults to one-hot ("categorical") labels, so the matching
+# loss for the two-unit softmax output is categorical cross-entropy.
+model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
+
+TRAINING_DIR = "./train"
+train_datagen = ImageDataGenerator(rescale=1.0 / 255,
+                                   rotation_range=40,
+                                   width_shift_range=0.2,
+                                   height_shift_range=0.2,
+                                   shear_range=0.2,
+                                   zoom_range=0.2,
+                                   horizontal_flip=True,
+                                   fill_mode='nearest')
+
+train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
+                                                    batch_size=10,
+                                                    target_size=(150, 150))
+
+VALIDATION_DIR = "./test"
+validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
+
+validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
+                                                              batch_size=10,
+                                                              target_size=(150, 150))
+
+# Keep only the checkpoint with the best validation loss; the filename pattern
+# matches the file loaded back in Part-02.
+checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
+                             monitor='val_loss',
+                             verbose=0,
+                             save_best_only=True,
+                             mode='auto')
+
+history = model.fit(train_generator,
+                    epochs=10,
+                    validation_data=validation_generator,
+                    callbacks=[checkpoint])
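+
+# Optional sanity check (illustrative addition, not part of the original script):
+# report the trained model's loss and accuracy on the validation generator
+# before moving on to the live webcam test in Part-02.
+val_loss, val_acc = model.evaluate(validation_generator)
+print(f'validation loss: {val_loss:.4f}  accuracy: {val_acc:.4f}')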
+
+
+## Part-02: Testing the model on a live webcam feed
+
+# Imports are repeated so this part can also be run as a standalone script.
+import cv2
+import numpy as np
+from keras.models import load_model
+
+# Load the checkpoint written in Part-01 (adjust the epoch number to whichever
+# checkpoint was actually saved).
+model = load_model("./model-010.h5")
+
+# Class indices follow the alphabetical order of the sub-directories in ./train.
+labels_dict = {0: 'without mask', 1: 'mask'}
+color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
+
+size = 4
+webcam = cv2.VideoCapture(0)  # Use camera 0
+
+# Load the Haar cascade face detector bundled with OpenCV
+classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+while True:
+    rval, im = webcam.read()
+    if not rval:  # Stop if no frame could be read
+        break
+    im = cv2.flip(im, 1)  # Flip horizontally so the preview acts as a mirror
+
+    # Resize the frame to speed up detection
+    mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))
+
+    # Detect faces on the downscaled frame
+    faces = classifier.detectMultiScale(mini)
+
+    # Classify and annotate each detected face
+    for f in faces:
+        (x, y, w, h) = [v * size for v in f]  # Scale the detection back up to the full frame
+        # Convert the face crop from BGR (OpenCV) to RGB to match the colour
+        # order used during training, then apply the same 150x150 / 255 preprocessing.
+        face_img = cv2.cvtColor(im[y:y + h, x:x + w], cv2.COLOR_BGR2RGB)
+        resized = cv2.resize(face_img, (150, 150))
+        normalized = resized / 255.0
+        reshaped = np.reshape(normalized, (1, 150, 150, 3))
+        result = model.predict(reshaped)
+
+        label = np.argmax(result, axis=1)[0]
+
+        cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
+        cv2.rectangle(im, (x, y - 40), (x + w, y), color_dict[label], -1)
+        cv2.putText(im, labels_dict[label], (x, y - 10),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
+
+    # Show the annotated frame
+    cv2.imshow('LIVE', im)
+    key = cv2.waitKey(10)
+    if key == 27:  # Esc key pressed: break out of the loop
+        break
+
+# Stop the video stream
+webcam.release()
+
+# Close all OpenCV windows
+cv2.destroyAllWindows()
+
+# end
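For quick experimentation without a webcam, the same preprocessing and prediction steps can be exercised on a single still image. The sketch below is illustrative only: model-010.h5 and example.jpg are placeholder file names, and the 150x150 resize, 1/255 scaling, and label mapping mirror the script above.

import cv2
import numpy as np
from keras.models import load_model

model = load_model('./model-010.h5')  # placeholder checkpoint name
classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
labels_dict = {0: 'without mask', 1: 'mask'}

img = cv2.imread('example.jpg')  # placeholder input image
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # match the RGB order used during training
for (x, y, w, h) in classifier.detectMultiScale(img):
    face = cv2.resize(rgb[y:y + h, x:x + w], (150, 150)) / 255.0
    result = model.predict(np.reshape(face, (1, 150, 150, 3)))
    print(labels_dict[int(np.argmax(result, axis=1)[0])], (x, y, w, h))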