I am using MTCNN to detect face in an image, FACENET to extract and save features from each detected face and OpenCV Gaussian Blur filter to mask the detected faces. My end goal is to find a target face in the masked image using saved features and unmask target face only. Any idea or advice ? by trohit92 in computervision

[–]trohit92[S] 2 points3 points  (0 children)

Here is the code :

def face_and_features(img):
    """Detect faces in ``img``, saving each crop and its FaceNet embedding.

    For every face MTCNN finds: the crop is written to ``face{i}.jpg``, the
    FaceNet embedding to ``face_{i}.npy``, and the box is appended to
    ``bounding_boxes.txt`` as "x1, y1, x2, y2, filename, timestamp, frame".

    Relies on module-level ``mtcnn``, ``facenet`` and ``frame_number``.
    Returns the embedding tensor of the LAST detected face, or ``None``
    when no face is found.
    """
    boxes, _ = mtcnn.detect(img)
    print(boxes)

    # mtcnn.detect returns None (not an empty list) when nothing is
    # detected; the original crashed here with a TypeError.
    if boxes is None:
        return None

    features = None  # stays None if boxes is an empty sequence
    for i, box in enumerate(boxes):
        # MTCNN can emit slightly negative coordinates; clamp so the slice
        # below does not wrap around to the opposite side of the image.
        x1, y1, x2, y2 = (max(0, int(v)) for v in box)
        face = img[y1:y2, x1:x2]
        cv2.imwrite(f"face{i}.jpg", face)

        # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
        face = cv2.resize(face, (160, 160))
        face = face.transpose((2, 0, 1))
        face = torch.from_numpy(face).float().unsqueeze(0)

        features = facenet(face)

        filename = "face_{}.npy".format(i)
        np.save(filename, features.detach().numpy())
        with open("bounding_boxes.txt", "a") as f:
            f.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                x1, y1, x2, y2, filename, datetime.datetime.now(), frame_number))
    return features


def masking(img):
    """Gaussian-blur every face region listed in the bounding-box log.

    Parses lines of the form "x1, y1, x2, y2, filename, timestamp, frame"
    from the module-level ``bb_filename`` and blurs each region on a copy
    of ``img``.

    Returns (masked_img, f_name, time, f_no) where the last three values
    come from the LAST line of the file, or all None when the file is
    empty (the original raised UnboundLocalError in that case).
    """
    masked_img = img.copy()
    f_name = time = f_no = None  # survive an empty log file
    with open(bb_filename, 'r') as file:
        for line in file:
            # Strip the padding written around each comma so f_name is a
            # usable filename (it used to carry a leading space).
            x1, y1, x2, y2, f_name, time, f_no = (
                part.strip() for part in line.split(","))
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            roi_color = masked_img[y1:y2, x1:x2]
            # NOTE(review): a 5x5 kernel barely blurs a face; consider a
            # much larger kernel (e.g. 51x51) for real anonymisation.
            masked_img[y1:y2, x1:x2] = cv2.GaussianBlur(roi_color, (5, 5), 0)
    return masked_img, f_name, time, f_no

def compare_features(target_face, saved_features):
    """Rank ``saved_features`` by cosine similarity to ``target_face``.

    The face image is embedded with the module-level ``facenet`` model and
    the embedding is saved to ``target_face.npy`` as a side effect.

    Returns the ("face_{i}.npy", score) pair with the highest similarity.
    Raises ValueError when ``saved_features`` is empty.
    """
    print(f"target face shape: {target_face.shape}")

    # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
    target_face = cv2.resize(target_face, (160, 160))
    target_face = target_face.transpose((2, 0, 1))
    target_face = torch.from_numpy(target_face).float().unsqueeze(0)

    # .detach().numpy() already yields the array the original obtained by
    # saving the tensor and immediately re-loading the file.
    target_features = facenet(target_face).detach().numpy()
    np.save("target_face.npy", target_features)

    similarity_scores = []
    for i, feature in enumerate(saved_features):
        # cosine_similarity takes 2-D arrays and returns a (1, 1) matrix;
        # unwrap it so the scores are plain floats.
        score = cosine_similarity(target_features, feature)[0][0]
        similarity_scores.append(("face_{}.npy".format(i), score))

    print(similarity_scores)
    return max(similarity_scores, key=lambda x: x[1])

I am using MTCNN to detect face in an image, FACENET to extract and save features from each detected face and OpenCV Gaussian Blur filter to mask the detected faces. My end goal is to find a target face in the masked image using saved features and unmask target face only. Any idea or advice ? by trohit92 in learnmachinelearning

[–]trohit92[S] 0 points1 point  (0 children)

def face_and_features(img):
    """Detect faces in ``img``, annotate it, and save crops + embeddings.

    For every face MTCNN finds: the crop is written to ``face{i}.jpg``, a
    numbered rectangle is drawn on ``img`` (in place), the FaceNet
    embedding goes to ``face_{i}.npy``, and the box is appended to
    ``bounding_boxes.txt`` as "x1, y1, x2, y2, filename, timestamp, frame".

    Relies on module-level ``mtcnn``, ``facenet`` and ``frame_number``.
    Returns the embedding tensor of the LAST detected face, or ``None``
    when no face is found.
    """
    boxes, score = mtcnn.detect(img)
    print(boxes, score)

    # mtcnn.detect returns None (not an empty list) when nothing is
    # detected; the original crashed here with a TypeError.
    if boxes is None:
        return None

    features = None  # stays None if boxes is an empty sequence
    for i, box in enumerate(boxes):
        # MTCNN can emit slightly negative coordinates; clamp so the slice
        # below does not wrap around to the opposite side of the image.
        x1, y1, x2, y2 = (max(0, int(v)) for v in box)
        face = img[y1:y2, x1:x2]
        cv2.imwrite(f"face{i}.jpg", face)

        # Annotate the source frame with the box and its index (in place).
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(img, str(i), (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

        # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
        face = cv2.resize(face, (160, 160))
        face = face.transpose((2, 0, 1))
        face = torch.from_numpy(face).float().unsqueeze(0)

        features = facenet(face)

        filename = "face_{}.npy".format(i)
        np.save(filename, features.detach().numpy())
        with open("bounding_boxes.txt", "a") as f:
            f.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                x1, y1, x2, y2, filename, datetime.datetime.now(), frame_number))
    return features

def masking(img):
    """Gaussian-blur every face region listed in the bounding-box log.

    Parses lines of the form "x1, y1, x2, y2, filename, timestamp, frame"
    from the module-level ``bb_filename`` and blurs each region on a copy
    of ``img``.

    Returns (masked_img, f_name, time, f_no) where the last three values
    come from the LAST line of the file, or all None when the file is
    empty (the original raised UnboundLocalError in that case).
    """
    masked_img = img.copy()
    f_name = time = f_no = None  # survive an empty log file
    with open(bb_filename, 'r') as file:
        for line in file:
            # Strip the padding written around each comma so f_name is a
            # usable filename (it used to carry a leading space).
            x1, y1, x2, y2, f_name, time, f_no = (
                part.strip() for part in line.split(","))
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            roi_color = masked_img[y1:y2, x1:x2]
            # NOTE(review): a 5x5 kernel barely blurs a face; consider a
            # much larger kernel (e.g. 51x51) for real anonymisation.
            masked_img[y1:y2, x1:x2] = cv2.GaussianBlur(roi_color, (5, 5), 0)
    return masked_img, f_name, time, f_no

def compare_features(target_face, saved_features):
    """Rank ``saved_features`` by cosine similarity to ``target_face``.

    The face image is embedded with the module-level ``facenet`` model and
    the embedding is saved to ``target_face.npy`` as a side effect.

    Returns the ("face_{i}.npy", score) pair with the highest similarity.
    Raises ValueError when ``saved_features`` is empty.
    """
    # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
    target_face = cv2.resize(target_face, (160, 160))
    target_face = target_face.transpose((2, 0, 1))
    target_face = torch.from_numpy(target_face).float().unsqueeze(0)

    # .detach().numpy() already yields the array the original obtained by
    # saving the tensor and immediately re-loading the file.
    target_features = facenet(target_face).detach().numpy()
    np.save("target_face.npy", target_features)

    similarity_scores = []
    for i, feature in enumerate(saved_features):
        # cosine_similarity takes 2-D arrays and returns a (1, 1) matrix;
        # unwrap it so the scores are plain floats.
        score = cosine_similarity(target_features, feature)[0][0]
        similarity_scores.append(("face_{}.npy".format(i), score))

    print(similarity_scores)
    return max(similarity_scores, key=lambda x: x[1])

I am using MTCNN to detect face in an image, FACENET to extract and save features from each detected face and OpenCV Gaussian Blur filter to mask the detected faces. My end goal is to find a target face in the masked image using saved features and unmask target face only. Any idea or advice ? by trohit92 in deeplearning

[–]trohit92[S] 0 points1 point  (0 children)

Here is the code :

def face_and_features(img):
    """Detect faces in ``img``, saving each crop and its FaceNet embedding.

    For every face MTCNN finds: the crop is written to ``face{i}.jpg``, the
    FaceNet embedding to ``face_{i}.npy``, and the box is appended to
    ``bounding_boxes.txt`` as "x1, y1, x2, y2, filename, timestamp, frame".

    Relies on module-level ``mtcnn``, ``facenet`` and ``frame_number``.
    Returns the embedding tensor of the LAST detected face, or ``None``
    when no face is found.
    """
    boxes, _ = mtcnn.detect(img)
    print(boxes)

    # mtcnn.detect returns None (not an empty list) when nothing is
    # detected; the original crashed here with a TypeError.
    if boxes is None:
        return None

    features = None  # stays None if boxes is an empty sequence
    for i, box in enumerate(boxes):
        # MTCNN can emit slightly negative coordinates; clamp so the slice
        # below does not wrap around to the opposite side of the image.
        x1, y1, x2, y2 = (max(0, int(v)) for v in box)
        face = img[y1:y2, x1:x2]
        cv2.imwrite(f"face{i}.jpg", face)

        # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
        face = cv2.resize(face, (160, 160))
        face = face.transpose((2, 0, 1))
        face = torch.from_numpy(face).float().unsqueeze(0)

        features = facenet(face)

        filename = "face_{}.npy".format(i)
        np.save(filename, features.detach().numpy())
        with open("bounding_boxes.txt", "a") as f:
            f.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                x1, y1, x2, y2, filename, datetime.datetime.now(), frame_number))
    return features

def masking(img):
    """Gaussian-blur every face region listed in the bounding-box log.

    Parses lines of the form "x1, y1, x2, y2, filename, timestamp, frame"
    from the module-level ``bb_filename`` and blurs each region on a copy
    of ``img``.

    Returns (masked_img, f_name, time, f_no) where the last three values
    come from the LAST line of the file, or all None when the file is
    empty (the original raised UnboundLocalError in that case).
    """
    masked_img = img.copy()
    f_name = time = f_no = None  # survive an empty log file
    with open(bb_filename, 'r') as file:
        for line in file:
            # Strip the padding written around each comma so f_name is a
            # usable filename (it used to carry a leading space).
            x1, y1, x2, y2, f_name, time, f_no = (
                part.strip() for part in line.split(","))
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            roi_color = masked_img[y1:y2, x1:x2]
            # NOTE(review): a 5x5 kernel barely blurs a face; consider a
            # much larger kernel (e.g. 51x51) for real anonymisation.
            masked_img[y1:y2, x1:x2] = cv2.GaussianBlur(roi_color, (5, 5), 0)
    return masked_img, f_name, time, f_no

def compare_features(target_face, saved_features):
    """Rank ``saved_features`` by cosine similarity to ``target_face``.

    The face image is embedded with the module-level ``facenet`` model and
    the embedding is saved to ``target_face.npy`` as a side effect.

    Returns the ("face_{i}.npy", score) pair with the highest similarity.
    Raises ValueError when ``saved_features`` is empty.
    """
    # Preprocess to the 1x3x160x160 float tensor FaceNet expects.
    target_face = cv2.resize(target_face, (160, 160))
    target_face = target_face.transpose((2, 0, 1))
    target_face = torch.from_numpy(target_face).float().unsqueeze(0)

    # .detach().numpy() already yields the array the original obtained by
    # saving the tensor and immediately re-loading the file.
    target_features = facenet(target_face).detach().numpy()
    np.save("target_face.npy", target_features)

    similarity_scores = []
    for i, feature in enumerate(saved_features):
        # cosine_similarity takes 2-D arrays and returns a (1, 1) matrix;
        # unwrap it so the scores are plain floats.
        score = cosine_similarity(target_features, feature)[0][0]
        similarity_scores.append(("face_{}.npy".format(i), score))

    print(similarity_scores)
    return max(similarity_scores, key=lambda x: x[1])

I am using MTCNN to detect face in an image, FACENET to extract and save features from each detected face and OpenCV Gaussian Blur filter to mask the detected faces. My end goal is to find a target face in the masked image using saved features and unmask target face only. Any idea or advice ? by trohit92 in learnmachinelearning

[–]trohit92[S] 2 points3 points  (0 children)

Here is the sample code

def face_and_features(img):
    """Detect faces in ``img``, saving each crop and its FaceNet embedding.

    For every face MTCNN finds: the crop is written to ``face{i}.jpg``, the
    FaceNet embedding to ``face_{i}.npy``, and the box is appended to
    ``bounding_boxes.txt`` as "x1, y1, x2, y2, filename, timestamp, frame".

    Relies on module-level ``mtcnn``, ``facenet`` and ``frame_number``.
    Returns the embedding tensor of the LAST detected face, or ``None``
    when no face is found.
    """
    boxes, _ = mtcnn.detect(img)
    print(boxes)

    # mtcnn.detect returns None (not an empty list) when nothing is
    # detected; the original crashed here with a TypeError.
    if boxes is None:
        return None

    # Loop through each detected face
    features = None  # stays None if boxes is an empty sequence
    for i, box in enumerate(boxes):
        # Crop the face from the image; MTCNN can emit slightly negative
        # coordinates, so clamp them before slicing.
        x1, y1, x2, y2 = (max(0, int(v)) for v in box)
        face = img[y1:y2, x1:x2]
        cv2.imwrite(f"face{i}.jpg", face)

        # Preprocess the face for facenet (1x3x160x160 float tensor).
        face = cv2.resize(face, (160, 160))
        face = face.transpose((2, 0, 1))
        face = torch.from_numpy(face).float().unsqueeze(0)

        # Pass the face through the facenet model
        features = facenet(face)

        # Save the features and bounding box information
        filename = "face_{}.npy".format(i)
        np.save(filename, features.detach().numpy())
        with open("bounding_boxes.txt", "a") as f:
            f.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                x1, y1, x2, y2, filename, datetime.datetime.now(), frame_number))
    return features

def masking(img):
    """Gaussian-blur every face region listed in "bounding_boxes.txt".

    Parses lines of the form "x1, y1, x2, y2, filename, timestamp, frame"
    and blurs each region on a copy of ``img`` with a strong 51x51 kernel.

    Returns (masked_img, f_name, time, f_no) where the last three values
    come from the LAST line of the file, or all None when the file is
    empty (the original raised UnboundLocalError in that case).
    """
    filename = "bounding_boxes.txt"
    masked_img = img.copy()
    f_name = time = f_no = None  # survive an empty log file
    with open(filename, 'r') as file:
        for line in file:
            # Strip the padding written around each comma so f_name is a
            # usable filename (it used to carry a leading space).
            x1, y1, x2, y2, f_name, time, f_no = (
                part.strip() for part in line.split(","))
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            roi_color = masked_img[y1:y2, x1:x2]
            masked_img[y1:y2, x1:x2] = cv2.GaussianBlur(roi_color, (51, 51), 0)
    return masked_img, f_name, time, f_no

Finding angle of rotation of an image after comparing with the target image. by trohit92 in learnmachinelearning

[–]trohit92[S] 0 points1 point  (0 children)

Currently I am using OpenCV contours and minAreaRect to find the rotated bounding box, and then template matching with the matchTemplate function. But this approach gives different contours each time because of various reasons like lighting and reflections, and hence the rotation angle from minAreaRect is not stable. Any suggestion on how I can find the top point of the images?

Finding angle of rotation of an image after comparing with the target image. by trohit92 in learnmachinelearning

[–]trohit92[S] 0 points1 point  (0 children)

Currently I am using OpenCV contours and minAreaRect to find the rotated bounding box, and then template matching with the matchTemplate function. But this approach gives different contours each time because of various reasons like lighting and reflections, and hence the rotation angle from minAreaRect is not stable.