Created March 24, 2025 17:27
import cv2
import numpy as np
import insightface
from insightface.app import FaceAnalysis
from PIL import Image, ImageDraw, ImageFont
import os
from skimage.metrics import structural_similarity as ssim


def cv2_to_pil(cv2_img):
    """Convert a BGR OpenCV image to an RGB PIL image."""
    cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
    return Image.fromarray(cv2_img)


def pil_to_cv2(pil_img):
    """Convert an RGB PIL image back to a BGR OpenCV image."""
    cv2_img = np.array(pil_img)
    return cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR)


def main():
    img1 = cv2.imread("a.png")
    img2 = cv2.imread("b.png")
    if img1 is None or img2 is None:
        print("Could not read a.png and/or b.png")
        return

    # Detection + recognition pipeline using the buffalo_l model pack.
    app = FaceAnalysis(name='buffalo_l')
    app.prepare(ctx_id=0, det_size=(640, 640))
    print("Modules in use:", app.models)

    faces1 = app.get(img1)
    faces2 = app.get(img2)

    img1_display = img1.copy()
    img2_display = img2.copy()

    if len(faces1) == 0 or len(faces2) == 0:
        print("No face detected in one or both images")
        return

    # Compare only the first detected face in each image.
    face1 = faces1[0]
    face2 = faces2[0]

    # Cosine similarity of the recognition embeddings, scaled to percent.
    emb1 = face1.embedding
    emb2 = face2.embedding
    similarity = np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
    similarity_percent = max(0, float(similarity) * 100)

    def get_eye_nose_region(face, img):
        # Crop the upper ~65% of the face box (roughly the eye-nose area).
        x1, y1, x2, y2 = face.bbox.astype(int)
        # Clamp to the image bounds so the crop is never empty.
        h, w = img.shape[:2]
        x1, x2 = max(0, x1), min(w, x2)
        y1, y2 = max(0, y1), min(h, y2)
        face_height = y2 - y1
        eye_nose_y2 = y1 + int(face_height * 0.65)
        return img[y1:eye_nose_y2, x1:x2], (x1, y1, x2, eye_nose_y2)

    eye_nose1, region1 = get_eye_nose_region(face1, img1)
    eye_nose2, region2 = get_eye_nose_region(face2, img2)

    # SSIM on the grayscale, equally sized eye-nose crops.
    eye_nose1_gray = cv2.cvtColor(eye_nose1, cv2.COLOR_BGR2GRAY)
    eye_nose2_gray = cv2.cvtColor(eye_nose2, cv2.COLOR_BGR2GRAY)
    eye_nose1_gray = cv2.resize(eye_nose1_gray, (128, 64))
    eye_nose2_gray = cv2.resize(eye_nose2_gray, (128, 64))
    eye_nose_sim = ssim(eye_nose1_gray, eye_nose2_gray)
    eye_nose_percent = eye_nose_sim * 100

    # Final score: 70% eye-nose SSIM, 30% full-face embedding similarity.
    weighted = 0.7 * eye_nose_percent + 0.3 * similarity_percent

    # Draw full-face boxes (green) on both images.
    for face in faces1:
        box = face.bbox.astype(int)
        cv2.rectangle(img1_display, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
    for face in faces2:
        box = face.bbox.astype(int)
        cv2.rectangle(img2_display, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)

    # Draw the eye-nose regions (blue in BGR).
    x1, y1, x2, y2 = region1
    cv2.rectangle(img1_display, (x1, y1), (x2, y2), (255, 0, 0), 2)
    x1, y1, x2, y2 = region2
    cv2.rectangle(img2_display, (x1, y1), (x2, y2), (255, 0, 0), 2)

    # Draw landmarks (yellow), preferring the 106-point 2D set when available.
    if hasattr(face1, 'landmark_2d_106') and face1.landmark_2d_106 is not None:
        landmarks = face1.landmark_2d_106.astype(np.int32)
        for point in landmarks:
            cv2.circle(img1_display, tuple(point), 3, (0, 255, 255), -1)
    if hasattr(face2, 'landmark_2d_106') and face2.landmark_2d_106 is not None:
        landmarks = face2.landmark_2d_106.astype(np.int32)
        for point in landmarks:
            cv2.circle(img2_display, tuple(point), 3, (0, 255, 255), -1)

    # Fall back to the 68-point 3D landmarks (x, y only) if the 2D set is missing.
    if not hasattr(face1, 'landmark_2d_106') or face1.landmark_2d_106 is None:
        if hasattr(face1, 'landmark_3d_68') and face1.landmark_3d_68 is not None:
            landmarks = face1.landmark_3d_68.astype(np.int32)
            for point in landmarks:
                cv2.circle(img1_display, (point[0], point[1]), 3, (0, 255, 255), -1)
    if not hasattr(face2, 'landmark_2d_106') or face2.landmark_2d_106 is None:
        if hasattr(face2, 'landmark_3d_68') and face2.landmark_3d_68 is not None:
            landmarks = face2.landmark_3d_68.astype(np.int32)
            for point in landmarks:
                cv2.circle(img2_display, (point[0], point[1]), 3, (0, 255, 255), -1)

    # Resize both images to the same height and place them side by side.
    target_height = 500
    img1_display = cv2.resize(img1_display, (int(target_height * img1.shape[1] / img1.shape[0]), target_height))
    img2_display = cv2.resize(img2_display, (int(target_height * img2.shape[1] / img2.shape[0]), target_height))
    combined = np.hstack((img1_display, img2_display))

    # Annotate with PIL so a TrueType font can be used for the labels.
    pil_img = cv2_to_pil(combined)
    draw = ImageDraw.Draw(pil_img)

    font_path = None
    font_paths = [
        "C:\\Windows\\Fonts\\arial.ttf",
        "C:\\Windows\\Fonts\\segoeui.ttf",
        "C:\\Windows\\Fonts\\calibri.ttf",
    ]
    for path in font_paths:
        if os.path.exists(path):
            font_path = path
            break
    if font_path:
        font_big = ImageFont.truetype(font_path, 32)
        font_small = ImageFont.truetype(font_path, 20)
    else:
        font_big = ImageFont.load_default()
        font_small = ImageFont.load_default()

    draw.text((10, 30), f"Similarity: {weighted:.2f}%", font=font_big, fill=(0, 255, 0))
    draw.text((10, 70), f"Full face: {similarity_percent:.2f}% | Eye-nose: {eye_nose_percent:.2f}%", font=font_small, fill=(255, 255, 255))
    draw.text((10, 100), "Upper-face region (blue)", font=font_small, fill=(255, 0, 0))
    draw.text((10, 130), "Full-face region (green)", font=font_small, fill=(0, 255, 0))
    draw.text((10, 160), "Face landmarks (yellow)", font=font_small, fill=(255, 255, 0))

    result = pil_to_cv2(pil_img)
    cv2.imwrite("turkce_karsilastirma_landmark.jpg", result)

    print("Visual comparison saved: turkce_karsilastirma_landmark.jpg")
    print(f"Full-face similarity: {similarity_percent:.2f}%")
    print(f"Eye-nose similarity: {eye_nose_percent:.2f}%")
    print(f"Weighted similarity: {weighted:.2f}%")


if __name__ == "__main__":
    main()
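A note on running the script (these environment details are assumptions, not stated in the gist): it expects `a.png` and `b.png` in the working directory, and under a typical setup the PyPI packages `opencv-python`, `numpy`, `insightface`, `onnxruntime`, `pillow`, and `scikit-image` need to be installed, e.g. `pip install opencv-python numpy insightface onnxruntime pillow scikit-image`. On first use insightface normally downloads the `buffalo_l` model pack to its local cache; `ctx_id=0` targets the first GPU when a GPU-enabled onnxruntime build is present and otherwise falls back to CPU. The hard-coded font paths are Windows-specific; on other systems the script falls back to PIL's default bitmap font.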