Fisheye Camera Calibration with OpenCV
import numpy as np
import cv2
import glob

# Define the chessboard rows and columns (inner corners)
CHECKERBOARD = (6, 9)
subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW

# Object points for a single view of the pattern (z = 0 plane)
objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)

objpoints = []  # 3D points in real world space
imgpoints = []  # 2D points in the image plane
counter = 0

for path in glob.glob('datasets/*.png'):
    # Load the image and convert it to grayscale
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(
        gray, CHECKERBOARD,
        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)

    # Make sure the chessboard pattern was found in the image
    if ret:
        objpoints.append(objp)
        cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1), subpix_criteria)
        imgpoints.append(corners)
        # cv2.drawChessboardCorners(img, CHECKERBOARD, corners, ret)
        print(str(path))
        counter += 1

N_imm = counter  # number of calibration images
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_imm)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_imm)]

rms, _, _, _, _ = cv2.fisheye.calibrate(
    objpoints,
    imgpoints,
    gray.shape[::-1],
    K,
    D,
    rvecs,
    tvecs,
    calibration_flags,
    (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6))

# Undistort one of the calibration images with the estimated K and D
img = cv2.imread("datasets/1.png")
# Output map size is (width, height); here a fixed 340x240
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (340, 240), cv2.CV_16SC2)
undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

cv2.imshow('Original Image', img)
cv2.imshow('Undistorted Image', undistorted_img)
cv2.waitKey(0)
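Once K and D have been estimated, it is convenient to persist them so the chessboard detection and calibration do not have to be repeated on every run. A minimal sketch, assuming the script above has already run and using a hypothetical file name fisheye_calibration.npz:

import numpy as np

# Save the intrinsic matrix and distortion coefficients produced by cv2.fisheye.calibrate
# (the file name is arbitrary and chosen here only for illustration)
np.savez("fisheye_calibration.npz", K=K, D=D)

# Later, reload them without re-running the calibration
data = np.load("fisheye_calibration.npz")
K, D = data["K"], data["D"]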
Why use initUndistortRectifyMap + remap instead of undistorting each image directly? Because you can calculate the remapping once and then apply it multiple times, which is faster. For a Full HD image (I tested this in class with several students), a full undistort took on the order of 10 ms, while remap took on the order of 1 ms.
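A rough sketch of that comparison, assuming K and D come from the cv2.fisheye.calibrate call above and reusing one of the calibration images as the test frame:

import time
import cv2
import numpy as np

# Assumption: K and D are the outputs of cv2.fisheye.calibrate from the script above
frame = cv2.imread("datasets/1.png")
h, w = frame.shape[:2]

# Per-frame undistortion: the full computation happens on every call
t0 = time.perf_counter()
out_a = cv2.fisheye.undistortImage(frame, K, D, Knew=K)
t1 = time.perf_counter()

# Precomputed maps: build the lookup tables once...
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (w, h), cv2.CV_16SC2)
t2 = time.perf_counter()
# ...then only the cheap per-frame remap has to run inside the processing loop
out_b = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
t3 = time.perf_counter()

print(f"undistortImage per frame: {(t1 - t0) * 1000:.1f} ms")
print(f"remap per frame (maps precomputed): {(t3 - t2) * 1000:.1f} ms")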