import cv2
import numpy as np
from glob import glob
import math

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection used below

def StereoCalib(imageList, boardSize, squareSize, displayCorners=True, useCalibrated=True, showRectified=True):
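    """Detect chessboard corners in paired top/side images and calibrate the
    stereo rig between the two cameras.

    Intrinsics are loaded from the precomputed 'top_calibration.npz' and
    'side_calibration.npz' files. The extrinsics are estimated both with
    cv2.stereoCalibrate and with a hand-rolled normalized eight-point
    algorithm, and the resulting projection matrices are saved to
    'calibration.npz' for later triangulation.
    """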
    nImages = len(imageList) // 2
    goodImageList = []
    imageSize = None

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    topImagePoints = []
    sideImagePoints = []

    imagePoints = []
    imagePoints.append([])
    imagePoints.append([])

    for n in range(0, nImages):
        imagePoints[0].append(None)
        imagePoints[1].append(None)

    # Planar chessboard model: one 3D point per inner corner, scaled to the square size
    pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
    pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
    pattern_points *= squareSize
    objectPoints = []

    j = 0

    tempTop = None

    for i in range(0, nImages):
        for k in range(0, 2):
            filename = imageList[i*2+k]
            print('processing %s... ' % filename)
            img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
            if img is None:
                break
            if imageSize is None:
                # OpenCV calibration routines expect (width, height)
                imageSize = (img.shape[1], img.shape[0])

            h, w = img.shape[:2]
            found, corners = cv2.findChessboardCorners(img, boardSize)

            if not found:
                print('chessboard not found')
                break
            else:
                term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
                cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
                if displayCorners:
                    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
                    cv2.drawChessboardCorners(vis, boardSize, corners, found)
                    cv2.imshow("corners", vis)
                    cv2.waitKey(50)  # give the window a chance to redraw
                if k == 1:
                    # Image from side camera: keep the pair only once both views succeed
                    goodImageList.append(imageList[i*2])
                    goodImageList.append(imageList[i*2+1])
                    j += 1
                    sideImagePoints.append(corners.reshape(-1, 2))
                    topImagePoints.append(tempTop)
                    objectPoints.append(pattern_points)
                    print('Added top and side points')
                else:
                    # Image from top camera: stash corners until the side view is checked
                    tempTop = corners.reshape(-1, 2)

                print('OK')

    print(str(j) + " chessboard pairs have been detected\n")

    nImages = j
    if nImages < 2:
        print("Too few pairs to run calibration\n")
        return

    # print(imagePoints[1])
    # print(objectPoints)

    print("Img count: " + str(len(topImagePoints)))
    print("Obj count: " + str(len(objectPoints)))

    top_calibration = np.load('top_calibration.npz')
    side_calibration = np.load('side_calibration.npz')

    top_rms = top_calibration['rms']
    top_camera_matrix = top_calibration['camera_matrix']
    top_dist_coefs = top_calibration['dist_coefs']

    side_rms = side_calibration['rms']
    side_camera_matrix = side_calibration['camera_matrix']
    side_dist_coefs = side_calibration['dist_coefs']

    # camera_transform = np.load('camera_transform.npz')
    # R = camera_transform['R']
    # T = camera_transform['T']

    # print('Calibrating top...')
    # top_rms, top_camera_matrix, top_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, topImagePoints, imageSize, None, None)
    print("Top Camera\nRMS: " + str(top_rms))
    print("camera matrix: " + str(top_camera_matrix))
    print("distortion coefficients: " + str(top_dist_coefs.ravel()))

    # print('Calibrating side...')
    # side_rms, side_camera_matrix, side_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, sideImagePoints, imageSize, None, None)
    print("Side Camera\nRMS: ", side_rms)
    print("camera matrix:\n", side_camera_matrix)
    print("distortion coefficients: ", side_dist_coefs.ravel())

    # top_undistorted = cv2.undistort(cv2.imread(imageList[0]), top_camera_matrix, top_dist_coefs)
    # cv2.imshow("Top Undistorted", top_undistorted)
    # side_undistorted = cv2.undistort(cv2.imread(imageList[1]), side_camera_matrix, side_dist_coefs)
    # cv2.imshow("Side Undistorted", side_undistorted)

    # cameraMatrix[0] = cv2.initCameraMatrix2D(np.array(objectPoints), np.array(imagePoints[0]), imageSize, 0)
    # cameraMatrix[1] = cv2.initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0)
    # print(objectPoints[0])

    # ret, rvec_top, tvec_top = cv2.solvePnP(objectPoints[0], topImagePoints[0], top_camera_matrix, top_dist_coefs)
    # ret, rvec_side, tvec_side = cv2.solvePnP(objectPoints[0], sideImagePoints[0], side_camera_matrix, side_dist_coefs)

    # print('rvec top', rvec_top)
    # print('tvec top', tvec_top)
    # print('rvec side', rvec_side)
    # print('tvec side', tvec_side)

    # print(topImagePoints[0])

    print('\n')
    print('Stereo calibration (cv2)...')
    # Camera 1 = top, camera 2 = side; intrinsics are held fixed
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        objectPoints, topImagePoints, sideImagePoints,
        top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs,
        imageSize, flags=cv2.CALIB_FIX_INTRINSIC)
    print("Rotation: ", R)
    r_vec, jac = cv2.Rodrigues(R)
    print("R_vec (degrees): ", r_vec * 180 / math.pi)
    print("Translation: ", T)
    print("Essential: ", E)
    print("Fundamental: ", F)

    print('Stereo calibration (DIY)...')
    # Stack the detected corners from every pair into one big correspondence set
    topImagePointsConcat = topImagePoints[0]
    sideImagePointsConcat = sideImagePoints[0]
    for i in range(1, len(topImagePoints)):
        topImagePointsConcat = np.concatenate((topImagePointsConcat, topImagePoints[i]))
        sideImagePointsConcat = np.concatenate((sideImagePointsConcat, sideImagePoints[i]))

    # Homogeneous image coordinates (u, v, 1) for each camera
    m1 = np.ones((len(topImagePointsConcat), 3))
    m1[:, 0:2] = topImagePointsConcat

    m2 = np.ones((len(sideImagePointsConcat), 3))
    m2[:, 0:2] = sideImagePointsConcat

    x1, T1 = normalizePoints(m1)
    x2, T2 = normalizePoints(m2)
    # print('Normalized', x1, T1)
    # Alternative normalization by image size:
    # N = np.array([[2.0/w, 0.0, -1.0], [0.0, 2.0/h, -1.0], [0.0, 0.0, 1.0]], np.float64)
    # x1 = np.dot(N, m1.T).T
    # x2 = np.dot(N, m2.T).T

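    # Eight-point algorithm: the epipolar constraint x2^T * F * x1 = 0 expands, for each
    # correspondence (u1, v1) <-> (u2, v2), into one linear equation in the nine entries of F:
    #   u1*u2*f11 + v1*u2*f12 + u2*f13 + u1*v2*f21 + v1*v2*f22 + v2*f23 + u1*f31 + v1*f32 + f33 = 0
    # Stacking one such row per correspondence gives A below; its null space (approximated
    # via SVD) holds the entries of F for the normalized points.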
    A = np.ones((len(topImagePointsConcat), 9))
    A[:, 0] = np.multiply(x1[:, 0], x2[:, 0])
    A[:, 1] = np.multiply(x1[:, 1], x2[:, 0])
    A[:, 2] = x2[:, 0]
    A[:, 3] = np.multiply(x1[:, 0], x2[:, 1])
    A[:, 4] = np.multiply(x1[:, 1], x2[:, 1])
    A[:, 5] = x2[:, 1]
    A[:, 6] = x1[:, 0]
    A[:, 7] = x1[:, 1]
    print(A)

    # The least-squares solution is the right singular vector for the smallest singular value
    U, D, V = np.linalg.svd(A)
    V = V.conj().T
    F_new = V[:, 8].reshape(3, 3).copy()

    # Enforce the rank-2 constraint by zeroing the smallest singular value
    U, D, V = np.linalg.svd(F_new)
    D_diag = np.diag([D[0], D[1], 0])
    F_new = np.dot(np.dot(U, D_diag), V)

    # Undo the normalization: if x2n^T F x1n = 0 with x1n = T1 x1 and x2n = T2 x2,
    # the fundamental matrix in pixel coordinates is T2^T F T1
    F_new = np.dot(np.dot(T2.T, F_new), T1)

    print(F_new)
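
    # The rotation and translation below replace the stereoCalibrate estimate with a
    # hardcoded camera pose: a -90 degree rotation about the x axis and a translation
    # of (0, -130, 130), presumably in the same millimetre units as squareSize.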
    R, jac = cv2.Rodrigues(np.array([[-90], [0], [0]], dtype=np.float64) * math.pi / 180)
    T = np.array([[0], [-130], [130]], dtype=np.float64)
    print("Rotation: ", R)
    print("Translation: ", T)
    # F = F_new

    # top_undistorted = cv2.undistort(cv2.imread(imageList[0]), cameraMatrix1, distCoeffs1)
    # cv2.imshow("Top Undistorted", top_undistorted)
    # side_undistorted = cv2.undistort(cv2.imread(imageList[1]), cameraMatrix2, distCoeffs2)
    # cv2.imshow("Side Undistorted", side_undistorted)

    # R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T)

    print('\n')
    # print('Stereo rectification (cv2)...')
    # R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, R, T, alpha=1)
    # print("R1: ", R1)
    # print("R2: ", R2)
    # print("P1: ", P1)
    # print("P2: ", P2)
    # print('Q: ', Q)

    print('Stereo rectification (DIY)...')
    # Projection matrices for triangulation: camera 1 (top) is the reference frame,
    # so P1 = K_top * [I | 0]; camera 2 (side) is offset by the extrinsics, P2 = K_side * [R | T]
    P1 = np.concatenate((np.dot(top_camera_matrix, np.eye(3)), np.dot(top_camera_matrix, np.zeros((3, 1)))), axis=1)
    P2 = np.concatenate((np.dot(side_camera_matrix, R), np.dot(side_camera_matrix, T)), axis=1)
    print("P1: ", P1)
    print("P2: ", P2)

    # np.savez_compressed('calibration.npz', R1=R1, R2=R2, P1=P1, P2=P2, CameraMatrix1=cameraMatrix1, CameraMatrix2=cameraMatrix2, DistCoeffs1=distCoeffs1, DistCoeffs2=distCoeffs2, R=R, T=T, E=E, F=F)
    np.savez_compressed('calibration.npz', P1=P1, P2=P2, CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs, R=R, T=T, E=E, F=F)

    # path = np.load("path.npz")
    # top_path = path["top_path"]
    # side_path = path["side_path"]

    # tip3D_homogeneous = cv2.triangulatePoints(P1, P2, top_path.reshape(2,-1)[:,50:75], side_path.reshape(2,-1)[:,50:75])
    # tip3D = (tip3D_homogeneous/tip3D_homogeneous[3])[0:3]
    # print("3D coords: ", tip3D)
    # ax.scatter(np.array(tip3D)[0,:], np.array(tip3D)[1,:], np.array(tip3D)[2,:])
    # plt.show()

    # topInputPoints = np.array(topImagePoints[0]).reshape(2,-1)
    # sideInputPoints = np.array(sideImagePoints[0]).reshape(2,-1)
    # np.savez_compressed('points.npz', top=topInputPoints, side=sideInputPoints)
    # points = cv2.triangulatePoints(P1, P2, topInputPoints[:,50:100], sideInputPoints[:,50:100])
    # point3D = points/points[3,:]
    # print("3D points: " + str(point3D))

def main():
    size = (9, 7)   # inner corners per chessboard row and column
    squareSize = 6  # millimeters
    sourcePath = '/home/jgschornak/NeedleGuidance/images_converging_cams/'

    top_img_mask = sourcePath + 'top*.jpg'
    top_img_names = glob(top_img_mask)

    side_img_mask = sourcePath + 'side*.jpg'
    side_img_names = glob(side_img_mask)

    numPairs = len(top_img_names)

    # Interleave the image paths as top0, side0, top1, side1, ...
    imgList = []
    for i in range(0, numPairs):
        imgList.append(sourcePath + 'top%i.jpg' % i)
        imgList.append(sourcePath + 'side%i.jpg' % i)

    print(imgList)

    StereoCalib(imgList, size, squareSize)

    # while True:
    #     if cv2.waitKey(1) & 0xFF == ord('q'):
    #         break

def normalizePoints(pts):
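    """Hartley normalization: translate the points so their centroid is at the
    origin and scale them so the mean distance from the origin is sqrt(2).
    Returns the normalized homogeneous points and the 3x3 transform T that
    performs the normalization (needed later to denormalize F).
    """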
    centroid = np.mean(pts, axis=0)

    new_pts = np.array(pts - centroid)

    mean_dist = np.mean(np.linalg.norm(new_pts, axis=1))

    scale = math.sqrt(2) / mean_dist

    T = np.eye(3)
    T[0, 0] = scale
    T[1, 1] = scale
    T[0, 2] = -scale * centroid[0]
    T[1, 2] = -scale * centroid[1]
    print(T)

    return np.dot(T, pts.T).T, T

if __name__ == '__main__':
    main()