import os
import numpy as np
import cv2
import itertools
from scipy.optimize import least_squares

from .reconstruction import get_camera_parameters
from .pose_estimation import get_ground_truth_seams, estimate_camera_pose
from .final_reconstruction import merge_seams  # the previous merge_seams function is still usable


def get_initial_parameters_with_solvepnp(all_2d_endpoints, ground_truth):
    """[New] Use solvePnP to give the global optimization a better initial pose guess."""
    print("\n--- Step 1: Getting a good initial guess for poses using solvePnP ---")

    # 1. Intrinsics and 3D points (same as before)
    cam_params_L, cam_params_R, _ = get_camera_parameters()
    camera_intrinsics = np.array([
        cam_params_L['fc'][0], cam_params_L['fc'][1], cam_params_L['cc'][0], cam_params_L['cc'][1],
        cam_params_R['fc'][0], cam_params_R['fc'][1], cam_params_R['cc'][0], cam_params_R['cc'][1]
    ])
    points_3d_init = np.array([
        ground_truth['up_line1']['start_3d'], ground_truth['up_line1']['end_3d'],
        ground_truth['up_line2']['start_3d'], ground_truth['up_line2']['end_3d'],
        ground_truth['bottom_line1']['start_3d'], ground_truth['bottom_line1']['end_3d'],
    ])

    # 2. [Key step] Compute an initial pose independently for each camera
    camera_poses_init = np.zeros((4, 6))
    camera_map = {0: ('up', 'L'), 1: ('up', 'R'), 2: ('bottom', 'L'), 3: ('bottom', 'R')}

    for i in range(4):
        part_type, side = camera_map[i]
        obj_pts, img_pts = [], []
        # Collect every point visible to this camera
        for line_name in ['line1', 'line2']:
            if f"{part_type}_{line_name}" in ground_truth:
                gt_key = f"{part_type}_{line_name}"
                img_key = f"{part_type}_{side.lower()}_{line_name}"
                # Determine the indices of this line's endpoints in points_3d_init
                if gt_key == 'up_line1':
                    p_indices = [0, 1]
                elif gt_key == 'up_line2':
                    p_indices = [2, 3]
                elif gt_key == 'bottom_line1':
                    p_indices = [4, 5]
                elif gt_key == 'bottom_line2':
                    p_indices = [2, 3]  # bottom_line2 also maps to points 2 and 3
                obj_pts.extend([points_3d_init[p_indices[0]], points_3d_init[p_indices[1]]])
                img_pts.extend([all_2d_endpoints[img_key]['start'], all_2d_endpoints[img_key]['end']])

        # Use the previously verified point-correspondence search logic
        best_err = float('inf')
        best_pose_for_cam = None
        for a, b in itertools.product([0, 1], repeat=2):  # assumes at most two lines per camera
            current_img_pts = list(img_pts)
            if a:
                current_img_pts[0], current_img_pts[1] = current_img_pts[1], current_img_pts[0]
            if b and len(current_img_pts) > 3:  # only swap the second line's endpoints if that line exists
                current_img_pts[2], current_img_pts[3] = current_img_pts[3], current_img_pts[2]
            rvec, tvec = estimate_camera_pose(current_img_pts, obj_pts, side)
            if rvec is not None:
                cam = cam_params_L if side == 'L' else cam_params_R
                proj_pts, _ = cv2.projectPoints(np.array(obj_pts), rvec, tvec, cam['K'], cam['kc'])
                err = cv2.norm(np.array(current_img_pts, dtype=np.float32),
                               proj_pts.reshape(-1, 2).astype(np.float32))
                if err < best_err:
                    best_err = err
                    best_pose_for_cam = np.concatenate([rvec.flatten(), tvec.flatten()])
        if best_pose_for_cam is not None:
            camera_poses_init[i] = best_pose_for_cam
            print(f"Initial pose for camera {i} ({part_type}-{side}) found with error {best_err:.2f}")
        else:
            print(f"Warning: Failed to find initial pose for camera {i}")

    # 3. Prepare the 2D observations (same as before)
    # ... (this part is identical to the previous get_initial_parameters and is copied verbatim)
    obs_2d, p_indices, c_indices = [], [], []
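    # obs_2d[i] is an observed 2D endpoint, p_indices[i] the index of its 3D point in
    # points_3d_init, and c_indices[i] the index of the camera that observed it; together
    # the i-th entries of the three arrays describe one reprojection term in the BA cost.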
    point_map = {'up_line1': [0, 1], 'up_line2': [2, 3], 'bottom_line1': [4, 5], 'bottom_line2': [2, 3]}
    for cam_idx, (part, side) in camera_map.items():
        for line in ['line1', 'line2']:
            img_key = f"{part}_{side.lower()}_{line}"
            gt_key = f"{part}_{line}"
            if img_key in all_2d_endpoints:
                obs_2d.extend([all_2d_endpoints[img_key]['start'], all_2d_endpoints[img_key]['end']])
                p_indices.extend(point_map[gt_key])
                c_indices.extend([cam_idx, cam_idx])

    return (camera_intrinsics, camera_poses_init, points_3d_init,
            np.array(obs_2d), np.array(p_indices), np.array(c_indices))


def cost_function(params, n_cameras, n_points, camera_indices, point_indices, points_2d,
                  fixed_kcs, fixed_3d_points_init):
    """Bundle-adjustment cost function (V2 - with fixed parameters)."""
    # 1. Unpack the parameters being optimized from the flat parameter vector
    intrinsics_flat = params[:8]
    camera_poses_flat = params[8: 8 + n_cameras * 6]
    # [Key change] The 3D points are no longer all taken from params:
    # every point except the first one is optimized
    points_3d_optimizable_flat = params[8 + n_cameras * 6:]

    camera_poses = camera_poses_flat.reshape((n_cameras, 6))
    # Rebuild the full list of 3D points
    points_3d = np.zeros((n_points, 3))
    points_3d[0] = fixed_3d_points_init[0]  # the first point is fixed!
    points_3d[1:] = points_3d_optimizable_flat.reshape((n_points - 1, 3))

    # ... the rest of the function (residual computation) is unchanged ...
    residuals = []
    for i in range(len(points_2d)):
        cam_idx = camera_indices[i]
        point_idx = point_indices[i]
        pose = camera_poses[cam_idx]
        point_3d = points_3d[point_idx]
        if cam_idx in [0, 2]:  # left cameras
            fx, fy, cx, cy = intrinsics_flat[:4]
            kc = fixed_kcs[0]
        else:  # right cameras
            fx, fy, cx, cy = intrinsics_flat[4:]
            kc = fixed_kcs[1]
        K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
        reproj_pt, _ = cv2.projectPoints(point_3d, pose[:3], pose[3:], K, kc)
        residuals.extend((reproj_pt.ravel() - points_2d[i]).tolist())
    return np.array(residuals)


def run_global_optimization(all_2d_endpoints, ground_truth):
    """Run the global optimization (V2 - removes the scale/gauge ambiguity)."""
    # 1. Get the initial values (unchanged)
    intrinsics_init, poses_init, points_3d_init, obs_2d, p_indices, c_indices = get_initial_parameters_with_solvepnp(
        all_2d_endpoints, ground_truth)

    # 2. [Key change] Split the parameters into a fixed part and an optimizable part:
    # the 3D points to optimize are all points except the first one
    optimizable_points_3d_init = points_3d_init[1:]

    # Pack everything that needs to be optimized into one flat vector
    params_init = np.concatenate([
        intrinsics_init.ravel(),
        poses_init.ravel(),
        optimizable_points_3d_init.ravel()  # only the points being optimized
    ])

    # 3. Prepare the fixed parameters
    fixed_kcs = [get_camera_parameters()[0]['kc'], get_camera_parameters()[1]['kc']]

    # 4. Run the optimization (args now also carries fixed_3d_points_init)
    n_cameras = 4
    n_points = points_3d_init.shape[0]
    print("\n--- Step 2: Running Global Bundle Adjustment (with scale constraint) ---")
    result = least_squares(
        cost_function, params_init, verbose=2, x_scale='jac', ftol=1e-6, method='trf',
        args=(n_cameras, n_points, c_indices, p_indices, obs_2d, fixed_kcs, points_3d_init),  # pass the fixed initial 3D points
        max_nfev=2000  # the iteration budget can be raised if needed
    )

    params_final = result.x
    n_cameras = 4  # make sure n_cameras and n_points are available here
    n_points = points_3d_init.shape[0]

    # --- The parsing and saving section below is new ---
    # 5a. Unpack all final parameters
    intrinsics_final_flat = params_final[:8]
    camera_poses_final_flat = params_final[8: 8 + n_cameras * 6]
    optimizable_points_3d_final_flat = params_final[8 + n_cameras * 6:]

    intrinsics_final = intrinsics_final_flat.reshape(2, 4)
    camera_poses_final = camera_poses_final_flat.reshape((n_cameras, 6))
    points_3d_final = np.zeros((n_points, 3))
    points_3d_final[0] = points_3d_init[0]
    points_3d_final[1:] = optimizable_points_3d_final_flat.reshape((n_points - 1, 3))
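    # Optional sanity check (not part of the original flow): per-coordinate RMS of the
    # final reprojection residuals, in pixels, taken straight from the least_squares result.
    rms_reproj_error = np.sqrt(np.mean(result.fun ** 2))
    print(f"Final RMS reprojection error: {rms_reproj_error:.3f} px")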
    # 5b. Print to the console for immediate inspection
    print("\n--- Optimized Camera Intrinsics ---")
    print(f"Left Cam (fx, fy, cx, cy): {intrinsics_final[0]}")
    print(f"Right Cam (fx, fy, cx, cy): {intrinsics_final[1]}")
    print("\n--- Optimized Camera Poses (Rodrigues vector + translation) ---")
    print(f"Pose of up-left cam: {camera_poses_final[0]}")
    print(f"Pose of up-right cam: {camera_poses_final[1]}")
    print(f"Pose of bottom-left cam: {camera_poses_final[2]}")
    print(f"Pose of bottom-right cam: {camera_poses_final[3]}")

    # 5c. [Core] Save all parameters to a file
    # Define the save path
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.dirname(current_dir)
    save_path = os.path.join(project_root, 'optimized_camera_parameters.npz')

    # Get the fixed distortion coefficients
    cam_L_params, cam_R_params, _ = get_camera_parameters()

    np.savez(
        save_path,
        # Optimized intrinsics
        optimized_intrinsics_L=intrinsics_final[0],  # [fx, fy, cx, cy]
        optimized_intrinsics_R=intrinsics_final[1],
        # Fixed distortion coefficients (not optimized in the BA)
        dist_coeffs_L=cam_L_params['kc'],
        dist_coeffs_R=cam_R_params['kc'],
        # Optimized camera poses (relative to the object coordinate system)
        pose_up_L=camera_poses_final[0],  # [rvec, tvec]
        pose_up_R=camera_poses_final[1],
        pose_bottom_L=camera_poses_final[2],
        pose_bottom_R=camera_poses_final[3],
        # Also store a newly computed extrinsic transform for reference
        # (the transform of up-right relative to up-left)
        new_extrinsics=calculate_new_extrinsics(camera_poses_final[0], camera_poses_final[1]),
        # Optimized 3D point coordinates
        optimized_3d_points=points_3d_final
    )
    print(f"\n✅ All optimized parameters have been saved to: {save_path}")

    # 6. Assemble the output (unchanged)
    final_seams = {
        'up_line1': {'start_3d': points_3d_final[0], 'end_3d': points_3d_final[1]},
        'up_line2': {'start_3d': points_3d_final[2], 'end_3d': points_3d_final[3]},
        'bottom_line1': {'start_3d': points_3d_final[4], 'end_3d': points_3d_final[5]},
        'bottom_line2': {'start_3d': points_3d_final[2], 'end_3d': points_3d_final[3]}
    }
    return final_seams


def calculate_new_extrinsics(pose_L, pose_R):
    """Compute the relative pose (extrinsics) between two cameras from their object-relative poses."""
    # Transform from the object frame to the left camera
    rvec_L, tvec_L = pose_L[:3], pose_L[3:]
    R_L_from_obj, _ = cv2.Rodrigues(rvec_L)
    T_L_from_obj = tvec_L.reshape(3, 1)

    # Transform from the object frame to the right camera
    rvec_R, tvec_R = pose_R[:3], pose_R[3:]
    R_R_from_obj, _ = cv2.Rodrigues(rvec_R)
    T_R_from_obj = tvec_R.reshape(3, 1)

    # Compute the transform from the left camera to the right camera.
    # T_R_from_L = R_R @ inv(R_L).T @ (T_L - T_R) is an incorrect derivation.
    # Correct derivation: P_obj = inv(R_L) @ (P_camL - T_L) = inv(R_R) @ (P_camR - T_R)
    #   => P_camR = R_R @ inv(R_L) @ P_camL + (T_R - R_R @ inv(R_L) @ T_L)
    #   => R_R_from_L = R_R @ R_L.T
    #   => T_R_from_L = T_R - R_R_from_L @ T_L
    R_R_from_L = R_R_from_obj @ R_L_from_obj.T
    t_R_from_L = T_R_from_obj - (R_R_from_L @ T_L_from_obj)

    return {'R': R_R_from_L, 't': t_R_from_L}
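
# A minimal loading sketch (not part of the original pipeline) showing how the .npz
# written above could be read back elsewhere; the function name is illustrative.
# allow_pickle=True is required because `new_extrinsics` is a Python dict and is
# therefore stored as a pickled 0-d object array.
def load_optimized_parameters(npz_path):
    """Load the parameters saved by run_global_optimization()."""
    data = np.load(npz_path, allow_pickle=True)
    fx, fy, cx, cy = data['optimized_intrinsics_L']
    K_L = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
    fx, fy, cx, cy = data['optimized_intrinsics_R']
    K_R = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
    return {
        'K_L': K_L, 'K_R': K_R,
        'dist_L': data['dist_coeffs_L'], 'dist_R': data['dist_coeffs_R'],
        'poses': {
            'up_L': data['pose_up_L'], 'up_R': data['pose_up_R'],
            'bottom_L': data['pose_bottom_L'], 'bottom_R': data['pose_bottom_R'],
        },
        'new_extrinsics': data['new_extrinsics'].item(),  # recover the {'R': ..., 't': ...} dict
        'points_3d': data['optimized_3d_points'],
    }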