3D reconstruction, final version

This commit is contained in:
2025-11-02 21:36:35 +08:00
parent f91b09da9d
commit f39009b853
126 changed files with 2870 additions and 2 deletions

4
.idea/vcs.xml generated

@@ -1,4 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="VcsDirectoryMappings" defaultProject="true" />
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
 </project>

186
3D_construction/main.py Normal file

@@ -0,0 +1,186 @@
import os
import cv2
import numpy as np
from script.yolo_detector import detect_crop_area
from script.linknet_segmentor import segment_and_find_endpoints
from script.reconstruction import reconstruct_points, visualize_reconstructed_seams
from script.pose_estimation import get_ground_truth_seams, reproject_to_object_coords
# Import the final reconstruction pipeline
from script.final_reconstruction import final_reconstruction_pipeline
from script.global_optimizer import run_global_optimization, merge_seams
def run_full_recognition_pipeline():
"""
运行完整的识别流程YOLO定位 -> LinkNet分割 -> 端点提取。
"""
# 1. 定义路径
base_dir = os.path.dirname(os.path.abspath(__file__))
data_map = {
'up': {
'l_img': os.path.join(base_dir, 'data', 'origin', 'up', 'l1.jpeg'),
'r_img': os.path.join(base_dir, 'data', 'origin', 'up', 'r1.jpeg'),
'yolo_model': os.path.join(base_dir, 'module', 'yolov8', 'up.pt'),
'linknet_models': {
'line1': os.path.join(base_dir, 'module', 'linknet', 'best_linknet_up_model_line1.pth'),
'line2': os.path.join(base_dir, 'module', 'linknet', 'best_linknet_up_model_line2.pth')
}
},
'bottom': {
'l_img': os.path.join(base_dir, 'data', 'origin', 'bottom', 'l1.jpeg'),
'r_img': os.path.join(base_dir, 'data', 'origin', 'bottom', 'r1.jpeg'),
'yolo_model': os.path.join(base_dir, 'module', 'yolov8', 'bottom.pt'),
'linknet_models': {
'line1': os.path.join(base_dir, 'module', 'linknet', 'best_linknet_bottom_model_line1.pth'),
'line2': os.path.join(base_dir, 'module', 'linknet', 'best_linknet_bottom_model_line2.pth')
}
}
}
output_dir = os.path.join(base_dir, 'data', 'processed')
os.makedirs(output_dir, exist_ok=True)
all_endpoints = {}
for part, paths in data_map.items():
print(f"\n--- Processing '{part}' part ---")
for img_path, side in [(paths['l_img'], 'l'), (paths['r_img'], 'r')]:
print(f"\n-- Analyzing image: {os.path.basename(img_path)} --")
crop_box = detect_crop_area(img_path, paths['yolo_model'])
if not crop_box:
print(f"Skipping further processing for {os.path.basename(img_path)}.")
continue
original_image_vis = cv2.imread(img_path)
for line_name, linknet_path in paths['linknet_models'].items():
endpoints = segment_and_find_endpoints(original_image_vis, crop_box, linknet_path)
if endpoints:
start_pt, end_pt = endpoints
result_key = f"{part}_{side}_{line_name}"
all_endpoints[result_key] = {'start': start_pt, 'end': end_pt}
                    # --- Draw the results on the visualization image (enhanced) ---
                    # 1. Draw the endpoint circles
                    cv2.circle(original_image_vis, start_pt, 15, (0, 255, 0), -1)  # green start point
                    cv2.circle(original_image_vis, end_pt, 15, (0, 0, 255), -1)  # red end point
                    # 2. Draw the connecting line
                    cv2.line(original_image_vis, start_pt, end_pt, (255, 0, 0), 4)
                    # 3. Add a text label.
                    # Use the midpoint of the segment as the text anchor
                    mid_point = ((start_pt[0] + end_pt[0]) // 2, (start_pt[1] + end_pt[1]) // 2)
                    # Place the text above the midpoint
                    text_pos = (mid_point[0], mid_point[1] - 20)
                    cv2.putText(original_image_vis,
                                result_key,
                                text_pos,
                                cv2.FONT_HERSHEY_SIMPLEX,
                                2,  # font scale
                                (255, 255, 0),  # font color (cyan)
                                4,  # font thickness
                                cv2.LINE_AA)
            # Draw the YOLO box and save the final visualization result
cv2.rectangle(original_image_vis, (crop_box[0], crop_box[1]), (crop_box[2], crop_box[3]), (0, 255, 255), 4)
save_path = os.path.join(output_dir, f'{part}_{side}_final_result.jpg')
cv2.imwrite(save_path, original_image_vis)
print(f"Saved final visualization to {save_path}")
    # 3. Print a summary
print("\n--- Final Endpoints Summary (in original image coordinates) ---")
for name, points in all_endpoints.items():
print(f"{name}: Start={points['start']}, End={points['end']}")
return all_endpoints
def run_3d_reconstruction(all_2d_endpoints):
    """
    Reconstruct the 3D weld seams from the recognized 2D endpoints.
    """
    print("\n--- Starting 3D Reconstruction ---")
    # This dict stores the final 3D coordinates
    reconstructed_seams_3d = {}
    # Seam pairs to reconstruct,
    # e.g. 'up_line1' pairs up_l_line1 with up_r_line1
    seam_pairs = ['up_line1', 'up_line2', 'bottom_line1', 'bottom_line2']
for seam_name in seam_pairs:
key_L = f"{seam_name.split('_')[0]}_l_{seam_name.split('_')[1]}" # e.g., 'up_l_line1'
key_R = f"{seam_name.split('_')[0]}_r_{seam_name.split('_')[1]}" # e.g., 'up_r_line1'
        # Check that the points from both cameras were recognized
if key_L not in all_2d_endpoints or key_R not in all_2d_endpoints:
print(f"Warning: Missing points for seam '{seam_name}'. Cannot reconstruct.")
continue
        # Prepare the input point lists: [start_point, end_point]
points_L = [all_2d_endpoints[key_L]['start'], all_2d_endpoints[key_L]['end']]
points_R = [all_2d_endpoints[key_R]['start'], all_2d_endpoints[key_R]['end']]
        # Call the reconstruction function.
        # Assumes a 4000x3000 image size; adjust if yours differs.
        # This parameter matters: it must match the image size used during calibration!
points_3d = reconstruct_points(points_L, points_R, image_size=(4000, 3000))
reconstructed_seams_3d[seam_name] = {
'start_3d': points_3d[0],
'end_3d': points_3d[1]
}
    # --- Print the final 3D coordinates ---
print("\n--- Final 3D Seam Endpoints (in Left Camera Coordinate System, unit: mm) ---")
for name, points in reconstructed_seams_3d.items():
start_str = np.array2string(points['start_3d'], formatter={'float_kind': lambda x: "%.3f" % x})
end_str = np.array2string(points['end_3d'], formatter={'float_kind': lambda x: "%.3f" % x})
print(f"{name}:")
print(f" Start 3D: {start_str}")
print(f" End 3D: {end_str}")
return reconstructed_seams_3d
def run_new_reconstruction_pipeline(all_2d_endpoints):
    """
    A brand-new reconstruction and stitching pipeline based on solvePnP.
    """
    print("\n--- Starting NEW Reconstruction Pipeline (with solvePnP) ---")
    # --- Process the 'up' part ---
    print("\nProcessing 'up' part...")
    reconstructed_up = reproject_to_object_coords(all_2d_endpoints, all_2d_endpoints, part_type='up')
    # --- Process the 'bottom' part ---
    print("\nProcessing 'bottom' part...")
    reconstructed_bottom = reproject_to_object_coords(all_2d_endpoints, all_2d_endpoints, part_type='bottom')
    # --- Merge the results ---
final_reconstructed_seams = {}
if reconstructed_up:
final_reconstructed_seams.update(reconstructed_up)
if reconstructed_bottom:
final_reconstructed_seams.update(reconstructed_bottom)
return final_reconstructed_seams
if __name__ == '__main__':
final_2d_endpoints = run_full_recognition_pipeline()
ground_truth = get_ground_truth_seams()
final_4_seams = {}
if final_2d_endpoints:
        # Run the global optimization directly
final_4_seams = run_global_optimization(final_2d_endpoints, ground_truth)
final_3_seam_model = {}
if final_4_seams:
final_3_seam_model = merge_seams(final_4_seams)
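The entry point stops after merging and never displays the result. A minimal follow-up sketch, assuming the visualize_reconstructed_seams helper and the _final/_truth key suffixes used by the color map in script/reconstruction.py (the truth mapping mirrors the one in the deployment script below):

    if final_3_seam_model:
        comparison = {f"{name}_final": pts for name, pts in final_3_seam_model.items()}
        comparison['bottom_left_truth'] = ground_truth['bottom_line1']
        comparison['middle_truth'] = ground_truth['up_line2']
        comparison['top_left_truth'] = ground_truth['up_line1']
        visualize_reconstructed_seams(comparison)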


@@ -0,0 +1,144 @@
import os
import cv2
import numpy as np
# Import the required modules
from script.yolo_detector import detect_crop_area
from script.linknet_segmentor import segment_and_find_endpoints
from script.final_reconstruction import merge_seams  # the merge function is still needed
from script.reconstruction import visualize_reconstructed_seams  # as is the visualization function
def reconstruct_with_optimized_params(points_L, points_R, calib_data, image_size=(4000, 3000)):
    """
    Run efficient, standard stereo reconstruction using the optimized parameter file.
    Returns 3D points in the left-camera coordinate system.
    """
    # Load the optimized intrinsics and extrinsics from the calibration data
K_L = np.array([
[calib_data['optimized_intrinsics_L'][0], 0, calib_data['optimized_intrinsics_L'][2]],
[0, calib_data['optimized_intrinsics_L'][1], calib_data['optimized_intrinsics_L'][3]],
[0, 0, 1]
])
kc_L = calib_data['dist_coeffs_L']
K_R = np.array([
[calib_data['optimized_intrinsics_R'][0], 0, calib_data['optimized_intrinsics_R'][2]],
[0, calib_data['optimized_intrinsics_R'][1], calib_data['optimized_intrinsics_R'][3]],
[0, 0, 1]
])
kc_R = calib_data['dist_coeffs_R']
    # Use the new, optimized extrinsics!
    new_extrinsics = calib_data['new_extrinsics'].item()  # .item() extracts the dict from a NumPy object array
    R = new_extrinsics['R']
    t = new_extrinsics['t']
    # Standard stereo rectification and triangulation.
    R1, R2, P1, P2, _, _, _ = cv2.stereoRectify(K_L, kc_L, K_R, kc_R, image_size, R, t)
    # Pass the rectification rotations R1/R2 so the points are expressed in the
    # rectified frames that P1/P2 project from.
    points_L_undistorted = cv2.undistortPoints(np.array(points_L, dtype=np.float32), K_L, kc_L, R=R1, P=P1)
    points_R_undistorted = cv2.undistortPoints(np.array(points_R, dtype=np.float32), K_R, kc_R, R=R2, P=P2)
    points_4d_hom = cv2.triangulatePoints(P1, P2, points_L_undistorted.reshape(-1, 2).T,
                                          points_R_undistorted.reshape(-1, 2).T)
    # Triangulation yields points in the rectified left frame; rotate back into the
    # original left-camera frame.
    points_3d_camL = (points_4d_hom[:3] / points_4d_hom[3]).T @ R1
    return points_3d_camL
def get_transform_from_pose(pose):
    """Compute the 4x4 inverse transform (camera -> object) from a 6-DoF pose vector."""
rvec, tvec = pose[:3], pose[3:]
R_cam_from_obj, _ = cv2.Rodrigues(rvec)
R_obj_from_cam = R_cam_from_obj.T
t_obj_from_cam = -R_obj_from_cam @ tvec
transform_matrix = np.eye(4)
transform_matrix[:3, :3] = R_obj_from_cam
transform_matrix[:3, 3] = t_obj_from_cam.flatten()
return transform_matrix
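A quick illustrative sanity check of this inverse (values arbitrary): a point mapped into camera coordinates by the forward pose should return to its object coordinates through the matrix above.

    import cv2
    import numpy as np
    pose = np.array([0.1, -0.2, 0.3, 10.0, 20.0, 30.0])  # arbitrary rvec + tvec
    R_fwd, _ = cv2.Rodrigues(pose[:3])
    p_obj = np.array([1.0, 2.0, 3.0])
    p_cam = R_fwd @ p_obj + pose[3:]    # object -> camera
    T = get_transform_from_pose(pose)   # camera -> object
    assert np.allclose((T @ np.append(p_cam, 1.0))[:3], p_obj)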
def run_deployment_pipeline(calib_data):
    """
    Final deployment flow: load the calibration file and reconstruct quickly.
    """
    print("--- Running Deployment Pipeline with Optimized Parameters ---")
    # 1. Run the 2D recognition (same as before).
    # run_full_recognition_pipeline can be copied from the earlier main.py,
    # or a simplified version could be rewritten here.
    from main import run_full_recognition_pipeline  # assumes the earlier main.py still exists
    all_2d_endpoints = run_full_recognition_pipeline()
if not all_2d_endpoints:
print("2D recognition failed. Exiting.")
return
reconstructed_4_seams = {}
    # 2. Reconstruct and transform the 'up' and 'bottom' parts separately
for part_type in ['up', 'bottom']:
print(f"\nProcessing '{part_type}' part...")
        # a. Collect all of this part's 2D points
points_L, points_R, seam_keys = [], [], []
for line_name in ['line1', 'line2']:
key_L = f"{part_type}_l_{line_name}"
key_R = f"{part_type}_r_{line_name}"
points_L.extend([all_2d_endpoints[key_L]['start'], all_2d_endpoints[key_L]['end']])
points_R.extend([all_2d_endpoints[key_R]['start'], all_2d_endpoints[key_R]['end']])
seam_keys.append(f"{part_type}_{line_name}")
        # b. Standard stereo reconstruction with the optimized parameters
points_camL = reconstruct_with_optimized_params(points_L, points_R, calib_data)
        # c. Fetch and apply the corresponding transform matrix.
        # Note: we assume the camera-object geometry was fixed across the two shots,
        # so in theory the 'up' and 'bottom' transforms are identical.
        # The pose computed from the 'up' shot serves as the global reference.
global_transform = get_transform_from_pose(calib_data['pose_up_L'])
points_camL_hom = np.hstack([points_camL, np.ones((points_camL.shape[0], 1))])
points_object = (global_transform @ points_camL_hom.T).T[:, :3]
        # d. Collect the results
for i, key in enumerate(seam_keys):
reconstructed_4_seams[key] = {'start_3d': points_object[i * 2], 'end_3d': points_object[i * 2 + 1]}
    # 3. Merge into the final three-seam model
final_3_seam_model = merge_seams(reconstructed_4_seams)
    # 4. Print and visualize
print("\n--- Final 3-Seam Model (Object Coordinate System) ---")
for name, points in final_3_seam_model.items():
start_str = np.array2string(points['start_3d'], formatter={'float_kind': lambda x: "%.2f" % x})
end_str = np.array2string(points['end_3d'], formatter={'float_kind': lambda x: "%.2f" % x})
print(f"{name}: Start={start_str}, End={end_str}")
    # Visualization...
from script.pose_estimation import get_ground_truth_seams
ground_truth_data = get_ground_truth_seams()
comparison_data = {}
for name, points in final_3_seam_model.items():
comparison_data[name + '_final'] = points
comparison_data['bottom_left_truth'] = ground_truth_data['bottom_line1']
comparison_data['middle_truth'] = ground_truth_data['up_line2']
comparison_data['top_left_truth'] = ground_truth_data['up_line1']
visualize_reconstructed_seams(comparison_data)
if __name__ == '__main__':
    # Calibration file path
calib_file_path = 'optimized_camera_parameters.npz'
if not os.path.exists(calib_file_path):
print(f"Error: Calibration file not found at '{calib_file_path}'")
print("Please run the main.py with the global optimization first to generate this file.")
else:
        # Load the calibration file
print(f"Loading optimized parameters from '{calib_file_path}'...")
calibration_data = np.load(calib_file_path, allow_pickle=True)
        # Run the deployment pipeline
run_deployment_pipeline(calibration_data)


@@ -0,0 +1,115 @@
import numpy as np
import open3d as o3d
def get_ground_truth_seams():
"""返回你手动测量的三维坐标(物体坐标系)。"""
ground_truth = {
'up_line1': {
'start_3d': np.array([142.2, 0, 7.3]),
'end_3d': np.array([153.9, 0, 149.8])
},
'up_line2': {
'start_3d': np.array([142.2, 0, 7.3]),
'end_3d': np.array([142.2, 50.3, 7.3])
},
'bottom_line1': {
'start_3d': np.array([8.9, 0, 7.3]),
'end_3d': np.array([140.2, 0, 7.3])
},
'bottom_line2': {
'start_3d': np.array([142.2, 0, 7.3]),
'end_3d': np.array([142.2, 50.3, 7.3])
}
}
return ground_truth
def align_and_stitch_seams(reconstructed_seams):
    """
    Use ICP-style point-to-point registration to align the reconstructed point clouds
    to the ground-truth coordinate system and stitch them together.
    Args:
        reconstructed_seams (dict): seam endpoints reconstructed in the camera frame.
    Returns:
        dict: seam endpoints aligned and stitched in the object frame.
    """
    print("\n--- Aligning and Stitching Seams to Ground Truth ---")
    ground_truth = get_ground_truth_seams()
    # --- 1. Align the 'up' part ---
    # Source cloud: the reconstructed up_line2 (camera frame)
source_points_up = np.array([
reconstructed_seams['up_line2']['start_3d'],
reconstructed_seams['up_line2']['end_3d']
])
source_pcd_up = o3d.geometry.PointCloud()
source_pcd_up.points = o3d.utility.Vector3dVector(source_points_up)
    # Target cloud: the measured up_line2 (object frame)
target_points_up = np.array([
ground_truth['up_line2']['start_3d'],
ground_truth['up_line2']['end_3d']
])
target_pcd_up = o3d.geometry.PointCloud()
target_pcd_up.points = o3d.utility.Vector3dVector(target_points_up)
print("Aligning 'up' part...")
# 使用点对点ICP计算变换矩阵 M_up
# 由于只有两个点我们可以直接计算一个精确的变换但用ICP更通用
# estimate_rigid_transformation 需要点是 (3, N) 的格式
trans_up = o3d.pipelines.registration.TransformationEstimationPointToPoint().compute_transformation(
source_pcd_up, target_pcd_up, o3d.utility.Vector2iVector([[0, 0], [1, 1]]))
print("Transformation matrix for 'up' part (Camera -> Object):")
print(trans_up)
    # --- 2. Align the 'bottom' part ---
    # Source cloud: the reconstructed bottom_line2 (camera frame)
source_points_bottom = np.array([
reconstructed_seams['bottom_line2']['start_3d'],
reconstructed_seams['bottom_line2']['end_3d']
])
source_pcd_bottom = o3d.geometry.PointCloud()
source_pcd_bottom.points = o3d.utility.Vector3dVector(source_points_bottom)
    # Target cloud: the measured bottom_line2 (object frame)
target_points_bottom = np.array([
ground_truth['bottom_line2']['start_3d'],
ground_truth['bottom_line2']['end_3d']
])
target_pcd_bottom = o3d.geometry.PointCloud()
target_pcd_bottom.points = o3d.utility.Vector3dVector(target_points_bottom)
print("\nAligning 'bottom' part...")
trans_bottom = o3d.pipelines.registration.TransformationEstimationPointToPoint().compute_transformation(
source_pcd_bottom, target_pcd_bottom, o3d.utility.Vector2iVector([[0, 0], [1, 1]]))
print("Transformation matrix for 'bottom' part (Camera -> Object):")
print(trans_bottom)
    # --- 3. Apply the transforms and assemble the final result ---
aligned_seams = {}
for name, points in reconstructed_seams.items():
        # Build homogeneous coordinates (x, y, z, 1)
start_hom = np.append(points['start_3d'], 1)
end_hom = np.append(points['end_3d'], 1)
        # Pick the transform matrix based on whether the seam belongs to 'up' or 'bottom'
if 'up' in name:
transformed_start = (trans_up @ start_hom.T)[:3]
transformed_end = (trans_up @ end_hom.T)[:3]
elif 'bottom' in name:
transformed_start = (trans_bottom @ start_hom.T)[:3]
transformed_end = (trans_bottom @ end_hom.T)[:3]
else:
continue
aligned_seams[name] = {
'start_3d': transformed_start,
'end_3d': transformed_end
}
return aligned_seams, ground_truth
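For reference, a minimal invocation sketch with hypothetical camera-frame endpoints (the values are illustrative only; in the real pipeline they come from the stereo reconstruction):

    import numpy as np
    reconstructed = {
        'up_line2': {'start_3d': np.array([10.0, 5.0, 500.0]),
                     'end_3d': np.array([12.0, 55.0, 498.0])},
        'bottom_line2': {'start_3d': np.array([11.0, 4.0, 501.0]),
                         'end_3d': np.array([13.0, 54.0, 499.0])},
    }
    aligned, truth = align_and_stitch_seams(reconstructed)
    print(aligned['up_line2']['start_3d'])  # now in the object frame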


@@ -0,0 +1,112 @@
# demo_final_beautiful.py
# Final polished version: no legend, focused on a high-quality render of the seam model itself.
# Run: python demo_final_beautiful.py
import numpy as np
import open3d as o3d
from typing import Dict
def get_final_ideal_ground_truth() -> Dict:
    """
    Use the final ideal coordinates you provided.
    """
    print("--- Using your final provided ideal coordinates. ---")
final_3_seams = {
'bottom_left': {
'start_3d': np.array([-142.2 + 8.9, 0.0, 0.0]),
'end_3d': np.array([-2.7, 0.0, 0.0])
},
'middle': {
'start_3d': np.array([0.0, 0.0, 0.0]),
'end_3d': np.array([0.0, -50.3, 0.0])
},
'top_left': {
'start_3d': np.array([0.0, 0.0, 5.2]),
'end_3d': np.array([0.0, 0.0, 142.5])
}
}
return final_3_seams
def rotation_matrix_from_vectors(vec_from: np.ndarray, vec_to: np.ndarray) -> np.ndarray:
"""计算从 vec_from 到 vec_to 的旋转矩阵。"""
a = vec_from / (np.linalg.norm(vec_from) + 1e-12)
b = vec_to / (np.linalg.norm(vec_to) + 1e-12)
v = np.cross(a, b)
c = np.clip(np.dot(a, b), -1.0, 1.0)
s = np.linalg.norm(v)
if s < 1e-12:
return np.eye(3) if c > 0.0 else -np.eye(3)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]], dtype=float)
return np.eye(3) + kmat + kmat @ kmat * ((1.0 - c) / (s ** 2))
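A tiny illustrative check of this Rodrigues-formula helper: the returned matrix should rotate the normalized source vector onto the normalized target vector.

    a = np.array([0.0, 0.0, 1.0])
    b = np.array([1.0, 1.0, 0.0])
    R = rotation_matrix_from_vectors(a, b)
    assert np.allclose(R @ a, b / np.linalg.norm(b))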
def visualize_beautiful_model(seams: Dict):
    """
    [Final polished] visualization:
    - PBR-style materials for a metallic feel.
    - Extra lighting for a stronger sense of depth.
    - A good initial viewpoint.
    """
    print("\n--- Visualizing Final Target Model (High Quality Render) ---")
colors = {'bottom_left': [0.8, 0.1, 0.1], 'middle': [0.1, 0.8, 0.1], 'top_left': [0.1, 0.1, 0.8]}
geoms = []
    # 1. Draw the seam bodies (cylinders)
    radius = 0.5  # a moderate thickness
for name, data in seams.items():
start, end = np.asarray(data['start_3d']), np.asarray(data['end_3d'])
direction = end - start
length = np.linalg.norm(direction)
if length < 1e-6: continue
cyl = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=length, resolution=64)
        cyl.compute_vertex_normals()  # normals are essential for the lighting computation
R = rotation_matrix_from_vectors(np.array([0.0, 0.0, 1.0]), direction)
cyl.rotate(R, center=(0, 0, 0)).translate((start + end) / 2.0)
cyl.paint_uniform_color(colors[name])
geoms.append(cyl)
    # 2. Move the coordinate frame somewhere it does not occlude the model
all_points = [p for data in seams.values() for p in data.values()]
bbox = o3d.geometry.AxisAlignedBoundingBox.create_from_points(o3d.utility.Vector3dVector(all_points))
axis_size = max(30.0, np.linalg.norm(bbox.get_extent()) * 0.2)
axis_origin = bbox.get_min_bound() - np.array([axis_size * 1.5, axis_size * 0.5, 0])
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size, origin=axis_origin)
geoms.append(frame)
    # 3. Visualize
vis = o3d.visualization.Visualizer()
vis.create_window(window_name="Final Target Model (High Quality)", width=1280, height=720)
for g in geoms:
vis.add_geometry(g)
    # --- Render and view settings ---
    opt = vis.get_render_option()
    opt.background_color = np.asarray([0.1, 0.1, 0.1])  # dark gray background to make the model stand out
    opt.mesh_show_back_face = False
    opt.light_on = True  # make sure lighting is enabled
    # Grab the view controller and set the camera position
    view_ctl = vis.get_view_control()
    # This call automatically computes a viewpoint that shows all geometry
    vis.reset_view_point(True)
    # The camera parameters can be tuned further for a specific angle:
    # view_ctl.set_zoom(0.8)
    # view_ctl.rotate(x=100, y=100)  # rotate the view
print("Visualization ready. You can rotate the view. Press 'Q' to close.")
vis.run()
vis.destroy_window()
if __name__ == '__main__':
seams = get_final_ideal_ground_truth()
print("--- Final Ideal Seam Coordinates ---")
for n, p in seams.items():
print(f"{n}: start={np.around(p['start_3d'], 1)}, end={np.around(p['end_3d'], 1)}")
visualize_beautiful_model(seams)


@@ -0,0 +1,174 @@
import cv2
import numpy as np
import itertools
from .reconstruction import get_camera_parameters
from .pose_estimation import get_ground_truth_seams, estimate_camera_pose
def get_global_transform_from_up_data(all_2d_endpoints):
    """
    Use only the 'up' data to compute a single, global camera-to-object transform matrix.
    """
    print("\n--- Calculating Global Transform Matrix using 'up' data ---")
    ground_truth = get_ground_truth_seams()
    # 1. Prepare the 'up' data
object_points_3d = []
image_points_2d_L = []
for line_name in ['line1', 'line2']:
gt_key = f"up_{line_name}"
key_L = f"up_l_{line_name}"
object_points_3d.extend([ground_truth[gt_key]['start_3d'], ground_truth[gt_key]['end_3d']])
image_points_2d_L.extend([all_2d_endpoints[key_L]['start'], all_2d_endpoints[key_L]['end']])
    # 2. Search for the best point correspondence
    best_reprojection_error = float('inf')
    best_pose = None
    for a, b in itertools.product([0, 1], repeat=2):
        current_image_points_L = list(image_points_2d_L)
        if a:  # swap start/end of line1
            current_image_points_L[0], current_image_points_L[1] = current_image_points_L[1], current_image_points_L[0]
        if b:  # swap start/end of line2
            current_image_points_L[2], current_image_points_L[3] = current_image_points_L[3], current_image_points_L[2]
rvec, tvec = estimate_camera_pose(current_image_points_L, object_points_3d, 'L')
if rvec is not None:
projected_points, _ = cv2.projectPoints(np.array(object_points_3d), rvec, tvec,
get_camera_parameters()[0]['K'], get_camera_parameters()[0]['kc'])
error = cv2.norm(np.array(current_image_points_L, dtype=np.float32),
projected_points.reshape(-1, 2).astype(np.float32), cv2.NORM_L2)
if error < best_reprojection_error:
best_reprojection_error = error
best_pose = (rvec, tvec)
if best_pose is None:
print("Fatal Error: Could not calculate a valid global transform.")
return None
print(f"Global transform calculated with reprojection error: {best_reprojection_error:.2f}")
    # 3. Build the 4x4 transform matrix (camera frame -> object frame)
rvec, tvec = best_pose
R_cam_from_obj, _ = cv2.Rodrigues(rvec)
R_obj_from_cam = R_cam_from_obj.T
t_obj_from_cam = -R_obj_from_cam @ tvec
transform_matrix = np.eye(4)
transform_matrix[:3, :3] = R_obj_from_cam
transform_matrix[:3, 3] = t_obj_from_cam.flatten()
return transform_matrix
def reconstruct_in_camera_coords(points_L, points_R, image_size=(4000, 3000)):
    # ... (this function is essentially unchanged)
    cam_L, cam_R, extrinsics = get_camera_parameters()
    R1, R2, P1, P2, _, _, _ = cv2.stereoRectify(cam_L['K'], cam_L['kc'], cam_R['K'], cam_R['kc'], image_size,
                                                extrinsics['R'], extrinsics['T'].flatten())
    # Include the rectification rotations R1/R2 so the points live in the rectified
    # frames that P1/P2 project from.
    points_L_undistorted = cv2.undistortPoints(np.array(points_L, dtype=np.float32), cam_L['K'], cam_L['kc'], R=R1, P=P1)
    points_R_undistorted = cv2.undistortPoints(np.array(points_R, dtype=np.float32), cam_R['K'], cam_R['kc'], R=R2, P=P2)
    points_4d_hom = cv2.triangulatePoints(P1, P2, points_L_undistorted.reshape(-1, 2).T,
                                          points_R_undistorted.reshape(-1, 2).T)
    # Rotate back from the rectified left frame to the original left-camera frame
    return (points_4d_hom[:3] / points_4d_hom[3]).T @ R1
def final_reconstruction_pipeline(all_2d_endpoints):
    # 1. Compute the single, global transform matrix
global_transform = get_global_transform_from_up_data(all_2d_endpoints)
if global_transform is None:
return None
reconstructed_4_seams = {}
    for part_type in ['up', 'bottom']:
        # 2. Run standard stereo reconstruction for this part
points_L, points_R, seam_keys = [], [], []
for line_name in ['line1', 'line2']:
key_L = f"{part_type}_l_{line_name}"
key_R = f"{part_type}_r_{line_name}"
points_L.extend([all_2d_endpoints[key_L]['start'], all_2d_endpoints[key_L]['end']])
points_R.extend([all_2d_endpoints[key_R]['start'], all_2d_endpoints[key_R]['end']])
seam_keys.append(f"{part_type}_{line_name}")
points_camL = reconstruct_in_camera_coords(points_L, points_R)
        # 3. Transform with the same global matrix
points_camL_hom = np.hstack([points_camL, np.ones((points_camL.shape[0], 1))])
points_object = (global_transform @ points_camL_hom.T).T[:, :3]
        # 4. Collect the results
for i, key in enumerate(seam_keys):
reconstructed_4_seams[key] = {'start_3d': points_object[i * 2], 'end_3d': points_object[i * 2 + 1]}
return reconstructed_4_seams
def merge_seams(reconstructed_seams_dict):
    """
    Merge the four reconstructed seams into the final three-seam model.
    Args:
        reconstructed_seams_dict (dict): dict containing 'up_line1', 'up_line2',
                                         'bottom_line1' and 'bottom_line2'.
    Returns:
        dict: the three final seams 'bottom_left', 'middle' and 'top_left'.
    """
    print("\n--- Merging seams into final 3-line model ---")
    if not all(k in reconstructed_seams_dict for k in ['up_line1', 'up_line2', 'bottom_line1', 'bottom_line2']):
        print("Error: Missing reconstructed seams for merging.")
        return None
    # Extract all the endpoints we need
bl1_start = reconstructed_seams_dict['bottom_line1']['start_3d']
bl1_end = reconstructed_seams_dict['bottom_line1']['end_3d']
ul2_start = reconstructed_seams_dict['up_line2']['start_3d']
ul2_end = reconstructed_seams_dict['up_line2']['end_3d']
bl2_start = reconstructed_seams_dict['bottom_line2']['start_3d']
bl2_end = reconstructed_seams_dict['bottom_line2']['end_3d']
ul1_start = reconstructed_seams_dict['up_line1']['start_3d']
ul1_end = reconstructed_seams_dict['up_line1']['end_3d']
    # --- Define the final three seams ---
    # 1. Bottom-left seam (bottom_left):
    # use bottom_line1 directly; for a consistent direction, orient it from the
    # smaller to the larger X value.
    bottom_left_points = sorted([bl1_start, bl1_end], key=lambda p: p[0])
    # 2. Middle seam (middle):
    # the merger of up_line2 and bottom_line2, which should in theory coincide.
    # Averaging the coincident endpoints gives a more robust start and end.
    # The shared start is the mean of (bl1_end, ul2_start, bl2_start)
    middle_start = np.mean([bl1_end, ul2_start, bl2_start], axis=0)
    # The shared end is the mean of (ul2_end, bl2_end, ul1_start)
    middle_end = np.mean([ul2_end, bl2_end, ul1_start], axis=0)
    # 3. Top-left seam (top_left):
    # use up_line1 directly.
    top_left_points = [ul1_start, ul1_end]  # keep the original direction
final_3_seams = {
'bottom_left': {
'start_3d': bottom_left_points[0],
'end_3d': bottom_left_points[1]
},
'middle': {
'start_3d': middle_start,
'end_3d': middle_end
},
'top_left': {
'start_3d': top_left_points[0],
'end_3d': top_left_points[1]
}
}
return final_3_seams
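For reference, a minimal invocation of merge_seams with hypothetical reconstructed endpoints (values illustrative, roughly following the ground-truth layout):

    import numpy as np
    reconstructed = {
        'up_line1': {'start_3d': np.array([0.1, 50.2, 0.3]), 'end_3d': np.array([0.2, 50.1, 149.7])},
        'up_line2': {'start_3d': np.array([0.1, 0.2, 0.1]), 'end_3d': np.array([0.0, 50.4, 0.2])},
        'bottom_line1': {'start_3d': np.array([-142.0, 0.1, 0.0]), 'end_3d': np.array([0.2, -0.1, 0.1])},
        'bottom_line2': {'start_3d': np.array([-0.1, 0.1, 0.0]), 'end_3d': np.array([0.1, 50.2, 0.1])},
    }
    model = merge_seams(reconstructed)
    # model['middle']['start_3d'] is the mean of the three nearly coincident endpoints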


@@ -0,0 +1,265 @@
import os
import numpy as np
import cv2
import itertools
from scipy.optimize import least_squares
from .reconstruction import get_camera_parameters
from .pose_estimation import get_ground_truth_seams, estimate_camera_pose
from .final_reconstruction import merge_seams  # the earlier merge_seams function still applies
def get_initial_parameters_with_solvepnp(all_2d_endpoints, ground_truth):
    """
    [New] Use solvePnP to give the global optimization a better initial pose guess.
    """
    print("\n--- Step 1: Getting a good initial guess for poses using solvePnP ---")
    # 1. Intrinsics and 3D points (same as before)
cam_params_L, cam_params_R, _ = get_camera_parameters()
camera_intrinsics = np.array([
cam_params_L['fc'][0], cam_params_L['fc'][1], cam_params_L['cc'][0], cam_params_L['cc'][1],
cam_params_R['fc'][0], cam_params_R['fc'][1], cam_params_R['cc'][0], cam_params_R['cc'][1]
])
points_3d_init = np.array([
ground_truth['up_line1']['start_3d'], ground_truth['up_line1']['end_3d'],
ground_truth['up_line2']['start_3d'], ground_truth['up_line2']['end_3d'],
ground_truth['bottom_line1']['start_3d'], ground_truth['bottom_line1']['end_3d'],
])
    # 2. [Key] Compute an initial pose independently for each camera
camera_poses_init = np.zeros((4, 6))
camera_map = {0: ('up', 'L'), 1: ('up', 'R'), 2: ('bottom', 'L'), 3: ('bottom', 'R')}
for i in range(4):
part_type, side = camera_map[i]
obj_pts, img_pts = [], []
        # Collect all the points this camera can see
for line_name in ['line1', 'line2']:
if f"{part_type}_{line_name}" in ground_truth:
gt_key = f"{part_type}_{line_name}"
img_key = f"{part_type}_{side.lower()}_{line_name}"
                # Find the indices of the 3D points inside points_3d_init
if gt_key == 'up_line1':
p_indices = [0, 1]
elif gt_key == 'up_line2':
p_indices = [2, 3]
elif gt_key == 'bottom_line1':
p_indices = [4, 5]
elif gt_key == 'bottom_line2':
                    p_indices = [2, 3]  # bottom_line2 also maps to points 2 and 3
obj_pts.extend([points_3d_init[p_indices[0]], points_3d_init[p_indices[1]]])
img_pts.extend([all_2d_endpoints[img_key]['start'], all_2d_endpoints[img_key]['end']])
        # Reuse the correspondence-search logic validated earlier
        best_err = float('inf')
        best_pose_for_cam = None
        for a, b in itertools.product([0, 1], repeat=2):  # assumes at most two lines
            current_img_pts = list(img_pts)
            if a: current_img_pts[0], current_img_pts[1] = current_img_pts[1], current_img_pts[0]
            if b: current_img_pts[2], current_img_pts[3] = current_img_pts[3], current_img_pts[2]
rvec, tvec = estimate_camera_pose(current_img_pts, obj_pts, side)
if rvec is not None:
cam = cam_params_L if side == 'L' else cam_params_R
proj_pts, _ = cv2.projectPoints(np.array(obj_pts), rvec, tvec, cam['K'], cam['kc'])
err = cv2.norm(np.array(current_img_pts, dtype=np.float32), proj_pts.reshape(-1, 2).astype(np.float32))
if err < best_err:
best_err = err
best_pose_for_cam = np.concatenate([rvec.flatten(), tvec.flatten()])
if best_pose_for_cam is not None:
camera_poses_init[i] = best_pose_for_cam
print(f"Initial pose for camera {i} ({part_type}-{side}) found with error {best_err:.2f}")
else:
print(f"Warning: Failed to find initial pose for camera {i}")
    # 3. Prepare the 2D observations (same as before)
    obs_2d, p_indices, c_indices = [], [], []
    # ... (identical to the previous get_initial_parameters; copied verbatim)
    point_map = {'up_line1': [0, 1], 'up_line2': [2, 3], 'bottom_line1': [4, 5], 'bottom_line2': [2, 3]}
for cam_idx, (part, side) in camera_map.items():
for line in ['line1', 'line2']:
img_key = f"{part}_{side.lower()}_{line}"
gt_key = f"{part}_{line}"
if img_key in all_2d_endpoints:
obs_2d.extend([all_2d_endpoints[img_key]['start'], all_2d_endpoints[img_key]['end']])
p_indices.extend(point_map[gt_key])
c_indices.extend([cam_idx, cam_idx])
return camera_intrinsics, camera_poses_init, points_3d_init, np.array(obs_2d), np.array(p_indices), np.array(
c_indices)
def cost_function(params, n_cameras, n_points, camera_indices, point_indices, points_2d, fixed_kcs,
                  fixed_3d_points_init):
    """Bundle-adjustment cost function (V2, with fixed parameters)."""
    # 1. Unpack the parameters being optimized from the flat vector
    intrinsics_flat = params[:8]
    camera_poses_flat = params[8: 8 + n_cameras * 6]
    # [Key change] the 3D points are no longer all taken from params:
    # every point except the first one is optimized
    points_3d_optimizable_flat = params[8 + n_cameras * 6:]
    camera_poses = camera_poses_flat.reshape((n_cameras, 6))
    # Rebuild the full 3D point list
    points_3d = np.zeros((n_points, 3))
    points_3d[0] = fixed_3d_points_init[0]  # the first point stays fixed!
    points_3d[1:] = points_3d_optimizable_flat.reshape((n_points - 1, 3))
    # ... the rest of the function (residual computation) is unchanged ...
residuals = []
for i in range(len(points_2d)):
cam_idx = camera_indices[i]
point_idx = point_indices[i]
pose = camera_poses[cam_idx]
point_3d = points_3d[point_idx]
if cam_idx in [0, 2]: # Left cameras
fx, fy, cx, cy = intrinsics_flat[:4]
kc = fixed_kcs[0]
else: # Right cameras
fx, fy, cx, cy = intrinsics_flat[4:]
kc = fixed_kcs[1]
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
reproj_pt, _ = cv2.projectPoints(point_3d, pose[:3], pose[3:], K, kc)
residuals.extend((reproj_pt.ravel() - points_2d[i]).tolist())
return np.array(residuals)
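For orientation, the packed parameter vector holds 8 shared intrinsics (fx, fy, cx, cy per side), 6 pose values per camera, and 3 coordinates per optimizable 3D point. A quick size check for the setup used below (4 cameras, 6 points, first point fixed):

    n_cameras, n_points = 4, 6
    n_params = 8 + n_cameras * 6 + (n_points - 1) * 3
    print(n_params)  # 47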
def run_global_optimization(all_2d_endpoints, ground_truth):
    """Run the global optimization (V2, fixing the scale ambiguity)."""
    # 1. Get the initial values (unchanged)
    intrinsics_init, poses_init, points_3d_init, obs_2d, p_indices, c_indices = get_initial_parameters_with_solvepnp(
        all_2d_endpoints, ground_truth)
    # 2. [Key change] Split the parameters into a fixed part and an optimized part:
    # the 3D points to optimize are all points except the first one
    optimizable_points_3d_init = points_3d_init[1:]
    # Pack all parameters to be optimized
    params_init = np.concatenate([
        intrinsics_init.ravel(),
        poses_init.ravel(),
        optimizable_points_3d_init.ravel()  # pack only the optimizable points
    ])
    # 3. Prepare the fixed parameters
    fixed_kcs = [get_camera_parameters()[0]['kc'], get_camera_parameters()[1]['kc']]
    # 4. Run the optimization (args now also carries fixed_3d_points_init)
n_cameras = 4
n_points = points_3d_init.shape[0]
print("\n--- Step 2: Running Global Bundle Adjustment (with scale constraint) ---")
result = least_squares(
cost_function,
params_init,
verbose=2,
x_scale='jac',
ftol=1e-6,
method='trf',
        args=(n_cameras, n_points, c_indices, p_indices, obs_2d, fixed_kcs, points_3d_init),  # pass the fixed initial 3D points
        max_nfev=2000  # the iteration budget can be raised if needed
)
params_final = result.x
    n_cameras = 4  # make sure n_cameras and n_points are available here
n_points = points_3d_init.shape[0]
    # --- Parse and save the results ---
    # 5a. Unpack all the final parameters
intrinsics_final_flat = params_final[:8]
camera_poses_final_flat = params_final[8: 8 + n_cameras * 6]
optimizable_points_3d_final_flat = params_final[8 + n_cameras * 6:]
intrinsics_final = intrinsics_final_flat.reshape(2, 4)
camera_poses_final = camera_poses_final_flat.reshape((n_cameras, 6))
points_3d_final = np.zeros((n_points, 3))
points_3d_final[0] = points_3d_init[0]
points_3d_final[1:] = optimizable_points_3d_final_flat.reshape((n_points - 1, 3))
    # 5b. Print to the console for a quick look
print("\n--- Optimized Camera Intrinsics ---")
print(f"Left Cam (fx, fy, cx, cy): {intrinsics_final[0]}")
print(f"Right Cam (fx, fy, cx, cy): {intrinsics_final[1]}")
print("\n--- Optimized Camera Poses (Rodrigues vector + translation) ---")
print(f"Pose of up-left cam: {camera_poses_final[0]}")
print(f"Pose of up-right cam: {camera_poses_final[1]}")
print(f"Pose of bottom-left cam: {camera_poses_final[2]}")
print(f"Pose of bottom-right cam: {camera_poses_final[3]}")
    # 5c. [Core] Save all the parameters to a file.
    # Define the save path
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
save_path = os.path.join(project_root, 'optimized_camera_parameters.npz')
    # Fetch the fixed distortion coefficients
cam_L_params, cam_R_params, _ = get_camera_parameters()
np.savez(
save_path,
        # the optimized intrinsics
        optimized_intrinsics_L=intrinsics_final[0],  # [fx, fy, cx, cy]
        optimized_intrinsics_R=intrinsics_final[1],
        # the fixed distortion coefficients (not optimized in the BA)
        dist_coeffs_L=cam_L_params['kc'],
        dist_coeffs_R=cam_R_params['kc'],
        # the optimized camera poses (relative to the object frame)
        pose_up_L=camera_poses_final[0],  # [rvec, tvec]
        pose_up_R=camera_poses_final[1],
        pose_bottom_L=camera_poses_final[2],
        pose_bottom_R=camera_poses_final[3],
        # also store a derived stereo extrinsic for reference
        # (the transform of up-right relative to up-left)
        new_extrinsics=calculate_new_extrinsics(camera_poses_final[0], camera_poses_final[1]),
        # the optimized 3D point coordinates
        optimized_3d_points=points_3d_final
)
print(f"\n✅ All optimized parameters have been saved to: {save_path}")
    # 6. Assemble the output (unchanged)
final_seams = {
'up_line1': {'start_3d': points_3d_final[0], 'end_3d': points_3d_final[1]},
'up_line2': {'start_3d': points_3d_final[2], 'end_3d': points_3d_final[3]},
'bottom_line1': {'start_3d': points_3d_final[4], 'end_3d': points_3d_final[5]},
'bottom_line2': {'start_3d': points_3d_final[2], 'end_3d': points_3d_final[3]}
}
return final_seams
def calculate_new_extrinsics(pose_L, pose_R):
    """Compute the relative pose (extrinsics) between two cameras from their object-relative poses."""
    # Object -> left camera transform
    rvec_L, tvec_L = pose_L[:3], pose_L[3:]
    R_L_from_obj, _ = cv2.Rodrigues(rvec_L)
    T_L_from_obj = tvec_L.reshape(3, 1)
    # Object -> right camera transform
    rvec_R, tvec_R = pose_R[:3], pose_R[3:]
    R_R_from_obj, _ = cv2.Rodrigues(rvec_R)
    T_R_from_obj = tvec_R.reshape(3, 1)
    # Left camera -> right camera transform.
    # T_R_from_L = R_R @ inv(R_L).T @ (T_L - T_R) is an incorrect derivation.
    # Correct derivation: P_obj = inv(R_L) @ (P_camL - T_L) = inv(R_R) @ (P_camR - T_R)
    #   => P_camR = R_R @ inv(R_L) @ P_camL + (T_R - R_R @ inv(R_L) @ T_L)
    #   => R_R_from_L = R_R @ R_L.T
    #   => T_R_from_L = T_R - R_R_from_L @ T_L
R_R_from_L = R_R_from_obj @ R_L_from_obj.T
t_R_from_L = T_R_from_obj - (R_R_from_L @ T_L_from_obj)
return {'R': R_R_from_L, 't': t_R_from_L}
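An illustrative check of the derivation above, using random poses: the recovered extrinsic should map left-camera coordinates to right-camera coordinates.

    import cv2
    import numpy as np
    rng = np.random.default_rng(0)
    pose_L, pose_R = rng.normal(size=6), rng.normal(size=6)
    ext = calculate_new_extrinsics(pose_L, pose_R)
    p_obj = rng.normal(size=3)
    R_L, _ = cv2.Rodrigues(pose_L[:3])
    R_R, _ = cv2.Rodrigues(pose_R[:3])
    p_camL = R_L @ p_obj + pose_L[3:]
    p_camR = R_R @ p_obj + pose_R[3:]
    assert np.allclose(ext['R'] @ p_camL + ext['t'].ravel(), p_camR)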


@@ -0,0 +1,70 @@
import torch
import torch.nn as nn
from torchvision import models
# This file only contains the LinkNet model definition,
# copied verbatim from the training script.
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, in_channels // 4, kernel_size=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=2, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // 4, out_channels, kernel_size=1),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.block(x)
class LinkNet(nn.Module):
def __init__(self, num_classes=1):
super().__init__()
        # Use a pretrained ResNet18 as the encoder.
        # Note: the pretrained weights need not be loaded for inference, since the
        # fully trained model weights are loaded afterwards.
        resnet = models.resnet18()  # weights=models.ResNet18_Weights.DEFAULT
        # The model was trained on single-channel grayscale images
        self.firstconv = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
        # Encoder layers
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
        # Decoder layers
self.decoder4 = DecoderBlock(512, 256)
self.decoder3 = DecoderBlock(256, 128)
self.decoder2 = DecoderBlock(128, 64)
self.decoder1 = DecoderBlock(64, 64)
        # Final output layers
self.final_deconv = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
self.final_relu = nn.ReLU(inplace=True)
self.final_conv = nn.Conv2d(32, num_classes, kernel_size=1)
def forward(self, x):
        # Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
        # Decoder
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
f = self.final_deconv(d1)
f = self.final_relu(f)
f = self.final_conv(f)
return torch.sigmoid(f)
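A quick, illustrative shape check: a 256x256 single-channel input should yield a 256x256 single-channel probability map (each decoder stage doubles the resolution that the matching encoder stage halved).

    import torch
    model = LinkNet(num_classes=1).eval()
    with torch.no_grad():
        out = model(torch.zeros(1, 1, 256, 256))
    print(out.shape)  # torch.Size([1, 1, 256, 256])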


@@ -0,0 +1,74 @@
import os
import cv2
import torch
import numpy as np
# Import the model definition created above
from .linknet_model_def import LinkNet
# Model cache
_linknet_models = {}
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"LinkNet will use device: {_device}")
def _get_endpoints_from_mask(mask: np.ndarray):
    """Internal helper: extract the endpoints of a line from a binary mask."""
points = cv2.findNonZero(mask)
if points is None:
return None, None
line_params = cv2.fitLine(points, cv2.DIST_L2, 0, 0.01, 0.01)
direction_vector = np.array([line_params[0][0], line_params[1][0]])
points_flat = points.reshape(-1, 2)
projections = points_flat.dot(direction_vector)
min_idx, max_idx = np.argmin(projections), np.argmax(projections)
start_point, end_point = tuple(points_flat[min_idx]), tuple(points_flat[max_idx])
return start_point, end_point
def segment_and_find_endpoints(original_image: np.ndarray,
crop_box: tuple,
model_path: str,
image_size: int = 256):
"""
在指定的裁切区域内使用LinkNet进行分割并找出焊缝端点。
返回原始图像坐标系下的 (start_point, end_point)。
"""
if model_path not in _linknet_models:
print(f"Loading LinkNet model from: {model_path}")
if not os.path.exists(model_path):
print(f"Error: LinkNet model file not found at {model_path}")
return None, None
model = LinkNet(num_classes=1)
model.load_state_dict(torch.load(model_path, map_location=_device))
model.to(_device)
model.eval()
_linknet_models[model_path] = model
model = _linknet_models[model_path]
x1, y1, x2, y2 = crop_box
cropped_img = original_image[y1:y2, x1:x2]
img_gray = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)
crop_h, crop_w = img_gray.shape
img_resized = cv2.resize(img_gray, (image_size, image_size))
img_normalized = img_resized / 255.0
img_tensor = torch.from_numpy(img_normalized).unsqueeze(0).unsqueeze(0).float().to(_device)
with torch.no_grad():
output = model(img_tensor)
pred_mask_resized = output.cpu().numpy()[0, 0]
pred_mask_binary = (pred_mask_resized > 0.5).astype(np.uint8)
predicted_mask = cv2.resize(pred_mask_binary, (crop_w, crop_h), interpolation=cv2.INTER_NEAREST) * 255
start_crop, end_crop = _get_endpoints_from_mask(predicted_mask)
if start_crop is None:
return None, None
start_orig = (start_crop[0] + x1, start_crop[1] + y1)
end_orig = (end_crop[0] + x1, end_crop[1] + y1)
return start_orig, end_orig
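Illustrative usage, with paths taken from the data_map in 3D_construction/main.py (run YOLO first to get the crop box, then segment inside it):

    import cv2
    from script.yolo_detector import detect_crop_area
    image_path = 'data/origin/up/l1.jpeg'
    box = detect_crop_area(image_path, 'module/yolov8/up.pt')
    if box:
        image = cv2.imread(image_path)
        endpoints = segment_and_find_endpoints(image, box, 'module/linknet/best_linknet_up_model_line1.pth')
        print(endpoints)  # ((x1, y1), (x2, y2)) in original-image coordinates, or (None, None)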


@@ -0,0 +1,230 @@
import itertools
import cv2
import numpy as np
from .reconstruction import get_camera_parameters  # the intrinsics are still needed
def get_ground_truth_seams():
    """
    [V5 - final version, based on the photo and the new coordinates]
    Coordinate system with the common intersection as the origin (0, 0, 0).
    Y axis: along the middle, shared seam.
    X axis: along the bottom-left seam.
    Z axis: perpendicular to the XY plane, pointing up.
    """
    print("--- INFO: Using new ground truth based on visual inspection. ---")
    # 1. Define the key points
    p_origin = np.array([0.0, 0.0, 0.0])  # common intersection, origin of the coordinate system
    p_middle_end = np.array([0.0, 50.3, 0.0])  # end point of the middle seam
    p_bottom_start = np.array([-142.2, 0.0, 0.0])  # start of the bottom-left seam (along the negative X axis)
    # For up_line1 a reasonable 3D coordinate is needed.
    # It starts at p_middle_end (0, 50.3, 0).
    # Assuming it extends mainly along Z, give it a length of, say, 150.
    # The measured (-11.7, 142.5) may carry measurement error or a coordinate-frame offset,
    # so an idealized, non-degenerate point is used first to make sure the algorithm works.
    p_top_end = np.array([0.0, 50.3, 150.0])  # assume it points straight up
ground_truth = {
        # The two seams captured in the 'up' shots
'up_line1': {
'start_3d': p_middle_end, # (0, 50.3, 0)
'end_3d': p_top_end # (0, 50.3, 150)
},
'up_line2': {
'start_3d': p_origin, # (0, 0, 0)
'end_3d': p_middle_end # (0, 50.3, 0)
},
        # The two seams captured in the 'bottom' shots
'bottom_line1': {
'start_3d': p_bottom_start, # (-142.2, 0, 0)
'end_3d': p_origin # (0, 0, 0)
},
        'bottom_line2': {  # identical to up_line2
'start_3d': p_origin,
'end_3d': p_middle_end
}
}
return ground_truth
# def get_ground_truth_seams():
# """返回你手动测量的三维坐标(物体坐标系)。"""
# ground_truth = {
# 'up_line1': {
# 'start_3d': np.array([142.2, 0, 7.3]),
# 'end_3d': np.array([153.9, 0, 149.8])
# },
# 'up_line2': {
# 'start_3d': np.array([142.2, 0, 7.3]),
# 'end_3d': np.array([142.2, 50.3, 7.3])
# },
# 'bottom_line1': {
# 'start_3d': np.array([8.9, 0, 7.3]),
# 'end_3d': np.array([140.2, 0, 7.3])
# },
# 'bottom_line2': {
# 'start_3d': np.array([142.2, 0, 7.3]),
# 'end_3d': np.array([142.2, 50.3, 7.3])
# }
# }
# return ground_truth
def estimate_camera_pose(image_points_2d, object_points_3d, camera_side='L'):
    """
    Estimate the camera pose with solvePnP.
    Args:
        image_points_2d (np.ndarray): 2D image points (N, 2).
        object_points_3d (np.ndarray): corresponding 3D points in the object frame (N, 3).
        camera_side (str): 'L' or 'R', selects the camera intrinsics.
    Returns:
        tuple: (rotation_vector, translation_vector), the camera pose.
               This is the transform from the object frame to the camera frame.
    """
cam_L, cam_R, _ = get_camera_parameters()
if camera_side == 'L':
camera_matrix = cam_L['K']
dist_coeffs = cam_L['kc']
else:
camera_matrix = cam_R['K']
dist_coeffs = cam_R['kc']
    # solvePnP expects float64 inputs
    object_points_3d = np.array(object_points_3d, dtype=np.float64)
    image_points_2d = np.array(image_points_2d, dtype=np.float64)
    # Solve for the pose with solvePnP:
    # success: whether the solve succeeded
    # rvec: rotation vector (Rodrigues vector)
    # tvec: translation vector
    success, rvec, tvec = cv2.solvePnP(object_points_3d, image_points_2d, camera_matrix, dist_coeffs)
if not success:
print("Warning: solvePnP failed to estimate camera pose.")
return None, None
return rvec, tvec
def reproject_to_object_coords(endpoints_2d_L, endpoints_2d_R, part_type='up'):
    """
    New reconstruction pipeline (V2, corrected):
    1. Determine the most likely correspondence between the 2D and 3D points.
    2. Estimate both camera poses using the correct correspondence.
    3. Triangulate all points using the stereo information.
    """
    ground_truth = get_ground_truth_seams()
    cam_L_params, cam_R_params, _ = get_camera_parameters()
    # --- Prepare the raw inputs for solvePnP ---
    # object_points_3d: list of ground-truth 3D points, in a fixed order
    # image_points_2d_L: recognized 2D points, whose order may need adjusting
    object_points_3d = []
    image_points_2d_L = []
    seam_keys = []  # record the seam keys for assembling the output later
for line_name in ['line1', 'line2']:
gt_key = f"{part_type}_{line_name}"
if gt_key in ground_truth:
            # Add the ground-truth 3D points
object_points_3d.append(ground_truth[gt_key]['start_3d'])
object_points_3d.append(ground_truth[gt_key]['end_3d'])
            # Add the corresponding recognized 2D points
key_L = f"{part_type}_l_{line_name}"
image_points_2d_L.append(endpoints_2d_L[key_L]['start'])
image_points_2d_L.append(endpoints_2d_L[key_L]['end'])
seam_keys.append(gt_key)
    # --- 1. Find the best 2D-3D point correspondence ---
    # For each seam the 2 points can match in 2 ways (forward or reversed);
    # with N seams there are 2^N combinations.
    # Here there are two seams, hence 2^2 = 4 combinations.
    best_reprojection_error = float('inf')
    best_image_points_L = None
    # a and b flag whether line1 and line2 need flipping (0 = keep, 1 = flip)
    for a, b in itertools.product([0, 1], repeat=2):
        swaps = (a, b)
        current_image_points_L = list(image_points_2d_L)  # work on a copy
        # Flip the start/end of the corresponding seam for this combination
        if swaps[0]:  # flip line1
            current_image_points_L[0], current_image_points_L[1] = current_image_points_L[1], current_image_points_L[0]
        if swaps[1]:  # flip line2
            current_image_points_L[2], current_image_points_L[3] = current_image_points_L[3], current_image_points_L[2]
        # Try to estimate the pose with the current correspondence
        rvec_L_try, tvec_L_try = estimate_camera_pose(current_image_points_L, object_points_3d, 'L')
        if rvec_L_try is not None:
            # Score this combination by its reprojection error
projected_points, _ = cv2.projectPoints(np.array(object_points_3d), rvec_L_try, tvec_L_try,
cam_L_params['K'], cam_L_params['kc'])
error = cv2.norm(np.array(current_image_points_L, dtype=np.float32),
projected_points.reshape(-1, 2).astype(np.float32), cv2.NORM_L2)
if error < best_reprojection_error:
best_reprojection_error = error
best_image_points_L = current_image_points_L
if best_image_points_L is None:
print(f"Error: Could not find a valid pose for '{part_type}' part.")
return None
print(f"Found best point correspondence for '{part_type}' with reprojection error: {best_reprojection_error:.2f}")
    # --- 2. Re-run the full reconstruction with the best correspondence ---
    # Correct the ordering of the right-camera 2D points.
    # This step is a little involved; assume for now that the start/end flips agree
    # between the left and right cameras. That is a reasonable assumption, since the
    # cameras are close together and see the same geometric orientation.
best_image_points_R = []
for line_name in ['line1', 'line2']:
key_R = f"{part_type}_r_{line_name}"
points = [endpoints_2d_R[key_R]['start'], endpoints_2d_R[key_R]['end']]
        # Check whether the left camera's points were flipped
original_L = [endpoints_2d_L[key_R.replace('_r_', '_l_')]['start'],
endpoints_2d_L[key_R.replace('_r_', '_l_')]['end']]
idx = 0 if line_name == 'line1' else 2
        # If the left side was flipped, flip the right side too
if best_image_points_L[idx] != original_L[0]:
points.reverse()
best_image_points_R.extend(points)
    # Estimate the left camera pose
rvec_L, tvec_L = estimate_camera_pose(best_image_points_L, object_points_3d, 'L')
R_L, _ = cv2.Rodrigues(rvec_L)
P_L = cam_L_params['K'] @ np.hstack((R_L, tvec_L))
    # Estimate the right camera pose
rvec_R, tvec_R = estimate_camera_pose(best_image_points_R, object_points_3d, 'R')
R_R, _ = cv2.Rodrigues(rvec_R)
P_R = cam_R_params['K'] @ np.hstack((R_R, tvec_R))
    # Triangulate
points_2d_L_undistorted = cv2.undistortPoints(np.array(best_image_points_L, dtype=np.float32), cam_L_params['K'],
cam_L_params['kc'], P=cam_L_params['K'])
points_2d_R_undistorted = cv2.undistortPoints(np.array(best_image_points_R, dtype=np.float32), cam_R_params['K'],
cam_R_params['kc'], P=cam_R_params['K'])
points_4d = cv2.triangulatePoints(P_L, P_R, points_2d_L_undistorted.reshape(-1, 2).T,
points_2d_R_undistorted.reshape(-1, 2).T)
points_3d_object = (points_4d[:3] / points_4d[3]).T
    # Assemble the output
final_seams = {}
for i, key in enumerate(seam_keys):
final_seams[key] = {
'start_3d': points_3d_object[i * 2],
'end_3d': points_3d_object[i * 2 + 1]
}
return final_seams
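A hypothetical invocation sketch (pixel values illustrative): the same endpoint dict can be passed for both arguments because the keys already encode the camera side ('up_l_line1', 'up_r_line1', ...), matching how main.py calls this function.

    endpoints = {
        'up_l_line1': {'start': (1200, 800), 'end': (1400, 300)},
        'up_l_line2': {'start': (1200, 800), 'end': (1600, 900)},
        'up_r_line1': {'start': (1100, 820), 'end': (1350, 320)},
        'up_r_line2': {'start': (1100, 820), 'end': (1500, 930)},
    }
    seams = reproject_to_object_coords(endpoints, endpoints, part_type='up')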


@@ -0,0 +1,55 @@
import os
from ultralytics import YOLO
# Loading the model outside the function is good practice: the model then only needs
# to be loaded once across repeated calls.
# The model path is passed in as a parameter to keep the function generic.
models = {}
def detect_crop_area(image_path: str, model_path: str):
"""
使用YOLOv8模型检测图像中的裁切区域。
Args:
image_path (str): 原始图像的文件路径。
model_path (str): 用于检测的YOLOv8模型 (.pt) 的路径。
Returns:
tuple or None: 如果检测到物体,返回一个包含整数坐标的元组 (x1, y1, x2, y2)。
如果没有检测到或发生错误,返回 None。
"""
    # Load and cache the model if it has not been loaded yet
if model_path not in models:
print(f"Loading YOLOv8 model from: {model_path}")
if not os.path.exists(model_path):
print(f"Error: Model file not found at {model_path}")
return None
models[model_path] = YOLO(model_path)
model = models[model_path]
    # Check that the image file exists
if not os.path.exists(image_path):
print(f"Error: Image file not found at {image_path}")
return None
    try:
        # Run the prediction; verbose=False cuts down on console noise
        results = model.predict(source=image_path, conf=0.5, verbose=False)
        # Check whether anything was detected
if not results or not results[0].boxes:
print(f"Warning: YOLO did not detect any objects in {image_path}")
return None
        # Take the detection box with the highest confidence.
        # results[0].boxes holds all YOLOv8 detections; the first (highest-confidence) one is used.
box = results[0].boxes.xyxy[0].cpu().numpy().astype(int)
        # Return a tuple of integer coordinates
return tuple(box)
except Exception as e:
print(f"An error occurred during prediction for {image_path}: {e}")
return None


@@ -0,0 +1,163 @@
import numpy as np
import cv2
import open3d as o3d
def get_camera_parameters():
    """
    Store and return the camera calibration parameters provided by the senior labmate.
    All lists are converted to NumPy arrays for later computation.
    """
    # Left camera intrinsics
cam_params_L = {
'fc': np.array([3774.896, 3770.590]),
'cc': np.array([1327.950, 956.597]),
'kc': np.array([-0.098, 0.208, -0.00005, 0.00111, 0]),
        # OpenCV camera-matrix format [fx, 0, cx; 0, fy, cy; 0, 0, 1]
'K': np.array([
[3774.896, 0, 1327.950],
[0, 3770.590, 956.597],
[0, 0, 1]
])
}
    # Right camera intrinsics
cam_params_R = {
'fc': np.array([3758.657, 3763.935]),
'cc': np.array([1274.940, 965.722]),
'kc': np.array([0.093, -0.219, 0.00079, 0.00255, 0]),
'K': np.array([
[3758.657, 0, 1274.940],
[0, 3763.935, 965.722],
[0, 0, 1]
])
}
    # Extrinsics (transform of the right camera relative to the left camera)
extrinsics = {
'R': np.array([
[0.1169, 0.6292, 0.7683],
[0.9881, 0.0036, 0.1534],
[0.0993, -0.7771, -0.6214]
]),
        'T': np.array([-220.36786, 2.23290, 30.06279]).reshape(3, 1)  # translation vector
}
return cam_params_L, cam_params_R, extrinsics
def reconstruct_points(points_L, points_R, image_size=(4000, 3000)):
    """
    Core 3D reconstruction routine built on OpenCV.
    Args:
        points_L (list of tuples): 2D points in the left image [(u1, v1), (u2, v2), ...].
        points_R (list of tuples): the corresponding 2D points in the right image.
        image_size (tuple): original image size (width, height), used for stereo rectification.
    Returns:
        np.ndarray: reconstructed 3D points (N, 3), in the units used for calibration (usually mm).
    """
    # 1. Fetch the camera parameters
    cam_L, cam_R, extrinsics = get_camera_parameters()
    # 2. Compute the stereo-rectification projection matrices.
    # stereoRectify returns several matrices; only R1/R2 and P1/P2 (the new projection
    # matrices) are needed. No image remap is required, since only a few points are
    # transformed.
    # Note: R and T here describe the right camera relative to the left, matching
    # OpenCV's convention.
    R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
        cameraMatrix1=cam_L['K'],
        distCoeffs1=cam_L['kc'],
        cameraMatrix2=cam_R['K'],
        distCoeffs2=cam_R['kc'],
        imageSize=image_size,
        R=extrinsics['R'],
        T=extrinsics['T'].flatten()  # T must be a 1-D array
    )
    # 3. Undistort the input 2D points into the rectified frames.
    # Note: cv2.undistortPoints expects shape (N, 1, 2) and float32; passing R=R1/R2
    # and P=P1/P2 expresses the points in the rectified frames that P1/P2 project from.
    points_L_np = np.array(points_L, dtype=np.float32).reshape(-1, 1, 2)
    points_R_np = np.array(points_R, dtype=np.float32).reshape(-1, 1, 2)
    points_L_undistorted = cv2.undistortPoints(points_L_np, cam_L['K'], cam_L['kc'], R=R1, P=P1)
    points_R_undistorted = cv2.undistortPoints(points_R_np, cam_R['K'], cam_R['kc'], R=R2, P=P2)
    # 4. Triangulate with triangulatePoints.
    # The function takes the rectified points and the new projection matrices;
    # the input points must be shaped (2, N).
    points_L_for_triangulate = points_L_undistorted.reshape(-1, 2).T
    points_R_for_triangulate = points_R_undistorted.reshape(-1, 2).T
    # triangulatePoints returns homogeneous coordinates (4, N)
    points_4d_hom = cv2.triangulatePoints(P1, P2, points_L_for_triangulate, points_R_for_triangulate)
    # 5. Convert homogeneous to Cartesian coordinates by dividing by the fourth
    # component w, then rotate from the rectified left frame back into the original
    # left-camera frame. The result is shaped (N, 3).
    points_3d = points_4d_hom[:3, :] / points_4d_hom[3, :]
    return points_3d.T @ R1
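Illustrative usage with hypothetical pixel coordinates (image_size must match the size used at calibration time):

    pts_L = [(1523, 988), (1891, 1240)]
    pts_R = [(1302, 1001), (1665, 1255)]
    xyz = reconstruct_points(pts_L, pts_R, image_size=(4000, 3000))
    print(xyz.shape)  # (2, 3), in mm, in the left-camera frame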
def visualize_reconstructed_seams(reconstructed_seams_3d):
    """
    Visualize the reconstructed 3D weld-seam segments with Open3D.
    Args:
        reconstructed_seams_3d (dict): dict of 3D endpoint coordinates.
    """
    print("\n--- Visualizing Final 3-Seam Model vs. Ground Truth ---")
    # Final color map
color_map = {
        # final model (bright colors)
        'bottom_left_final': [1, 0, 0],  # red
        'middle_final': [0, 1, 0],  # green
        'top_left_final': [0, 0, 1],  # blue
        # ground truth (slightly darker variants)
        'bottom_left_truth': [0.8, 0.4, 0.4],  # pink
        'middle_truth': [0.4, 0.8, 0.4],  # light green
        'top_left_truth': [0.4, 0.4, 0.8],  # light blue
}
geometries = []
coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=50, origin=[0, 0, 0])
geometries.append(coordinate_frame)
    # Iterate over all reconstructed seams
for name, points in reconstructed_seams_3d.items():
start_pt = points['start_3d']
end_pt = points['end_3d']
        # Open3D expects lists of points and line indices
        line_points = [start_pt, end_pt]
        line_indices = [[0, 1]]  # connect the first point to the second
        line_color = color_map.get(name, [0.5, 0.5, 0.5])  # gray if no color is defined
        # Create the LineSet object
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(line_points),
lines=o3d.utility.Vector2iVector(line_indices)
)
        # Set the color of this segment
line_set.colors = o3d.utility.Vector3dVector([line_color])
geometries.append(line_set)
        # (Optional) add small spheres at the endpoints to highlight them
        start_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=10)  # radius can be tuned
start_sphere.translate(start_pt)
start_sphere.paint_uniform_color(line_color)
geometries.append(start_sphere)
end_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=10)
end_sphere.translate(end_pt)
end_sphere.paint_uniform_color(line_color)
geometries.append(end_sphere)
    # Draw all geometry objects
o3d.visualization.draw_geometries(
geometries,
window_name="Reconstructed 3D Weld Seams",
width=1280,
height=720
)

View File

@@ -0,0 +1,44 @@
import os
from ultralytics import YOLO
# Model cache
_yolo_models = {}
def detect_crop_area(image_path: str, model_path: str):
"""
使用YOLOv8模型检测图像中的裁切区域。
Args:
image_path (str): 原始图像的文件路径。
model_path (str): 用于检测的YOLOv8模型 (.pt) 的路径。
Returns:
tuple or None: 如果检测到物体,返回 (x1, y1, x2, y2);否则返回 None。
"""
if model_path not in _yolo_models:
print(f"Loading YOLOv8 model from: {model_path}")
if not os.path.exists(model_path):
print(f"Error: YOLO model file not found at {model_path}")
return None
_yolo_models[model_path] = YOLO(model_path)
model = _yolo_models[model_path]
if not os.path.exists(image_path):
print(f"Error: Image file not found at {image_path}")
return None
try:
results = model.predict(source=image_path, conf=0.5, verbose=False)
if not results or not results[0].boxes:
print(f"Warning: YOLO did not detect any objects in {image_path}")
return None
box = results[0].boxes.xyxy[0].cpu().numpy().astype(int)
return tuple(box)
except Exception as e:
print(f"An error occurred during YOLO prediction for {image_path}: {e}")
return None


@@ -0,0 +1,65 @@
import os
from PIL import Image
from tqdm import tqdm
# --- Configuration ---
# 1. Path to your image folder.
# Given the project layout, this should be something like 'VOCdevkit/VOC2007/JPEGImages'.
image_folder = '../label/up'
# 2. Whether to delete the original .jpeg files after conversion
delete_original = True
# --- End of configuration ---
def convert_jpeg_to_jpg(folder_path, delete_original_file=True):
    """
    Convert the .jpeg images in the given folder to .jpg.
    :param folder_path: folder containing the images.
    :param delete_original_file: whether to delete the original .jpeg files.
    """
    if not os.path.isdir(folder_path):
        print(f"Error: folder '{folder_path}' does not exist.")
        return
    # Find every file ending in .jpeg or .JPEG
    jpeg_files = [f for f in os.listdir(folder_path) if f.lower().endswith('.jpeg')]
    if not jpeg_files:
        print(f"No .jpeg files found in '{folder_path}'.")
        return
    print(f"Found {len(jpeg_files)} .jpeg files, starting conversion...")
    for filename in tqdm(jpeg_files, desc="Converting"):
        jpeg_path = os.path.join(folder_path, filename)
        # Build the new .jpg filename
        base_name = os.path.splitext(filename)[0]
        jpg_filename = f"{base_name}.jpg"
        jpg_path = os.path.join(folder_path, jpg_filename)
        try:
            with Image.open(jpeg_path) as img:
                # Make sure the image is RGB, since JPG has no transparency
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                # Save as .jpg at high quality
                img.save(jpg_path, 'jpeg', quality=95)
            # If the conversion succeeded and deletion is enabled, remove the original
            if delete_original_file:
                os.remove(jpeg_path)
        except Exception as e:
            print(f"\nError while processing '{filename}': {e}")
    print("\nConversion finished!")
if __name__ == '__main__':
convert_jpeg_to_jpg(image_folder, delete_original)

192
OpenCV/convert_to_voc.py Normal file

@@ -0,0 +1,192 @@
import os
import json
import xml.etree.ElementTree as ET
from xml.dom import minidom
from tqdm import tqdm
import re  # regular expressions
# --- Configuration ---
# 1. Folder containing the source JSON files
json_folder = '../label/up_json'  # example path; change to your JSON folder
# 2. Folder containing the source images (used to read image sizes)
image_folder = '../label/up'  # example path; change to your image folder
# 3. Folder where the generated XML files will be saved
output_xml_folder = '../label/up_xml'
# 4. The target class name to detect (corresponding to label "3")
class_name_for_label_3 = "Space weld workpiece"  # the name used in the XML example
# 5. Size of each group
group_size = 5
# --- End of configuration ---
def create_xml_annotation(image_info, objects_info):
    """
    Build an XML tree from the given information.
    :param image_info: dict with the image filename, size, etc.
    :param objects_info: list of dicts, one per object.
    :return: XML ElementTree object
    """
    # Create the root node
    annotation = ET.Element('annotation')
    # Child node - folder
    folder = ET.SubElement(annotation, 'folder')
    folder.text = 'JPEGImages'
    # Child node - filename
    filename_node = ET.SubElement(annotation, 'filename')
    filename_node.text = image_info['filename']
    # Child node - path (usually not critical, but better to have one)
    path = ET.SubElement(annotation, 'path')
    # The path points into the JPEGImages folder
    image_path_in_voc = os.path.join('..', 'JPEGImages', image_info['filename'])
    path.text = image_path_in_voc
    # Child node - source
    source = ET.SubElement(annotation, 'source')
    database = ET.SubElement(source, 'database')
    database.text = 'Unknown'
    # Child node - size
    size = ET.SubElement(annotation, 'size')
    width = ET.SubElement(size, 'width')
    width.text = str(image_info['width'])
    height = ET.SubElement(size, 'height')
    height.text = str(image_info['height'])
    depth = ET.SubElement(size, 'depth')
    depth.text = str(image_info.get('depth', 3))
    # Child node - segmented
    segmented = ET.SubElement(annotation, 'segmented')
    segmented.text = '0'
    # Add one object node per object
for obj in objects_info:
object_node = ET.SubElement(annotation, 'object')
name = ET.SubElement(object_node, 'name')
name.text = obj['name']
pose = ET.SubElement(object_node, 'pose')
pose.text = 'Unspecified'
truncated = ET.SubElement(object_node, 'truncated')
truncated.text = '0'
difficult = ET.SubElement(object_node, 'difficult')
difficult.text = '0'
bndbox = ET.SubElement(object_node, 'bndbox')
xmin = ET.SubElement(bndbox, 'xmin')
xmin.text = str(int(obj['xmin']))
ymin = ET.SubElement(bndbox, 'ymin')
ymin.text = str(int(obj['ymin']))
xmax = ET.SubElement(bndbox, 'xmax')
xmax.text = str(int(obj['xmax']))
ymax = ET.SubElement(bndbox, 'ymax')
ymax.text = str(int(obj['ymax']))
return annotation
def prettify_xml(elem):
    """
    Pretty-print the XML so it is easier to read.
    """
    rough_string = ET.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")
def main():
    if not os.path.exists(output_xml_folder):
        os.makedirs(output_xml_folder)
        print(f"Created output folder: {output_xml_folder}")
    json_files = sorted([f for f in os.listdir(json_folder) if f.endswith('.json')])
    print(f"Found {len(json_files)} JSON files, starting conversion...")
    for json_file in tqdm(json_files, desc="Processing JSON files"):
        base_name = os.path.splitext(json_file)[0]
        # Match the prefix and number with a regular expression
        match = re.match(r'([a-zA-Z]+)(\d+)', base_name)
        # 1. Check whether this file starts a group
        is_group_start_file = False
        if match:
            num = int(match.group(2))
            # Numbers 1, 6, 11, ... mark the start of a group
            if (num - 1) % group_size == 0:
                is_group_start_file = True
        else:
            # Filenames that do not follow the l1/r5 pattern are "plain" files forming their own group
            is_group_start_file = True
        if not is_group_start_file:
            # Not a group-start file (e.g. l2, l3, ...): skip, since its annotation was handled by l1
            continue
        # --- This is a group-start file; process the group ---
        json_path = os.path.join(json_folder, json_file)
        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        # 2. Extract every qualifying annotation object from the group-start file
objects_to_write = []
for shape in data.get('shapes', []):
if shape.get('label') == '1' and shape.get('shape_type') == 'rectangle':
points = shape.get('points', [])
if len(points) == 2:
x_coords = sorted([p[0] for p in points])
y_coords = sorted([p[1] for p in points])
objects_to_write.append({
'name': class_name_for_label_3,
'xmin': x_coords[0], 'ymin': y_coords[0],
'xmax': x_coords[1], 'ymax': y_coords[1],
})
        if not objects_to_write:
            continue
        # 3. Decide which images this annotation applies to
        target_image_names = []
        if match:
            # The filename follows the l1/r6 pattern
            prefix = match.group(1)
            start_num = int(match.group(2))
            for i in range(group_size):
                # Assume the images are .jpg
                target_image_names.append(f"{prefix}{start_num + i}.jpg")
        else:
            # A plain file: apply only to the image of the same name,
            # again assuming .jpg
            target_image_names.append(f"{base_name}.jpg")
        # 4. Generate an XML file for every target image in the group
        for image_name in target_image_names:
            image_path = os.path.join(image_folder, image_name)
            if not os.path.exists(image_path):
                print(f"\nWarning: image '{image_name}' not found; skipping its XML file.")
                continue
            # Use the size information from the JSON
            image_info = {'filename': image_name, 'width': data['imageWidth'], 'height': data['imageHeight']}
            xml_tree = create_xml_annotation(image_info, objects_to_write)
            xml_string = prettify_xml(xml_tree)
            xml_filename = os.path.splitext(image_name)[0] + '.xml'
            output_path = os.path.join(output_xml_folder, xml_filename)
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(xml_string)
    print("Conversion finished. All XML files saved to:", output_xml_folder)
if __name__ == '__main__':
main()

Binary image files changed (contents not shown).