054_About Windows 10 Settings

This post describes how the Control Panel's role in Windows 10 is gradually being taken over by the Settings app. The Settings window can be opened quickly by clicking the gear icon on the Start menu or by pressing Win+I. Settings not only offers a category view similar to the Control Panel's, it also lets you pin frequently used categories to the Start screen through the right-click menu for quick access.

Having upgraded all the way up from XP and Win7, perhaps simply out of habit, the one thing I still keep looking for in Win10 is the Control Panel.

In fact, the Control Panel still exists in Win10, but the operating system is clearly de-emphasizing it. The Settings app is evidently meant to be its replacement.

Click the Windows-logo Start button, then click the gear icon to open the Settings window.

There is an even simpler way to open this window: the keyboard shortcut Win+I.
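As a side note, the Settings app also registers the ms-settings: URI scheme, so individual pages can be launched from the Run dialog or from a script rather than through the Start menu. A minimal Python sketch, assuming a standard Windows 10 installation (the "personalization" page name is one of the documented ms-settings URIs):

```python
import os

# Open the main Settings window (roughly equivalent to pressing Win+I).
os.startfile("ms-settings:")

# Jump straight to a specific page, e.g. Personalization.
os.startfile("ms-settings:personalization")
```

The same URIs can be typed directly into Win+R, which is handy if you only need one particular page now and then.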

In practice the Settings window looks a lot like the Control Panel's category view, but its grouping leans more toward how features are actually used. Besides clicking into each group, there is another interesting feature: right-clicking a group lets you pin it to the Start screen. For example, you can pin the Personalization group.

It feels a bit like a shortcut, although this Start screen area seems to be evolving in a more tablet-like direction. Some tiles show live, changing preview information. For home or multimedia use this should be a nice feature; for someone like me it does not seem all that useful.

I have never known Windows in much detail. Accumulating small tips like this from now on may well help me make better use of this system.
