The Hand-Eye Calibration Procedure
To collect the data required for calibration, a human operator or an automated program moves the robot through a series of preset motions (10 to 20 are recommended). At the end of each move, the camera is triggered to capture an image of the calibration object; the pose of the calibration object is extracted from the image, and the corresponding robot pose is read from the robot controller. For good calibration quality, the robot poses used when imaging the calibration object should:
- be distinctly different
- exercise all of the robot's joints
so that images are acquired from many different viewpoints. The figure below shows the different imaging poses required for eye-to-hand and eye-in-hand configurations. In every pose, the calibration object should be fully visible in the camera's field of view. A hypothetical pose-sampling sketch follows the note below.
Note
If ArUco markers are used as the calibration object, not every marker needs to be fully visible in the camera's field of view at every robot pose.
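To make the viewpoint requirement concrete, here is a hypothetical helper, a minimal sketch that is not part of the Zivid API: it derives a set of tilted capture poses from one nominal pose facing the calibration object (base_pose and the tilt angles are illustrative assumptions).

import numpy as np
from scipy.spatial.transform import Rotation as R

def tilted_poses(base_pose: np.ndarray, angles_deg=(-30, -15, 15, 30)) -> list:
    """Tilt a nominal 4x4 end-effector pose around its own X and Y axes.

    The resulting poses view the calibration object from different angles;
    combine them with small translations to keep the object fully in view.
    """
    poses = []
    for axis in ("x", "y"):
        for angle in angles_deg:
            tilt = np.eye(4)
            tilt[:3, :3] = R.from_euler(axis, angle, degrees=True).as_matrix()
            poses.append(base_pose @ tilt)
    return poses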
The next task is to estimate the rotation and translation components of the calibration object pose and of the hand-eye transform by solving a homogeneous transformation equation.
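For orientation, the standard formulation from the hand-eye literature is the following (this describes the general problem, not Zivid's particular solver). With an eye-in-hand mounting, let A_i be the relative motion of the robot flange between two robot poses and B_i the corresponding relative motion of the calibration object as observed by the camera. The unknown hand-eye transform X then satisfies

    A_i X = X B_i,    i = 1, ..., n

where A_i, B_i, and X are 4x4 homogeneous transformation matrices, each combining a rotation and a translation. Every pair of robot poses contributes one such constraint, which is why a set of distinctly different poses is needed for a well-conditioned solution; eye-to-hand setups lead to an equation of the same form with the motions defined accordingly.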
The steps of the hand-eye calibration procedure:
Tip
We recommend warming up the camera and running Infield Correction before performing hand-eye calibration. Use the same capture cycle during warm-up, Infield Correction, and hand-eye calibration as in your application. To further reduce the impact of temperature-related performance factors, enable Thermal Stabilization.
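As a sketch of what such a warm-up phase can look like in Python (the 10-minute duration and 5-second capture cycle below are assumptions; replace the stand-in settings with the ones your application uses):

import datetime
import time

import zivid

app = zivid.Application()
camera = app.connect_camera()

warmup_duration = datetime.timedelta(minutes=10)  # assumed warm-up length
capture_cycle = datetime.timedelta(seconds=5)  # should match your application's capture cycle

# Stand-in settings; use your application's own settings instead.
settings = zivid.Settings(acquisitions=[zivid.Settings.Acquisition()])

end_time = datetime.datetime.now() + warmup_duration
while datetime.datetime.now() < end_time:
    start = time.monotonic()
    camera.capture(settings)  # the frame is discarded; only the thermal load matters
    elapsed = time.monotonic() - start
    time.sleep(max(0.0, capture_cycle.total_seconds() - elapsed))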
1. Move the robot to a new pose
2. Record the end-effector pose
3. Image the calibration object (to obtain its pose)
4. Repeat steps 1-3 multiple times, e.g. 10-20 times
5. Compute the hand-eye transform
To see how to integrate hand-eye calibration into your solution, check out our interactive code samples:
/*
Perform Hand-Eye calibration.
*/
#include <Zivid/Application.h>
#include <Zivid/Calibration/Detector.h>
#include <Zivid/Calibration/HandEye.h>
#include <Zivid/Calibration/Pose.h>
#include <Zivid/Exception.h>
#include <Zivid/Zivid.h>
#include <chrono>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
namespace
{
enum class CommandType
{
AddPose,
Calibrate,
Unknown
};
std::string getInput()
{
std::string command;
std::getline(std::cin, command);
return command;
}
CommandType enterCommand()
{
std::cout << "Enter command, p (to add robot pose) or c (to perform calibration): ";
const auto command = getInput();
if(command == "P" || command == "p")
{
return CommandType::AddPose;
}
if(command == "C" || command == "c")
{
return CommandType::Calibrate;
}
return CommandType::Unknown;
}
Zivid::Calibration::Pose enterRobotPose(size_t index)
{
std::cout << "Enter pose with id (a line with 16 space separated values describing 4x4 row-major matrix) : "
<< index << std::endl;
std::stringstream input(getInput());
float element{ 0 };
std::vector<float> transformElements;
for(size_t i = 0; i < 16 && input >> element; ++i)
{
transformElements.emplace_back(element);
}
const auto robotPose{ Zivid::Matrix4x4{ transformElements.cbegin(), transformElements.cend() } };
std::cout << "The following pose was entered: \n" << robotPose << std::endl;
return robotPose;
}
Zivid::Frame assistedCapture(Zivid::Camera &camera)
{
const auto parameters = Zivid::CaptureAssistant::SuggestSettingsParameters{
Zivid::CaptureAssistant::SuggestSettingsParameters::AmbientLightFrequency::none,
Zivid::CaptureAssistant::SuggestSettingsParameters::MaxCaptureTime{ std::chrono::milliseconds{ 800 } }
};
const auto settings = Zivid::CaptureAssistant::suggestSettings(camera, parameters);
return camera.capture(settings);
}
std::string markersToString(const std::vector<int> &markerIds)
{
std::ostringstream oss;
for(const auto &id : markerIds)
{
oss << id << " ";
}
return oss.str();
}
void handleAddPose(
size_t &currentPoseId,
std::vector<Zivid::Calibration::HandEyeInput> &handEyeInput,
Zivid::Camera &camera,
const std::string &calibrationObject)
{
const auto robotPose = enterRobotPose(currentPoseId);
std::cout << "Detecting calibration object in point cloud" << std::endl;
if(calibrationObject == "c")
{
const auto frame = Zivid::Calibration::captureCalibrationBoard(camera);
const auto detectionResult = Zivid::Calibration::detectCalibrationBoard(frame);
if(detectionResult.valid())
{
std::cout << "Calibration board detected " << std::endl;
handEyeInput.emplace_back(robotPose, detectionResult);
currentPoseId++;
}
else
{
std::cout
<< "Failed to detect calibration board, ensure that the entire board is in the view of the camera"
<< std::endl;
}
}
else if(calibrationObject == "m")
{
const auto frame = assistedCapture(camera);
auto markerDictionary = Zivid::Calibration::MarkerDictionary::aruco4x4_50;
std::vector<int> markerIds = { 1, 2, 3 };
std::cout << "Detecting arUco marker IDs " << markersToString(markerIds) << "from the dictionary "
<< markerDictionary << std::endl;
auto detectionResult = Zivid::Calibration::detectMarkers(frame, markerIds, markerDictionary);
if(detectionResult.valid())
{
std::cout << "ArUco marker(s) detected: " << detectionResult.detectedMarkers().size() << std::endl;
handEyeInput.emplace_back(robotPose, detectionResult);
currentPoseId++;
}
else
{
std::cout
<< "Failed to detect any ArUco markers, ensure that at least one ArUco marker is in the view of the camera"
<< std::endl;
}
}
}
std::vector<Zivid::Calibration::HandEyeInput> readHandEyeInputs(Zivid::Camera &camera)
{
size_t currentPoseId{ 0 };
bool calibrate{ false };
std::string calibrationObject;
while(true)
{
std::cout
<< "Enter calibration object you are using, m (for ArUco marker(s)) or c (for Zivid checkerboard): "
<< std::endl;
calibrationObject = getInput();
if(calibrationObject == "m" || calibrationObject == "c")
{
break;
}
}
std::cout << "Zivid primarily operates with a (4x4) transformation matrix. To convert" << std::endl;
std::cout << "from axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out" << std::endl;
std::cout << "our PoseConversions sample." << std::endl;
std::vector<Zivid::Calibration::HandEyeInput> handEyeInput;
do
{
switch(enterCommand())
{
case CommandType::AddPose:
{
try
{
handleAddPose(currentPoseId, handEyeInput, camera, calibrationObject);
}
catch(const std::exception &e)
{
std::cout << "Error: " << Zivid::toString(e) << std::endl;
continue;
}
break;
}
case CommandType::Calibrate:
{
calibrate = true;
break;
}
case CommandType::Unknown:
{
std::cout << "Error: Unknown command" << std::endl;
break;
}
}
} while(!calibrate);
return handEyeInput;
}
Zivid::Calibration::HandEyeOutput performCalibration(
const std::vector<Zivid::Calibration::HandEyeInput> &handEyeInput)
{
while(true)
{
std::cout << "Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand): ";
const auto calibrationType = getInput();
if(calibrationType == "eth" || calibrationType == "ETH")
{
std::cout << "Performing eye-to-hand calibration with " << handEyeInput.size() << " dataset pairs"
<< std::endl;
std::cout << "The resulting transform is the camera pose in robot base frame" << std::endl;
return Zivid::Calibration::calibrateEyeToHand(handEyeInput);
}
if(calibrationType == "eih" || calibrationType == "EIH")
{
std::cout << "Performing eye-in-hand calibration with " << handEyeInput.size() << " dataset pairs"
<< std::endl;
std::cout << "The resulting transform is the camera pose in flange (end-effector) frame" << std::endl;
return Zivid::Calibration::calibrateEyeInHand(handEyeInput);
}
std::cout << "Entered uknown method" << std::endl;
}
}
} // namespace
int main()
{
try
{
Zivid::Application zivid;
std::cout << "Connecting to camera" << std::endl;
auto camera{ zivid.connectCamera() };
const auto handEyeInput{ readHandEyeInputs(camera) };
const auto calibrationResult{ performCalibration(handEyeInput) };
std::cout << "Zivid primarily operates with a (4x4) transformation matrix. To convert" << std::endl;
std::cout << "to axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out" << std::endl;
std::cout << "our PoseConversions sample." << std::endl;
if(calibrationResult.valid())
{
std::cout << "Hand-Eye calibration OK\n"
<< "Result:\n"
<< calibrationResult << std::endl;
}
else
{
std::cout << "Hand-Eye calibration FAILED" << std::endl;
return EXIT_FAILURE;
}
}
catch(const std::exception &e)
{
std::cerr << "\nError: " << Zivid::toString(e) << std::endl;
std::cout << "Press enter to exit." << std::endl;
std::cin.get();
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
/*
Perform Hand-Eye calibration.
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using Zivid.NET.Calibration;
using Duration = Zivid.NET.Duration;
class Program
{
static int Main()
{
try
{
var zivid = new Zivid.NET.Application();
Console.WriteLine("Connecting to camera");
var camera = zivid.ConnectCamera();
var handEyeInput = readHandEyeInputs(camera);
var calibrationResult = performCalibration(handEyeInput);
Console.WriteLine("Zivid primarily operates with a (4x4) transformation matrix. To convert");
Console.WriteLine("to axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out");
Console.WriteLine("our PoseConversions sample.");
if (calibrationResult.Valid())
{
Console.WriteLine("{0}\n{1}\n{2}", "Hand-Eye calibration OK", "Result: ", calibrationResult);
}
else
{
Console.WriteLine("Hand-Eye calibration FAILED");
return 1;
}
}
catch (Exception ex)
{
Console.WriteLine("Error: {0}", ex.Message);
return 1;
}
return 0;
}
static List<HandEyeInput> readHandEyeInputs(Zivid.NET.Camera camera)
{
var handEyeInput = new List<HandEyeInput>();
var currentPoseId = 0U;
var beingInput = true;
var calibrationObject = "";
while (true)
{
Console.WriteLine("Enter calibration object you are using, m (for ArUco marker(s)) or c (for Zivid checkerboard): ");
calibrationObject = Console.ReadLine();
if (calibrationObject.Equals("m", StringComparison.CurrentCultureIgnoreCase) ||
calibrationObject.Equals("c", StringComparison.CurrentCultureIgnoreCase))
{
break;
}
}
Interaction.ExtendInputBuffer(2048);
Console.WriteLine("Zivid primarily operates with a (4x4) transformation matrix. To convert");
Console.WriteLine("from axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out");
Console.WriteLine("our PoseConversions sample.");
do
{
switch (Interaction.EnterCommand())
{
case CommandType.AddPose:
try
{
HandleAddPose(ref currentPoseId, ref handEyeInput, camera, calibrationObject);
}
catch (Exception ex)
{
Console.WriteLine("Error: {0}", ex.Message);
continue;
}
break;
case CommandType.Calibrate: beingInput = false; break;
case CommandType.Unknown: Console.WriteLine("Error: Unknown command"); break;
}
} while (beingInput);
return handEyeInput;
}
public static void HandleAddPose(ref uint currentPoseId, ref List<HandEyeInput> handEyeInput, Zivid.NET.Camera camera, string calibrationObject)
{
var robotPose = Interaction.EnterRobotPose(currentPoseId);
Console.Write("Detecting calibration object in point cloud");
if (calibrationObject.Equals("c", StringComparison.CurrentCultureIgnoreCase))
{
var frame = Zivid.NET.Calibration.Detector.CaptureCalibrationBoard(camera);
var detectionResult = Detector.DetectCalibrationBoard(frame);
if (detectionResult.Valid())
{
Console.WriteLine("Calibration board detected");
handEyeInput.Add(new HandEyeInput(robotPose, detectionResult));
++currentPoseId;
}
else
{
Console.WriteLine("Failed to detect calibration board, ensure that the entire board is in the view of the camera");
}
}
else if (calibrationObject.Equals("m", StringComparison.CurrentCultureIgnoreCase))
{
var frame = AssistedCapture(camera);
var markerDictionary = Zivid.NET.MarkerDictionary.Aruco4x4_50;
var markerIds = new List<int> { 1, 2, 3 };
Console.WriteLine("Detecting arUco marker IDs " + string.Join(", ", markerIds));
var detectionResult = Detector.DetectMarkers(frame, markerIds, markerDictionary);
if (detectionResult.Valid())
{
Console.WriteLine("ArUco marker(s) detected: " + detectionResult.DetectedMarkers().Length);
handEyeInput.Add(new HandEyeInput(robotPose, detectionResult));
++currentPoseId;
}
else
{
Console.WriteLine("Failed to detect any ArUco markers, ensure that at least one ArUco marker is in the view of the camera");
}
}
}
static Zivid.NET.Calibration.HandEyeOutput performCalibration(List<HandEyeInput> handEyeInput)
{
while (true)
{
Console.WriteLine("Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand): ");
var calibrationType = Console.ReadLine();
if (calibrationType.Equals("eth", StringComparison.CurrentCultureIgnoreCase))
{
Console.WriteLine("Performing eye-to-hand calibration with " + handEyeInput.Count + " dataset pairs");
Console.WriteLine("The resulting transform is the camera pose in robot base frame");
return Calibrator.CalibrateEyeToHand(handEyeInput);
}
if (calibrationType.Equals("eih", StringComparison.CurrentCultureIgnoreCase))
{
Console.WriteLine("Performing eye-in-hand calibration with " + handEyeInput.Count + " dataset pairs");
Console.WriteLine("The resulting transform is the camera pose in flange (end-effector) frame");
return Calibrator.CalibrateEyeInHand(handEyeInput);
}
Console.WriteLine("Entered unknown method");
}
}
public static Zivid.NET.Frame AssistedCapture(Zivid.NET.Camera camera)
{
var suggestSettingsParameters = new Zivid.NET.CaptureAssistant.SuggestSettingsParameters
{
AmbientLightFrequency =
Zivid.NET.CaptureAssistant.SuggestSettingsParameters.AmbientLightFrequencyOption.none,
MaxCaptureTime = Duration.FromMilliseconds(800)
};
var settings = Zivid.NET.CaptureAssistant.Assistant.SuggestSettings(camera, suggestSettingsParameters);
return camera.Capture(settings);
}
}
enum CommandType
{
AddPose,
Calibrate,
Unknown
}
class Interaction
{
// Console.ReadLine only supports reading 256 characters, by default. This limit is modified
// by calling ExtendInputBuffer with the maximum length of characters to be read.
public static void ExtendInputBuffer(int size)
{
Console.SetIn(new StreamReader(Console.OpenStandardInput(), Console.InputEncoding, false, size));
}
public static CommandType EnterCommand()
{
Console.Write("Enter command, p (to add robot pose) or c (to perform calibration): ");
var command = Console.ReadLine().ToLower();
switch (command)
{
case "p": return CommandType.AddPose;
case "c": return CommandType.Calibrate;
default: return CommandType.Unknown;
}
}
public static Pose EnterRobotPose(ulong index)
{
var elementCount = 16;
Console.WriteLine(
"Enter pose with id (a line with {0} space separated values describing 4x4 row-major matrix) : {1}",
elementCount,
index);
var input = Console.ReadLine();
var elements = input.Split().Where(x => !string.IsNullOrEmpty(x.Trim())).Select(x => float.Parse(x)).ToArray();
var robotPose = new Pose(elements);
Console.WriteLine("The following pose was entered: \n{0}", robotPose);
return robotPose;
}
}
"""
Perform Hand-Eye calibration.
"""
import datetime
from pathlib import Path
from typing import List, Tuple
import numpy as np
import zivid
from zividsamples.save_load_matrix import assert_affine_matrix_and_save
def _enter_robot_pose(index: int) -> zivid.calibration.Pose:
"""Robot pose user input.
Args:
index: Robot pose ID
Returns:
robot_pose: Robot pose
"""
inputted = input(
f"Enter pose with id={index} (a line with 16 space separated values describing 4x4 row-major matrix): "
)
elements = inputted.split(maxsplit=15)
data = np.array(elements, dtype=np.float64).reshape((4, 4))
robot_pose = zivid.calibration.Pose(data)
print(f"The following pose was entered:\n{robot_pose}")
return robot_pose
def _perform_calibration(hand_eye_input: List[zivid.calibration.HandEyeInput]) -> zivid.calibration.HandEyeOutput:
"""Hand-Eye calibration type user input.
Args:
hand_eye_input: Hand-Eye calibration input
Returns:
hand_eye_output: Hand-Eye calibration result
"""
while True:
calibration_type = input("Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand): ").strip()
if calibration_type.lower() == "eth":
print(f"Performing eye-to-hand calibration with {len(hand_eye_input)} dataset pairs")
print("The resulting transform is the camera pose in robot base frame")
hand_eye_output = zivid.calibration.calibrate_eye_to_hand(hand_eye_input)
return hand_eye_output
if calibration_type.lower() == "eih":
print(f"Performing eye-in-hand calibration with {len(hand_eye_input)} dataset pairs")
print("The resulting transform is the camera pose in flange (end-effector) frame")
hand_eye_output = zivid.calibration.calibrate_eye_in_hand(hand_eye_input)
return hand_eye_output
print(f"Unknown calibration type: '{calibration_type}'")
def _assisted_capture(camera: zivid.Camera) -> zivid.Frame:
"""Acquire frame with capture assistant.
Args:
camera: Zivid camera
Returns:
frame: Zivid frame
"""
suggest_settings_parameters = zivid.capture_assistant.SuggestSettingsParameters(
max_capture_time=datetime.timedelta(milliseconds=800),
ambient_light_frequency=zivid.capture_assistant.SuggestSettingsParameters.AmbientLightFrequency.none,
)
settings = zivid.capture_assistant.suggest_settings(camera, suggest_settings_parameters)
return camera.capture(settings)
def _handle_add_pose(
current_pose_id: int, hand_eye_input: List, camera: zivid.Camera, calibration_object: str
) -> Tuple[int, List]:
"""Acquire frame with capture assistant.
Args:
current_pose_id: Counter of the current pose in the hand-eye calibration dataset
hand_eye_input: List of hand-eye calibration dataset pairs (poses and point clouds)
camera: Zivid camera
calibration_object: m (for ArUco marker(s)) or c (for Zivid checkerboard)
Returns:
Tuple[int, List]: Updated current_pose_id and hand_eye_input
"""
robot_pose = _enter_robot_pose(current_pose_id)
print("Detecting calibration object in point cloud")
if calibration_object == "c":
frame = zivid.calibration.capture_calibration_board(camera)
detection_result = zivid.calibration.detect_calibration_board(frame)
if detection_result.valid():
print("Calibration board detected")
hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result))
current_pose_id += 1
else:
print("Failed to detect calibration board, ensure that the entire board is in the view of the camera")
elif calibration_object == "m":
frame = _assisted_capture(camera)
marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50
marker_ids = [1, 2, 3]
print(f"Detecting arUco marker IDs {marker_ids} from the dictionary {marker_dictionary}")
detection_result = zivid.calibration.detect_markers(frame, marker_ids, marker_dictionary)
if detection_result.valid():
print(f"ArUco marker(s) detected: {len(detection_result.detected_markers())}")
hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result))
current_pose_id += 1
else:
print(
"Failed to detect any ArUco markers, ensure that at least one ArUco marker is in the view of the camera"
)
return current_pose_id, hand_eye_input
def _main() -> None:
app = zivid.Application()
print("Connecting to camera")
camera = app.connect_camera()
current_pose_id = 0
hand_eye_input = []
calibrate = False
while True:
calibration_object = input(
"Enter calibration object you are using, m (for ArUco marker(s)) or c (for Zivid checkerboard): "
).strip()
if calibration_object.lower() == "m" or calibration_object.lower() == "c":
break
print(
"Zivid primarily operates with a (4x4) transformation matrix. To convert\n"
"from axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out\n"
"our pose_conversions sample."
)
while not calibrate:
command = input("Enter command, p (to add robot pose) or c (to perform calibration): ").strip()
if command == "p":
try:
current_pose_id, hand_eye_input = _handle_add_pose(
current_pose_id, hand_eye_input, camera, calibration_object
)
except ValueError as ex:
print(ex)
elif command == "c":
calibrate = True
else:
print(f"Unknown command '{command}'")
calibration_result = _perform_calibration(hand_eye_input)
transform = calibration_result.transform()
transform_file_path = Path(Path(__file__).parent / "transform.yaml")
assert_affine_matrix_and_save(transform, transform_file_path)
print(
"Zivid primarily operates with a (4x4) transformation matrix. To convert\n"
"to axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out\n"
"our pose_conversions sample."
)
if calibration_result.valid():
print("Hand-Eye calibration OK")
print(f"Result:\n{calibration_result}")
else:
print("Hand-Eye calibration FAILED")
if __name__ == "__main__":
_main()
Our code samples make it straightforward to perform hand-eye calibration:
Alternatively, the hand-eye transform can be computed with a CLI tool:
This command-line interface lets you compute the transformation matrix and the residuals from the dataset collected in steps 1-3. The results are saved to a file specified by the user. The CLI tool is experimental and will eventually be replaced by a GUI.
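For illustration only, an invocation might look like the line below. The tool name and flags are an assumption recalled from Zivid's documentation; since the tool is experimental, verify them against its --help output for your SDK version.

# Hypothetical invocation -- tool and flag names are assumptions, check --help first
ZividExperimentalHandEyeCalibration --eih -d "path/to/dataset" --tf transform.yaml --rf residuals.yaml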
Continue reading about considerations and recommendations for hand-eye calibration.