Moving and Grasping
Case video
Code using the camera_detect vision-recognition package

The script below navigates the robot to a goal pose on the built map and then uses the camera_detect package to locate and grasp the target with each arm:
import numpy as np
from pymycobot import Mercury

from camera_detect import camera_detect
from mercury_ros_api import MapNavigation

if __name__ == "__main__":
    # Navigate to the goal pose (x, y, quaternion z, quaternion w) on the map
    map_navigation = MapNavigation()
    flag_feed_goalReached = map_navigation.moveToGoal(
        1.8811798181533813, 1.25142673254013062,
        0.9141818042023212, 0.4053043657122249)

    if flag_feed_goalReached:
        # Camera intrinsic matrix and distortion coefficients from calibration
        camera_params = np.load("camera_params.npz")
        mtx, dist = camera_params["mtx"], camera_params["dist"]

        # Serial connections to the left and right arms
        ml = Mercury("/dev/left_arm")
        mr = Mercury("/dev/right_arm")

        arm = 0             # arm identifier passed to camera_detect
        catch_mode = 0      # grasp mode for vision_trace
        left_camera_id = 3  # UVC camera indices
        right_camera_id = 6
        stag_size = 32      # STag marker edge length (mm)

        # One detector per arm-mounted camera
        ml_obj = camera_detect(left_camera_id, stag_size, mtx, dist, arm)
        mr_obj = camera_detect(right_camera_id, stag_size, mtx, dist, arm)

        # Detect the marker and execute the grasp with each arm
        mr_obj.vision_trace(catch_mode, mr)
        ml_obj.vision_trace(catch_mode, ml)
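
moveToGoal returns a flag indicating whether the goal was reached. If navigation can fail transiently, the call can be wrapped in a small retry helper. The sketch below is illustrative (navigate_with_retry is not part of mercury_ros_api); it assumes only the moveToGoal signature used above:

def navigate_with_retry(nav, x, y, qz, qw, attempts=3):
    # Try the navigation goal a few times before giving up
    for i in range(attempts):
        if nav.moveToGoal(x, y, qz, qw):
            return True
        print(f"navigation attempt {i + 1} failed, retrying")
    return False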
Code without the camera_detect vision-recognition package

The same task can be implemented without the camera_detect package by doing the hand-eye math explicitly, as in the script below:
import time
import typing as T

import cv2
import numpy as np
import stag
from numpy.typing import NDArray
from pymycobot import Mercury

from uvc_camera import UVCCamera
from arm_control import *
from marker_utils import *
from mercury_ros_api import MapNavigation

Tool_LEN = 175    # flange-to-gripper-tip length (mm)
Camera_LEN = 78   # tool-to-camera mounting offset (mm)
MARKER_SIZE = 32  # STag marker edge length (mm)

np.set_printoptions(suppress=True, formatter={'float_kind': '{:.2f}'.format})

# Camera intrinsic matrix and distortion coefficients from calibration
camera_params = np.load("src/camera_params.npz")
mtx, dist = camera_params["mtx"], camera_params["dist"]

ml = Mercury("/dev/left_arm")  # left-arm serial connection
def CvtRotationMatrixToEulerAngle(pdtRotationMatrix):
    # Convert a rotation matrix to ZYX Euler angles (roll, pitch, yaw), in radians
    pdtEulerAngle = np.zeros(3)
    pdtEulerAngle[2] = np.arctan2(pdtRotationMatrix[1, 0], pdtRotationMatrix[0, 0])
    fCosRoll = np.cos(pdtEulerAngle[2])
    fSinRoll = np.sin(pdtEulerAngle[2])
    pdtEulerAngle[1] = np.arctan2(-pdtRotationMatrix[2, 0],
                                  (fCosRoll * pdtRotationMatrix[0, 0]) + (fSinRoll * pdtRotationMatrix[1, 0]))
    pdtEulerAngle[0] = np.arctan2((fSinRoll * pdtRotationMatrix[0, 2]) - (fCosRoll * pdtRotationMatrix[1, 2]),
                                  (-fSinRoll * pdtRotationMatrix[0, 1]) + (fCosRoll * pdtRotationMatrix[1, 1]))
    return pdtEulerAngle
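
# Quick sanity check: a 90-degree rotation about Z decodes to
# (roll, pitch, yaw) = (0, 0, pi/2):
#   CvtRotationMatrixToEulerAngle(np.array([[0, -1, 0],
#                                           [1, 0, 0],
#                                           [0, 0, 1]]))  # -> [0.00 0.00 1.57]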
def CvtEulerAngleToRotationMatrix(ptrEulerAngle):
    # Convert ZYX Euler angles (roll, pitch, yaw) in radians to a rotation matrix
    ptrSinAngle = np.sin(ptrEulerAngle)
    ptrCosAngle = np.cos(ptrEulerAngle)
    ptrRotationMatrix = np.zeros((3, 3))
    ptrRotationMatrix[0, 0] = ptrCosAngle[2] * ptrCosAngle[1]
    ptrRotationMatrix[0, 1] = ptrCosAngle[2] * ptrSinAngle[1] * ptrSinAngle[0] - ptrSinAngle[2] * ptrCosAngle[0]
    ptrRotationMatrix[0, 2] = ptrCosAngle[2] * ptrSinAngle[1] * ptrCosAngle[0] + ptrSinAngle[2] * ptrSinAngle[0]
    ptrRotationMatrix[1, 0] = ptrSinAngle[2] * ptrCosAngle[1]
    ptrRotationMatrix[1, 1] = ptrSinAngle[2] * ptrSinAngle[1] * ptrSinAngle[0] + ptrCosAngle[2] * ptrCosAngle[0]
    ptrRotationMatrix[1, 2] = ptrSinAngle[2] * ptrSinAngle[1] * ptrCosAngle[0] - ptrCosAngle[2] * ptrSinAngle[0]
    ptrRotationMatrix[2, 0] = -ptrSinAngle[1]
    ptrRotationMatrix[2, 1] = ptrCosAngle[1] * ptrSinAngle[0]
    ptrRotationMatrix[2, 2] = ptrCosAngle[1] * ptrCosAngle[0]
    return ptrRotationMatrix
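
# The two conversions invert each other while the pitch stays within
# (-pi/2, pi/2), which gives a handy round-trip test:
#   e = np.array([0.1, -0.2, 0.3])
#   np.allclose(CvtRotationMatrixToEulerAngle(CvtEulerAngleToRotationMatrix(e)), e)  # True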
def Transformation_matrix(coord):
    # Build a 4x4 homogeneous transform from a pose [x, y, z, rx, ry, rz]
    position_robot = coord[:3]
    pose_robot = coord[3:]
    RBT = CvtEulerAngleToRotationMatrix(pose_robot)
    PBT = np.array([[position_robot[0]],
                    [position_robot[1]],
                    [position_robot[2]]])
    temp = np.concatenate((RBT, PBT), axis=1)
    array_1x4 = np.array([[0, 0, 0, 1]])
    matrix = np.concatenate((temp, array_1x4), axis=0)
    return matrix
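
# The result has the standard block layout
#   [[ R  t ]
#    [ 0  1 ]]
# where R is the 3x3 rotation and t = [x, y, z] the translation.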
def Eyes_in_hand_left(coord, camera):
    # Map a point from the camera frame into the robot base frame (eye in hand)
    Position_Camera = np.transpose(camera[:3])  # marker position in the camera frame
    Matrix_BT = Transformation_matrix(coord)    # base -> tool transform from the current pose
    # Fixed hand-eye matrix: camera mounting offset relative to the tool frame
    Matrix_TC = np.array([[0, -1, 0, Camera_LEN],
                          [1, 0, 0, 0],
                          [0, 0, 1, -Tool_LEN],
                          [0, 0, 0, 1]])
    Position_Camera = np.append(Position_Camera, 1)  # homogeneous coordinates
    Position_B = Matrix_BT @ Matrix_TC @ Position_Camera
    return Position_B
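
# Worked example: with the tool at the base origin and zero rotation, a point
# at the camera origin lands exactly at the fixed mounting offsets:
#   Eyes_in_hand_left(np.zeros(6), np.zeros(3))  # -> [78.00 0.00 -175.00 1.00]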
def waitl():
    # Block until the left arm finishes its current motion
    time.sleep(0.2)
    while ml.is_moving():
        time.sleep(0.03)
def calc_markers_base_position(corners: NDArray, ids: T.List, marker_size: int, mtx: NDArray, dist: NDArray) -> T.List:
    # Solve PnP for each detected marker and return the pose of the last one
    # as [x, y, z, rx, ry, rz] in the camera frame ([] if nothing was detected)
    if len(corners) == 0:
        return []
    rvecs, tvecs = solve_marker_pnp(corners, marker_size, mtx, dist)
    for i, tvec, rvec in zip(ids, tvecs, rvecs):
        tvec = tvec.squeeze().tolist()
        rvec = rvec.squeeze().tolist()
        rotvector = np.array([[rvec[0], rvec[1], rvec[2]]])
        Rotation = cv2.Rodrigues(rotvector)[0]  # rotation vector -> rotation matrix
        Euler = CvtRotationMatrixToEulerAngle(Rotation)
        target_coords = np.array([tvec[0], tvec[1], tvec[2], Euler[0], Euler[1], Euler[2]])
    return target_coords
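
# Typical usage after stag.detectMarkers:
#   pose = calc_markers_base_position(corners, ids, MARKER_SIZE, mtx, dist)
#   pose[:3]  # marker translation in the camera frame (same unit as MARKER_SIZE, mm)
#   pose[3:]  # marker orientation as ZYX Euler angles (radians)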
if __name__ == "__main__":
    # Navigate to the goal pose (x, y, quaternion z, quaternion w) on the map
    map_navigation = MapNavigation()
    flag_feed_goalReached = map_navigation.moveToGoal(
        1.8811798181533813, 1.25142673254013062,
        0.9141818042023212, 0.4053043657122249)

    if flag_feed_goalReached:
        # Open the arm-mounted UVC camera
        camera = UVCCamera(5, mtx, dist)
        camera.capture()

        # Observation pose for the left arm (joint angles in degrees)
        origin_anglesL = [-44.24, 15.56, 0.0, -102.59, 65.28, 52.06, 23.49]

        ml.set_gripper_mode(0)
        ml.set_tool_reference([0, 0, Tool_LEN, 0, 0, 0])  # tool frame at the gripper tip
        ml.set_end_type(1)                                # use the tool frame for coordinates

        sp = 40  # movement speed
        ml.send_angles(origin_anglesL, sp)  # move to the observation pose
        waitl()

        # Grab a frame and detect STag markers (HD11 library)
        camera.update_frame()
        frame = camera.color_frame()
        (corners, ids, rejected_corners) = stag.detectMarkers(frame, 11)

        # Marker pose in the camera frame
        marker_pos_pack = calc_markers_base_position(corners, ids, MARKER_SIZE, mtx, dist)

        # Current end pose in base coordinates; angles to radians for the transform
        cur_coords = np.array(ml.get_base_coords())
        cur_bcl = cur_coords.copy()
        cur_bcl[-3:] *= (np.pi / 180)

        # Marker position in the base frame via the eye-in-hand transform
        fact_bcl = Eyes_in_hand_left(cur_bcl, marker_pos_pack)

        # Approach 50 mm above the target, keeping the current orientation
        target_coords = cur_coords.copy()
        target_coords[0] = fact_bcl[0]
        target_coords[1] = fact_bcl[1]
        target_coords[2] = fact_bcl[2] + 50
        ml.send_base_coords(target_coords, 30)
        waitl()

        ml.set_gripper_value(100, 100)          # open the gripper
        ml.send_base_coord(3, fact_bcl[2], 10)  # descend along z to the grasp height
        waitl()

        ml.set_gripper_value(20, 100)  # close the gripper on the object
        time.sleep(2)
        ml.send_base_coord(3, fact_bcl[2] + 50, 10)  # lift the object 50 mm
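
        # Optional continuation: return to the observation pose and release the
        # object, reusing only calls and values already defined above
        waitl()
        ml.send_angles(origin_anglesL, sp)  # carry the object back up
        waitl()
        ml.set_gripper_value(100, 100)      # open the gripper to release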