Change to new VGN interface

parent cd9fb814e9
commit c37af70f56
@@ -1,5 +1,8 @@
 active_grasp:
   frame_id: task
+  base_frame_id: panda_link0
+  ee_frame_id: panda_hand
+  ee_grasp_offset: [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.06]
   camera_name: cam
 vgn:
   model: $(find vgn)/data/models/vgn_conv.pth
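Note on the new keys: ee_grasp_offset encodes the fixed transform between the panda_hand frame and the grasp frame that was previously hard-coded in policies.py. Below is a minimal sketch of how the list could be decoded, assuming a (qx, qy, qz, qw, x, y, z) ordering for Transform.from_list; that ordering is an inference, not confirmed by this diff.

    import numpy as np
    from scipy.spatial.transform import Rotation

    def transform_from_list(values):
        # Assumed layout: first four entries are a quaternion (x, y, z, w),
        # last three are a translation in meters.
        rotation = Rotation.from_quat(values[:4])  # identity quaternion here
        translation = np.asarray(values[4:])       # 6 cm along the hand z-axis
        return rotation, translation

    rotation, translation = transform_from_list([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.06])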
@@ -299,17 +299,17 @@ Visualization Manager:
       Value: false
     Field of View: 0.7853981852531433
     Focal Point:
-      X: 0.1477860063314438
-      Y: 0.1189696341753006
-      Z: 0.4057367146015167
+      X: 0.192186176776886
+      Y: 0.14037109911441803
+      Z: 0.3879348933696747
     Focal Shape Fixed Size: true
     Focal Shape Size: 0.05000000074505806
     Invert Z Axis: false
     Name: Current View
     Near Clip Distance: 0.009999999776482582
-    Pitch: 0.10479649901390076
+    Pitch: 0.20979644358158112
     Target Frame: <Fixed Frame>
-    Yaw: 5.323526859283447
+    Yaw: 5.238524913787842
   Saved: ~
 Window Geometry:
   Displays:
@@ -327,5 +327,5 @@ Window Geometry:
   Views:
     collapsed: true
   Width: 1200
-  X: 992
-  Y: 65
+  X: 720
+  Y: 27
policies.py (52 changed lines)
@@ -4,7 +4,6 @@ import cv_bridge
 import numpy as np
 import rospy
 import scipy.interpolate
-import torch
 
 from geometry_msgs.msg import Pose
 from sensor_msgs.msg import Image, CameraInfo
@@ -14,8 +13,7 @@ from robot_utils.ros.conversions import *
 from robot_utils.ros.tf import TransformTree
 from robot_utils.perception import *
 from vgn import vis
-from vgn.detection import *
-from vgn.grasp import from_voxel_coordinates
+from vgn.detection import VGN, compute_grasps
 
 
 def get_policy(name):
@@ -29,14 +27,17 @@ def get_policy(name):
 
 class Policy:
     def __init__(self):
-        self.frame_id = rospy.get_param("~frame_id")
+        params = rospy.get_param("active_grasp")
+
+        self.frame_id = params["frame_id"]
 
         # Robot
         self.tf_tree = TransformTree()
+        self.H_EE_G = Transform.from_list(params["ee_grasp_offset"])
         self.target_pose_pub = rospy.Publisher("/target", Pose, queue_size=10)
 
         # Camera
-        camera_name = rospy.get_param("~camera_name")
+        camera_name = params["camera_name"]
         self.cam_frame_id = camera_name + "_optical_frame"
         self.cv_bridge = cv_bridge.CvBridge()
         depth_topic = camera_name + "/depth/image_raw"
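The constructor now reads the whole active_grasp namespace in one call. rospy.get_param on a namespace returns a dict of every key below it, so the per-key "~frame_id"-style lookups are no longer needed. A short sketch; the node name is illustrative only:

    import rospy

    rospy.init_node("policy_demo")  # hypothetical node name
    params = rospy.get_param("active_grasp")
    # e.g. {"frame_id": "task", "base_frame_id": "panda_link0",
    #       "ee_frame_id": "panda_hand",
    #       "ee_grasp_offset": [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.06],
    #       "camera_name": "cam"}
    frame_id = params["frame_id"]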
@@ -45,9 +46,8 @@ class Policy:
         self.intrinsic = from_camera_info_msg(msg)
 
         # VGN
-        model_path = Path(rospy.get_param("vgn/model"))
-        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.net = load_network(model_path, self.device)
+        params = rospy.get_param("vgn")
+        self.vgn = VGN(Path(params["model"]))
 
         rospy.sleep(1.0)
         self.H_B_T = self.tf_tree.lookup("panda_link0", self.frame_id, rospy.Time.now())
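The torch device handling and the load_network call move behind the new VGN class, so the policy keeps a single self.vgn handle. The sketch below shows what the wrapper plausibly looks like, inferred from the removed lines; only the constructor and predict signatures are actually shown by this diff.

    from pathlib import Path
    import torch
    from vgn.detection import load_network, predict  # names used by the old code

    class VGNSketch:
        # Hypothetical stand-in for vgn.detection.VGN, inferred from this diff.
        def __init__(self, model_path: Path):
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            self.net = load_network(model_path, self.device)

        def predict(self, tsdf_grid):
            # Forward pass; compute_grasps(out, voxel_size=...) then extracts
            # grasp candidates in metric coordinates.
            return predict(tsdf_grid, self.net, self.device)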
@@ -99,37 +99,27 @@ class FixedTrajectoryBaseline(Policy):
         map_cloud = self.tsdf.get_map_cloud()
         points = np.asarray(map_cloud.points)
         distances = np.asarray(map_cloud.colors)[:, 0]
-        tsdf_grid = grid_from_cloud(points, distances, self.tsdf.voxel_size)
-
-        vis.draw_tsdf(tsdf_grid.squeeze(), self.tsdf.voxel_size)
-
-        qual, rot, width = predict(tsdf_grid, self.net, self.device)
-        qual, rot, width = process(tsdf_grid, qual, rot, width)
-        grasps, scores = select(qual.copy(), rot, width)
-        grasps, scores = np.asarray(grasps), np.asarray(scores)
-
-        grasps = [from_voxel_coordinates(g, self.tsdf.voxel_size) for g in grasps]
-
-        # Select the highest grasp
-        heights = np.empty(len(grasps))
-        for i, grasp in enumerate(grasps):
-            heights[i] = grasp.pose.translation[2]
-        idx = np.argmax(heights)
-        grasp, score = grasps[idx], scores[idx]
-        vis.draw_grasps(grasps, scores, 0.05)
+        tsdf_grid = create_grid_from_map_cloud(
+            points, distances, self.tsdf.voxel_size
+        )
+        out = self.vgn.predict(tsdf_grid)
+        grasps = compute_grasps(out, voxel_size=self.tsdf.voxel_size)
+
+        # Visualize
+        vis.draw_tsdf(tsdf_grid, self.tsdf.voxel_size)
+        vis.draw_grasps(grasps, 0.05)
 
         # Ensure that the camera is pointing forward.
+        grasp = grasps[0]
         rot = grasp.pose.rotation
         axis = rot.as_matrix()[:, 0]
         if axis[0] < 0:
             grasp.pose.rotation = rot * Rotation.from_euler("z", np.pi)
 
-        # Add offset between grasp frame and panda_hand frame
-        T_task_grasp = grasp.pose * Transform(
-            Rotation.identity(), np.r_[0.0, 0.0, -0.06]
-        )
+        # Compute target pose of the EE
+        H_T_G = grasp.pose
+        H_B_EE = self.H_B_T * H_T_G * self.H_EE_G.inv()
+        self.best_grasp = H_B_EE
 
-        self.best_grasp = self.H_B_T * T_task_grasp
         self.done = True
         return
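The manual predict/process/select pipeline, the from_voxel_coordinates conversion, and the height-based selection are replaced by compute_grasps, and the new code takes grasps[0] as the best candidate, which suggests the returned list is ranked. The target end-effector pose is now assembled from three transforms. Below is a sketch of that chain with plain 4x4 homogeneous matrices, assuming robot_utils' Transform composes like matrix multiplication; the numeric poses are placeholders.

    import numpy as np

    def transform(rotation_3x3, translation_3):
        # Build a 4x4 homogeneous transform from a rotation and a translation.
        H = np.eye(4)
        H[:3, :3] = rotation_3x3
        H[:3, 3] = translation_3
        return H

    H_B_T = transform(np.eye(3), [0.4, 0.0, 0.2])    # task frame in the base frame (placeholder)
    H_T_G = transform(np.eye(3), [0.1, 0.0, 0.05])   # grasp pose in the task frame (placeholder)
    H_EE_G = transform(np.eye(3), [0.0, 0.0, 0.06])  # ee_grasp_offset from the config

    # Pose of panda_hand in the base frame that aligns its grasp frame with the grasp:
    H_B_EE = H_B_T @ H_T_G @ np.linalg.inv(H_EE_G)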