commit 9a03a26172 (parent 12ae207b5b)
Michel Breyer, 2021-07-06 14:00:04 +02:00
10 changed files with 445 additions and 423 deletions

View File (bt_sim_node.py)

@@ -3,17 +3,14 @@
 import actionlib
 import argparse
 import cv_bridge
-import numpy as np
-import rospy
-import tf2_ros
-import control_msgs.msg
 import franka_gripper.msg
 from geometry_msgs.msg import PoseStamped
+import numpy as np
+import rospy
 from sensor_msgs.msg import JointState, Image, CameraInfo
-import std_srvs.srv
-from robot_utils.controllers import CartesianPoseController
+import std_srvs.srv as std_srvs
+import tf2_ros
 from robot_utils.ros.conversions import *
 from simulation import Simulation
@@ -21,76 +18,36 @@ from simulation import Simulation
 class BtSimNode:
     def __init__(self, gui):
         self.sim = Simulation(gui=gui)
+        self.robot_state_interface = RobotStateInterface(self.sim.arm, self.sim.gripper)
+        self.arm_interface = ArmInterface(self.sim.arm, self.sim.controller)
+        self.gripper_interface = GripperInterface(self.sim.gripper)
+        self.camera_interface = CameraInterface(self.sim.camera)
-        self.controller_update_rate = 60
-        self.joint_state_publish_rate = 60
-        self.camera_publish_rate = 5
-        self.cv_bridge = cv_bridge.CvBridge()
-        self.setup_robot_topics()
-        self.setup_camera_topics()
-        self.setup_controllers()
-        self.setup_gripper_interface()
-        self.broadcast_transforms()
-        rospy.Service("reset", std_srvs.srv.Trigger, self.reset)
         self.step_cnt = 0
         self.reset_requested = False
+        self.advertise_services()
+        self.broadcast_transforms()

-    def setup_robot_topics(self):
-        self.joint_pub = rospy.Publisher("joint_states", JointState, queue_size=10)
-
-    def setup_controllers(self):
-        self.cartesian_pose_controller = CartesianPoseController(self.sim.arm)
-        rospy.Subscriber("command", PoseStamped, self.target_pose_cb)
-
-    def target_pose_cb(self, msg):
-        assert msg.header.frame_id == self.sim.arm.base_frame
-        self.cartesian_pose_controller.x_d = from_pose_msg(msg.pose)
-
-    def setup_gripper_interface(self):
-        self.gripper_interface = GripperInterface(self.sim.gripper)
-
-    def setup_camera_topics(self):
-        self.cam_info_msg = to_camera_info_msg(self.sim.camera.intrinsic)
-        self.cam_info_msg.header.frame_id = "cam_optical_frame"
-        self.cam_info_pub = rospy.Publisher(
-            "/cam/depth/camera_info",
-            CameraInfo,
-            queue_size=10,
-        )
-        self.depth_pub = rospy.Publisher("/cam/depth/image_raw", Image, queue_size=10)
+    def advertise_services(self):
+        rospy.Service("reset", std_srvs.Trigger, self.reset)

     def broadcast_transforms(self):
         self.static_broadcaster = tf2_ros.StaticTransformBroadcaster()
-        msgs = []
-        msg = geometry_msgs.msg.TransformStamped()
-        msg.header.stamp = rospy.Time.now()
-        msg.header.frame_id = "world"
-        msg.child_frame_id = "panda_link0"
-        msg.transform = to_transform_msg(self.sim.T_W_B)
-        msgs.append(msg)
-        msg = geometry_msgs.msg.TransformStamped()
-        msg.header.stamp = rospy.Time.now()
-        msg.header.frame_id = "world"
-        msg.child_frame_id = "task"
-        msg.transform = to_transform_msg(Transform.translation(self.sim.origin))
-        msgs.append(msg)
+        msgs = [
+            to_transform_stamped_msg(self.sim.T_W_B, "world", "panda_link0"),
+            to_transform_stamped_msg(
+                Transform.translation(self.sim.origin), "world", "task"
+            ),
+        ]
         self.static_broadcaster.sendTransform(msgs)

     def reset(self, req):
         self.reset_requested = True
         rospy.sleep(1.0)  # wait for the latest sim step to finish
         self.sim.reset()
-        self.cartesian_pose_controller.x_d = self.sim.arm.pose()
         self.step_cnt = 0
         self.reset_requested = False
-        return std_srvs.srv.TriggerResponse(success=True)
+        return std_srvs.TriggerResponse(success=True)

     def run(self):
         rate = rospy.Rate(self.sim.rate)
@@ -102,18 +59,22 @@ class BtSimNode:
             rate.sleep()

     def handle_updates(self):
-        if self.step_cnt % int(self.sim.rate / self.controller_update_rate) == 0:
-            self.cartesian_pose_controller.update()
-            self.gripper_interface.update(1.0 / self.controller_update_rate)
-        if self.step_cnt % int(self.sim.rate / self.joint_state_publish_rate) == 0:
-            self.publish_joint_state()
-        if self.step_cnt % int(self.sim.rate / self.camera_publish_rate) == 0:
-            self.publish_cam_info()
-            self.publish_cam_imgs()
+        self.robot_state_interface.update()
+        self.arm_interface.update()
+        self.gripper_interface.update(self.sim.dt)
+        if self.step_cnt % int(self.sim.rate / 5) == 0:
+            self.camera_interface.update()

-    def publish_joint_state(self):
-        q, dq = self.sim.arm.get_state()
-        width = self.sim.gripper.read()
+
+class RobotStateInterface:
+    def __init__(self, arm, gripper):
+        self.arm = arm
+        self.gripper = gripper
+        self.joint_pub = rospy.Publisher("joint_states", JointState, queue_size=10)
+
+    def update(self):
+        q, _ = self.arm.get_state()
+        width = self.gripper.read()
         msg = JointState()
         msg.header.stamp = rospy.Time.now()
         msg.name = ["panda_joint{}".format(i) for i in range(1, 8)] + [
@@ -121,18 +82,23 @@ class BtSimNode:
             "panda_finger_joint2",
         ]
         msg.position = np.r_[q, 0.5 * width, 0.5 * width]
-        msg.velocity = dq
         self.joint_pub.publish(msg)

-    def publish_cam_info(self):
-        self.cam_info_msg.header.stamp = rospy.Time.now()
-        self.cam_info_pub.publish(self.cam_info_msg)
-
-    def publish_cam_imgs(self):
-        _, depth = self.sim.camera.get_image()
-        depth_msg = self.cv_bridge.cv2_to_imgmsg(depth)
-        depth_msg.header.stamp = rospy.Time.now()
-        self.depth_pub.publish(depth_msg)
+
+class ArmInterface:
+    def __init__(self, arm, controller):
+        self.arm = arm
+        self.controller = controller
+        rospy.Subscriber("command", PoseStamped, self.target_cb)
+
+    def update(self):
+        q, _ = self.arm.get_state()
+        cmd = self.controller.update(q)
+        self.arm.set_desired_joint_velocities(cmd)
+
+    def target_cb(self, msg):
+        assert msg.header.frame_id == self.arm.base_frame
+        self.controller.x_d = from_pose_msg(msg.pose)


 class GripperInterface:
@@ -143,61 +109,59 @@ class GripperInterface:
             franka_gripper.msg.MoveAction,
             auto_start=False,
         )
-        self.move_server.register_goal_callback(self.goal_cb)
+        self.move_server.register_goal_callback(self.move_action_goal_cb)
         self.move_server.start()
+        self.grasp_server = actionlib.SimpleActionServer(
+            "/franka_gripper/grasp",
+            franka_gripper.msg.GraspAction,
+            auto_start=False,
+        )
+        self.grasp_server.register_goal_callback(self.grasp_action_goal_cb)
+        self.grasp_server.start()

-    def goal_cb(self):
-        goal = self.move_server.accept_new_goal()
-        self.gripper.move(goal.width)
-        self.elapsed_time = 0.0
+    def move_action_goal_cb(self):
+        self.elapsed_time_since_move_action_goal = 0.0
+        goal = self.move_server.accept_new_goal()
+        self.gripper.set_desired_width(goal.width)
+
+    def grasp_action_goal_cb(self):
+        self.elapsed_time_since_grasp_action_goal = 0.0
+        goal = self.grasp_server.accept_new_goal()
+        self.gripper.set_desired_width(goal.width)

     def update(self, dt):
         if self.move_server.is_active():
-            self.elapsed_time += dt
-            if self.elapsed_time > 1.0:
+            self.elapsed_time_since_move_action_goal += dt
+            if self.elapsed_time_since_move_action_goal > 1.0:
                 self.move_server.set_succeeded()
+        if self.grasp_server.is_active():
+            self.elapsed_time_since_grasp_action_goal += dt
+            if self.elapsed_time_since_grasp_action_goal > 1.0:
+                self.grasp_server.set_succeeded()


-class JointTrajectoryController:
-    def __init__(self, arm, stopped=False):
-        self.arm = arm
-        self.action_server = actionlib.SimpleActionServer(
-            "position_joint_trajectory_controller/follow_joint_trajectory",
-            control_msgs.msg.FollowJointTrajectoryAction,
-            auto_start=False,
-        )
-        self.action_server.register_goal_callback(self.goal_cb)
-        self.action_server.start()
-        self.is_running = False
-        if not stopped:
-            self.start()
-
-    def goal_cb(self):
-        goal = self.action_server.accept_new_goal()
-        self.elapsed_time = 0.0
-        self.points = iter(goal.trajectory.points)
-        self.next_point = next(self.points)
-
-    def start(self):
-        self.is_running = True
-
-    def stop(self):
-        self.is_running = False
-
-    def update(self, dt):
-        if not (self.is_running and self.action_server.is_active()):
-            return
-        self.elapsed_time += dt
-        if self.elapsed_time > self.next_point.time_from_start.to_sec():
-            try:
-                self.next_point = next(self.points)
-            except StopIteration:
-                self.action_server.set_succeeded()
-                return
-        self.arm.set_desired_joint_positions(self.next_point.positions)
+class CameraInterface:
+    def __init__(self, camera):
+        self.camera = camera
+        self.cv_bridge = cv_bridge.CvBridge()
+        self.cam_info_msg = to_camera_info_msg(self.camera.intrinsic)
+        self.cam_info_msg.header.frame_id = "cam_optical_frame"
+        self.cam_info_pub = rospy.Publisher(
+            "/cam/depth/camera_info",
+            CameraInfo,
+            queue_size=10,
+        )
+        self.depth_pub = rospy.Publisher("/cam/depth/image_raw", Image, queue_size=10)
+
+    def update(self):
+        stamp = rospy.Time.now()
+        self.cam_info_msg.header.stamp = stamp
+        self.cam_info_pub.publish(self.cam_info_msg)
+        _, depth = self.camera.get_image()
+        depth_msg = self.cv_bridge.cv2_to_imgmsg(depth)
+        depth_msg.header.stamp = stamp
+        self.depth_pub.publish(depth_msg)


 def create_parser():
@@ -208,10 +172,8 @@ def create_parser():
 def main():
     rospy.init_node("bt_sim")
     parser = create_parser()
     args, _ = parser.parse_known_args()
     server = BtSimNode(args.gui)
     server.run()

View File

@@ -9,5 +9,5 @@ active_grasp:
   info_topic: /cam/depth/camera_info
   depth_topic: /cam/depth/image_raw
 vgn:
-  model: $(find vgn)/data/models/vgn_conv.pth
+  model: $(find vgn)/assets/models/vgn_conv.pth
   finger_depth: 0.05

controller.py (new file, 71 lines)
View File

@@ -0,0 +1,71 @@
import numpy as np
import rospy

from robot_utils.ros import tf
from robot_utils.ros.panda import PandaGripperClient
from robot_utils.spatial import Rotation, Transform

from utils import CartesianPoseControllerClient


class GraspController:
    def __init__(self, policy):
        self.policy = policy
        self.controller = CartesianPoseControllerClient()
        self.gripper = PandaGripperClient()
        self.load_parameters()

    def load_parameters(self):
        self.T_G_EE = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()

    def run(self):
        self.reset()
        grasp = self.explore()
        if grasp:
            self.execute_grasp(grasp)

    def reset(self):
        raise NotImplementedError

    def explore(self):
        self.policy.activate()
        r = rospy.Rate(self.policy.rate)
        while True:
            cmd = self.policy.update()
            if self.policy.done:
                break
            self.controller.send_target(cmd)
            r.sleep()
        return self.policy.best_grasp

    def execute_grasp(self, grasp):
        T_B_G = self.postprocess(grasp)
        self.gripper.move(0.08)

        # Move to an initial pose offset.
        self.controller.send_target(
            T_B_G * Transform.translation([0, 0, -0.05]) * self.T_G_EE
        )
        rospy.sleep(3.0)

        # Approach grasp pose.
        self.controller.send_target(T_B_G * self.T_G_EE)
        rospy.sleep(1.0)

        # Close the fingers.
        self.gripper.grasp()

        # Lift the object.
        target = Transform.translation([0, 0, 0.2]) * T_B_G * self.T_G_EE
        self.controller.send_target(target)
        rospy.sleep(2.0)

        # Check whether the object remains in the hand
        return self.gripper.read() > 0.005

    def postprocess(self, T_B_G):
        # Ensure that the camera is pointing forward.
        rot = T_B_G.rotation
        if rot.as_matrix()[:, 0][0] < 0:
            T_B_G.rotation = rot * Rotation.from_euler("z", np.pi)
        return T_B_G

View File

@@ -1,19 +1,12 @@
 <?xml version="1.0" ?>
 <launch>
-  <!-- Arguments -->
   <arg name="launch_rviz" default="true" />
-  <!-- Load parameters. -->
   <rosparam command="load" file="$(find active_grasp)/config/active_grasp.yaml" subst_value="true" />
   <param name="robot_description" command="$(find xacro)/xacro $(find active_grasp)/assets/urdfs/panda_arm_hand.urdf.xacro" />
-  <!-- Bringup the robot -->
   <node pkg="active_grasp" type="bt_sim_node.py" name="simulation" args="--gui" output="screen" />
   <node pkg="robot_state_publisher" type="robot_state_publisher" name="robot_state_publisher" />
-  <!-- Launch MoveIt -->
-  <!-- <include file="$(find panda_moveit_config)/launch/move_group.launch" /> -->
-  <!-- Visualize the robot. -->
-  <node pkg="rviz" type="rviz" name="rviz" args="-d $(find active_grasp)/launch/panda_visualization.rviz" if="$(arg launch_rviz)" />
+  <node pkg="rviz" type="rviz" name="rviz" args="-d $(find active_grasp)/launch/active_grasp.rviz" if="$(arg launch_rviz)" />
 </launch>

View File

@@ -4,9 +4,9 @@ Panels:
     Name: Displays
     Property Tree Widget:
       Expanded:
-        - /Global Options1
+        - /TF1/Tree1
       Splitter Ratio: 0.5
-    Tree Height: 549
+    Tree Height: 547
   - Class: rviz/Selection
     Name: Selection
   - Class: rviz/Tool Properties
@@ -25,7 +25,7 @@ Panels:
     Experimental: false
     Name: Time
     SyncMode: 0
-    SyncSource: Scene Cloud
+    SyncSource: ""
 Preferences:
   PromptSaveOnExit: true
 Toolbars:
@@ -137,13 +137,13 @@ Visualization Manager:
       Frames:
         All Enabled: false
         cam_optical_frame:
-          Value: true
+          Value: false
         panda_hand:
-          Value: true
+          Value: false
         panda_leftfinger:
           Value: false
         panda_link0:
-          Value: true
+          Value: false
         panda_link1:
           Value: false
         panda_link2:
@@ -164,6 +164,8 @@ Visualization Manager:
           Value: false
         task:
           Value: true
+        world:
+          Value: true
       Marker Alpha: 1
       Marker Scale: 0.5
       Name: TF
@@ -171,6 +173,7 @@ Visualization Manager:
       Show Axes: true
       Show Names: true
      Tree:
+        world:
        panda_link0:
          panda_link1:
            panda_link2:
@@ -191,14 +194,6 @@ Visualization Manager:
            {}
      Update Interval: 0
      Value: true
-    - Class: rviz/Marker
-      Enabled: true
-      Marker Topic: /workspace
-      Name: Workspace
-      Namespaces:
-        "": true
-      Queue Size: 100
-      Value: true
    - Alpha: 0.20000000298023224
      Autocompute Intensity Bounds: false
      Autocompute Value Bounds:
@@ -214,7 +209,9 @@ Visualization Manager:
      Enabled: false
      Invert Rainbow: false
      Max Color: 255; 255; 255
+      Max Intensity: 4096
      Min Color: 0; 0; 0
+      Min Intensity: 0
      Name: TSDF
      Position Transformer: XYZ
      Queue Size: 10
@@ -243,7 +240,7 @@ Visualization Manager:
      Invert Rainbow: false
      Max Color: 255; 255; 255
      Min Color: 0; 0; 0
-      Name: Scene Cloud
+      Name: SceneCloud
      Position Transformer: XYZ
      Queue Size: 10
      Selectable: true
@@ -255,12 +252,36 @@ Visualization Manager:
      Use Fixed Frame: true
      Use rainbow: true
      Value: true
+    - Alpha: 1
+      Axes Length: 0.05000000074505806
+      Axes Radius: 0.004999999888241291
+      Class: rviz/Pose
+      Color: 255; 25; 0
+      Enabled: true
+      Head Length: 0.30000001192092896
+      Head Radius: 0.10000000149011612
+      Name: EETarget
+      Queue Size: 10
+      Shaft Length: 1
+      Shaft Radius: 0.05000000074505806
+      Shape: Axes
+      Topic: /command
+      Unreliable: false
+      Value: true
+    - Class: rviz/MarkerArray
+      Enabled: true
+      Marker Topic: /path
+      Name: CameraPath
+      Namespaces:
+        {}
+      Queue Size: 100
+      Value: true
    - Class: rviz/MarkerArray
      Enabled: true
      Marker Topic: /grasps
-      Name: Predicted Grasps
+      Name: PredictedGrasps
      Namespaces:
-        "": true
+        {}
      Queue Size: 100
      Value: true
  Enabled: true
@@ -291,7 +312,7 @@ Visualization Manager:
  Views:
    Current:
      Class: rviz/Orbit
-      Distance: 1.5115783214569092
+      Distance: 1.5376757383346558
      Enable Stereo Rendering:
        Stereo Eye Separation: 0.05999999865889549
        Stereo Focal Distance: 1
@@ -307,17 +328,17 @@ Visualization Manager:
      Invert Z Axis: false
      Name: Current View
      Near Clip Distance: 0.009999999776482582
-      Pitch: 0.20979644358158112
+      Pitch: 0.21479728817939758
      Target Frame: <Fixed Frame>
-      Yaw: 5.238524913787842
+      Yaw: 5.193514823913574
    Saved: ~
 Window Geometry:
  Displays:
    collapsed: false
-  Height: 846
+  Height: 844
  Hide Left Dock: false
  Hide Right Dock: true
QMainWindow State: 000000ff00000000fd000000040000000000000156000002b0fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002b0000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002b0fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002b0000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650100000000000004b0000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000354000002b000000004000000040000000800000008fc00000002000000020000000000000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 QMainWindow State: 000000ff00000000fd000000040000000000000156000002aefc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002ae000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002b0fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002b0000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004f30000003efc0100000002fb0000000800540069006d00650100000000000004f3000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000397000002ae00000004000000040000000800000008fc00000002000000020000000000000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
  Selection:
    collapsed: false
  Time:
@@ -326,6 +347,6 @@ Window Geometry:
    collapsed: false
  Views:
    collapsed: true
-  Width: 1200
-  X: 720
-  Y: 27
+  Width: 1267
+  X: 100
+  Y: 457

View File (policies.py)

@@ -2,259 +2,128 @@ import cv_bridge
 import numpy as np
 from pathlib import Path
 import rospy
-import scipy.interpolate
-from geometry_msgs.msg import PoseStamped
 from sensor_msgs.msg import Image, CameraInfo
-import std_srvs.srv
 from visualization_msgs.msg import Marker, MarkerArray
-from robot_utils.perception import *
-from robot_utils.spatial import Rotation, Transform
-from robot_utils.ros.conversions import *
-from robot_utils.ros.panda import PandaGripperClient
 from robot_utils.ros import tf
+from robot_utils.ros.conversions import *
 from robot_utils.ros.rviz import *
-from vgn.detection import VGN, compute_grasps
-from vgn.perception import UniformTSDFVolume
+from robot_utils.spatial import Transform
 import vgn.vis
+from vgn.detection import VGN, compute_grasps


-def get_controller(name):
+def get_policy(name):
     if name == "single-view":
         return SingleViewBaseline()
-    elif name == "fixed-trajectory":
-        return FixedTrajectoryBaseline()
-    elif name == "mvp":
-        return MultiViewPicking()
     else:
         raise ValueError("{} policy does not exist.".format(name))


-class BaseController:
+class BasePolicy:
     def __init__(self):
-        self.frame = rospy.get_param("~frame_id")
-        self.length = rospy.get_param("~length")
         self.cv_bridge = cv_bridge.CvBridge()
-        self.reset_client = rospy.ServiceProxy("/reset", std_srvs.srv.Trigger)
         self.tsdf = UniformTSDFVolume(0.3, 40)
         self.vgn = VGN(Path(rospy.get_param("vgn/model")))
-        self._setup_robot_connection()
-        self._setup_camera_connection()
-        self._setup_rviz_connection()
-        self._lookup_transforms()
+        self.load_parameters()
+        self.lookup_transforms()
+        self.connect_to_camera()
+        self.connect_to_rviz()

-    def run(self):
-        self.reset()
-        self.explore()
-        self.execute_grasp()
-        # self.release_object()
-
-    def reset(self):
-        self._reset_env()
-        self._clear_rviz()
-        rospy.sleep(1.0)  # wait for states to be updated
-        self._init_policy()
-        self.viewpoints = []
-        self.done = False
-        self.best_grasp = None
-
-    def explore(self):
-        r = rospy.Rate(self.rate)
-        while not self.done:
-            self._update()
-            r.sleep()
-
-    def execute_grasp(self):
-        if not self.best_grasp:
-            return
-        self.gripper.move(0.08)
-        # Ensure that the camera is pointing forward.
-        T_O_G = self.best_grasp.pose
-        rot = T_O_G.rotation
-        if rot.as_matrix()[:, 0][0] < 0:
-            T_O_G.rotation = rot * Rotation.from_euler("z", np.pi)
-        # Move to an initial pose offset.
-        target = T_O_G * Transform.translation([0, 0, -0.05]) * self.T_G_EE
-        self._send_target_pose(target)
-        rospy.sleep(3.0)
-        # Approach grasp pose.
-        self._send_target_pose(T_O_G * self.T_G_EE)
-        rospy.sleep(1.0)
-        # Close the fingers.
-        self.gripper.move(0.0)
-        # Lift the object.
-        target = Transform.translation([0, 0, 0.2]) * T_O_G * self.T_G_EE
-        self._send_target_pose(target)
-        rospy.sleep(2.0)
-
-    def _setup_robot_connection(self):
+    def load_parameters(self):
         self.base_frame = rospy.get_param("~base_frame_id")
-        self.ee_frame = rospy.get_param("~ee_frame_id")
-        self.T_G_EE = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()
-        self.target_pose_pub = rospy.Publisher("/command", PoseStamped, queue_size=10)
-        self.gripper = PandaGripperClient()
+        self.task_frame = rospy.get_param("~frame_id")
+        self.cam_frame = rospy.get_param("~camera/frame_id")
+        self.info_topic = rospy.get_param("~camera/info_topic")
+        self.depth_topic = rospy.get_param("~camera/depth_topic")

-    def _setup_camera_connection(self):
-        self._cam_frame_id = rospy.get_param("~camera/frame_id")
-        info_topic = rospy.get_param("~camera/info_topic")
-        msg = rospy.wait_for_message(info_topic, CameraInfo, rospy.Duration(2.0))
+    def lookup_transforms(self):
+        tf._init_listener()
+        rospy.sleep(1.0)  # wait to receive transforms
+        self.T_B_task = tf.lookup(self.base_frame, self.task_frame)
+
+    def connect_to_camera(self):
+        msg = rospy.wait_for_message(self.info_topic, CameraInfo, rospy.Duration(2.0))
         self.intrinsic = from_camera_info_msg(msg)
-        depth_topic = rospy.get_param("~camera/depth_topic")
-        rospy.Subscriber(depth_topic, Image, self._sensor_cb, queue_size=1)
+        rospy.Subscriber(self.depth_topic, Image, self.sensor_cb, queue_size=1)

-    def _sensor_cb(self, msg):
-        self.last_depth_img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32)
-        self.last_extrinsic = tf.lookup(
-            self._cam_frame_id,
-            self.frame,
+    def sensor_cb(self, msg):
+        self.depth_img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32)
+        self.extrinsic = tf.lookup(
+            self.cam_frame,
+            self.task_frame,
             msg.header.stamp,
-            rospy.Duration(0.1),
+            rospy.Duration(0.2),
         )

-    def _setup_rviz_connection(self):
+    def connect_to_rviz(self):
         self.path_pub = rospy.Publisher("path", MarkerArray, queue_size=1, latch=True)

-    def _lookup_transforms(self):
-        self.T_B_O = tf.lookup(self.base_frame, self.frame)
+    def activate(self):
+        self.viewpoints = []
+        self.done = False
+        self.best_grasp = None  # grasp pose defined w.r.t. the robot's base frame

-    def _reset_env(self):
-        req = std_srvs.srv.TriggerRequest()
-        self.reset_client(req)
-
-    def _clear_rviz(self):
-        vgn.vis.clear()
-        self.path_pub.publish(DELETE_MARKER_ARRAY_MSG)
-
-    def _init_policy(self):
+    def update(self):
         raise NotImplementedError

-    def _update(self):
-        raise NotImplementedError
-
-    def _draw_camera_path(self, frustum=False):
-        identity = Transform.identity()
-        color = np.r_[31, 119, 180] / 255.0
-        # Spheres for each viewpoint
-        scale = 0.01 * np.ones(3)
-        spheres = create_marker(Marker.SPHERE_LIST, self.frame, identity, scale, color)
-        spheres.id = 0
-        spheres.points = [to_point_msg(p.translation) for p in self.viewpoints]
-        # Line strip connecting viewpoints
-        scale = [0.005, 0.0, 0.0]
-        lines = create_marker(Marker.LINE_STRIP, self.frame, identity, scale, color)
-        lines.id = 1
-        lines.points = [to_point_msg(p.translation) for p in self.viewpoints]
-        markers = [spheres, lines]
-        # Frustums
-        if frustum:
-            for i, pose in enumerate(self.viewpoints):
-                msg = create_cam_marker(self.intrinsic, pose, self.frame)
-                msg.id = i + 2
-                markers.append(msg)
-        self.path_pub.publish(MarkerArray(markers))
-
-    def _draw_scene_cloud(self):
-        cloud = self.tsdf.get_scene_cloud()
-        vgn.vis.draw_points(np.asarray(cloud.points))
-
-    def _integrate_latest_image(self):
-        self.viewpoints.append(self.last_extrinsic.inv())
+    def integrate_latest_image(self):
+        self.viewpoints.append(self.extrinsic.inv())
         self.tsdf.integrate(
-            self.last_depth_img,
+            self.depth_img,
             self.intrinsic,
-            self.last_extrinsic,
+            self.extrinsic,
         )

-    def _predict_best_grasp(self):
+    def predict_best_grasp(self):
         tsdf_grid = self.tsdf.get_grid()
         out = self.vgn.predict(tsdf_grid)
         score_fn = lambda g: g.pose.translation[2]
         grasps = compute_grasps(self.tsdf.voxel_size, out, score_fn)
         vgn.vis.draw_grasps(grasps, 0.05)
-        return grasps[0] if len(grasps) > 0 else None
+        return self.T_B_task * grasps[0].pose if len(grasps) > 0 else None

-    def _send_target_pose(self, target):
-        """Target is expected to be given w.r.t. the task frame."""
-        msg = PoseStamped()
-        msg.header.frame_id = self.base_frame
-        msg.pose = to_pose_msg(self.T_B_O * target)
-        self.target_pose_pub.publish(msg)
+    def draw_scene_cloud(self):
+        cloud = self.tsdf.get_scene_cloud()
+        vgn.vis.draw_points(np.asarray(cloud.points))
+
+    def draw_camera_path(self):
+        identity = Transform.identity()
+        color = np.r_[31, 119, 180] / 255.0
+        # Spheres for each viewpoint
+        scale = 0.01 * np.ones(3)
+        spheres = create_marker(
+            Marker.SPHERE_LIST, self.task_frame, identity, scale, color
+        )
+        spheres.id = 0
+        spheres.points = [to_point_msg(p.translation) for p in self.viewpoints]
+        # Line strip connecting viewpoints
+        scale = [0.005, 0.0, 0.0]
+        lines = create_marker(
+            Marker.LINE_STRIP, self.task_frame, identity, scale, color
+        )
+        lines.id = 1
+        lines.points = [to_point_msg(p.translation) for p in self.viewpoints]
+        self.path_pub.publish(MarkerArray([spheres, lines]))


-class SingleViewBaseline(BaseController):
+class SingleViewBaseline(BasePolicy):
     """
-    Integrate a single image from the initial viewpoint.
+    Process a single image from the initial viewpoint.
     """

     def __init__(self):
         super().__init__()
         self.rate = 1

-    def _init_policy(self):
-        pass
-
-    def _update(self):
-        self._integrate_latest_image()
-        self._draw_scene_cloud()
-        self._draw_camera_path(frustum=True)
-        self.best_grasp = self._predict_best_grasp()
+    def update(self):
+        self.integrate_latest_image()
+        self.draw_scene_cloud()
+        self.draw_camera_path()
+        self.best_grasp = self.predict_best_grasp()
         self.done = True
-
-
-class FixedTrajectoryBaseline(BaseController):
-    """Follow a pre-defined circular trajectory."""
-
-    def __init__(self):
-        super().__init__()
-        self.rate = 10
-        self.duration = 4.0
-        self.radius = 0.1
-        self.m = scipy.interpolate.interp1d([0, self.duration], [np.pi, 3.0 * np.pi])
-
-    def _init_policy(self):
-        self.tic = rospy.Time.now()
-        x0 = tf.lookup(self.frame, self.ee_frame)
-        self.center = np.r_[x0.translation[0] + self.radius, x0.translation[1:]]
-        self.target = x0
-
-    def _update(self):
-        elapsed_time = (rospy.Time.now() - self.tic).to_sec()
-        if elapsed_time > self.duration:
-            self.best_grasp = self._predict_best_grasp()
-            self.done = True
-        else:
-            # Update state
-            self._integrate_latest_image()
-            # Compute next viewpoint
-            t = self.m(elapsed_time)
-            self.target.translation = (
-                self.center
-                + np.r_[self.radius * np.cos(t), self.radius * np.sin(t), 0.0]
-            )
-            self._send_target_pose(self.target)
-            # Draw
-            self._draw_scene_cloud()
-            self._draw_camera_path()
-
-
-class MultiViewPicking(BaseController):
-    pass

run.py (deleted, 32 lines)
View File

@@ -1,32 +0,0 @@
import argparse

import rospy

from policies import get_controller


def create_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--policy",
        type=str,
        choices=[
            "single-view",
            "fixed-trajectory",
            "mvp",
        ],
    )
    return parser


def main():
    rospy.init_node("active_grasp")
    parser = create_parser()
    args = parser.parse_args()
    controller = get_controller(args.policy)
    controller.run()


if __name__ == "__main__":
    main()

sim_grasp.py (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
import argparse

import rospy
import std_srvs.srv as std_srvs

from controller import GraspController
from policies import get_policy


class SimGraspController(GraspController):
    def __init__(self, policy):
        super().__init__(policy)
        self.reset_sim = rospy.ServiceProxy("/reset", std_srvs.Trigger)

    def reset(self):
        req = std_srvs.TriggerRequest()
        self.reset_sim(req)
        rospy.sleep(1.0)  # wait for states to be updated


def create_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--policy",
        type=str,
        choices=[
            "single-view",
            "top",
            "alignment",
            "random",
            "fixed-trajectory",
        ],
    )
    return parser


def main():
    rospy.init_node("active_grasp")
    parser = create_parser()
    args = parser.parse_args()

    policy = get_policy(args.policy)
    controller = SimGraspController(policy)

    while True:
        controller.run()


if __name__ == "__main__":
    main()

View File (simulation.py)

@@ -3,43 +3,53 @@ import pybullet as p
 import rospkg

 from robot_utils.bullet import *
-from robot_utils.controllers import CartesianPoseController
 from robot_utils.spatial import Rotation, Transform


-class Simulation(BtManipulationSim):
+class Simulation(BtSim):
     def __init__(self, gui=True):
         super().__init__(gui=gui, sleep=False)
         p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
         p.resetDebugVisualizerCamera(1.4, 50, -35, [0.0, 0.0, 0.6])
+        self.object_uids = []
         self.find_object_urdfs()
-        self.add_table()
-        self.add_robot()
+        self.load_table()
+        self.load_robot()
+        self.load_controller()
+        self.reset()

     def find_object_urdfs(self):
         rospack = rospkg.RosPack()
-        root = Path(rospack.get_path("vgn")) / "data/urdfs/packed/test"
+        root = Path(rospack.get_path("vgn")) / "assets/urdfs/packed/test"
         self.urdfs = [str(f) for f in root.iterdir() if f.suffix == ".urdf"]

-    def add_table(self):
+    def load_table(self):
         p.loadURDF("plane.urdf")
         ori = Rotation.from_rotvec(np.array([0, 0, np.pi / 2])).as_quat()
         p.loadURDF("table/table.urdf", baseOrientation=ori, useFixedBase=True)
         self.length = 0.3
         self.origin = [-0.3, -0.5 * self.length, 0.5]

-    def add_robot(self):
-        self.T_W_B = Transform(Rotation.identity(), np.r_[-0.6, 0.0, 0.5])
+    def load_robot(self):
+        self.T_W_B = Transform(Rotation.identity(), np.r_[-0.6, 0.0, 0.4])
         self.arm = BtPandaArm(self.T_W_B)
         self.gripper = BtPandaGripper(self.arm)
+        self.model = Model(self.arm.urdf_path, self.arm.base_frame, self.arm.ee_frame)
         self.camera = BtCamera(320, 240, 1.047, 0.1, 1.0, self.arm.uid, 11)

+    def load_controller(self):
+        self.controller = CartesianPoseController(self.model, self.arm.ee_frame, None)
+
     def reset(self):
         self.remove_all_objects()
         self.set_initial_arm_configuration()
         urdfs = np.random.choice(self.urdfs, 4)
         origin = np.r_[self.origin[:2], 0.625]
-        self.add_random_arrangement(urdfs, origin, self.length, 0.8)
+        self.random_object_arrangement(urdfs, origin, self.length, 0.8)

     def set_initial_arm_configuration(self):
         q = self.arm.configurations["ready"]
@@ -47,3 +57,69 @@ class Simulation(BtManipulationSim):
         q[5] = np.deg2rad(np.random.uniform(90, 105))
         for i, q_i in enumerate(q):
             p.resetJointState(self.arm.uid, i, q_i, 0)
+        p.resetJointState(self.arm.uid, 9, 0.04, 0)
+        p.resetJointState(self.arm.uid, 10, 0.04, 0)
+        x0 = self.model.pose(self.arm.ee_frame, q)
+        self.controller.x_d = x0
+
+    def load_object(self, urdf, ori, pos, scale=1.0):
+        uid = p.loadURDF(str(urdf), pos, ori.as_quat(), globalScaling=scale)
+        self.object_uids.append(uid)
+        return uid
+
+    def remove_object(self, uid):
+        p.removeBody(uid)
+        self.object_uids.remove(uid)
+
+    def remove_all_objects(self):
+        for uid in list(self.object_uids):
+            self.remove_object(uid)
+
+    def random_object_arrangement(self, urdfs, origin, length, scale=1.0, attempts=10):
+        for urdf in urdfs:
+            # Load the object
+            uid = self.load_object(urdf, Rotation.identity(), [0.0, 0.0, 0.0], scale)
+            lower, upper = p.getAABB(uid)
+            z_offset = 0.5 * (upper[2] - lower[2]) + 0.002
+            state_id = p.saveState()
+            for _ in range(attempts):
+                # Try to place the object without collision
+                ori = Rotation.from_rotvec([0.0, 0.0, np.random.uniform(0, 2 * np.pi)])
+                offset = np.r_[np.random.uniform(0.2, 0.8, 2) * length, z_offset]
+                p.resetBasePositionAndOrientation(uid, origin + offset, ori.as_quat())
+                self.step()
+                if not p.getContactPoints(uid):
+                    break
+                else:
+                    p.restoreState(stateId=state_id)
+            else:
+                # No placement found, remove the object
+                self.remove_object(uid)
+
+
+class CartesianPoseController:
+    def __init__(self, model, frame, x0):
+        self._model = model
+        self._frame = frame
+        self.kp = np.ones(6) * 4.0
+        self.max_linear_vel = 0.2
+        self.max_angular_vel = 1.57
+        self.x_d = x0
+
+    def update(self, q):
+        x = self._model.pose(self._frame, q)
+        error = np.zeros(6)
+        error[:3] = self.x_d.translation - x.translation
+        error[3:] = (self.x_d.rotation * x.rotation.inv()).as_rotvec()
+        dx = self._limit_rate(self.kp * error)
+        J_pinv = np.linalg.pinv(self._model.jacobian(self._frame, q))
+        cmd = np.dot(J_pinv, dx)
+        return cmd
+
+    def _limit_rate(self, dx):
+        linear, angular = dx[:3], dx[3:]
+        linear = np.clip(linear, -self.max_linear_vel, self.max_linear_vel)
+        angular = np.clip(angular, -self.max_angular_vel, self.max_angular_vel)
+        return np.r_[linear, angular]

utils.py (new file, 13 lines)
View File

@@ -0,0 +1,13 @@
from geometry_msgs.msg import PoseStamped
import rospy

from robot_utils.ros.conversions import *


class CartesianPoseControllerClient:
    def __init__(self, topic="/command"):
        self.target_pub = rospy.Publisher(topic, PoseStamped, queue_size=10)

    def send_target(self, pose):
        msg = to_pose_stamped_msg(pose, "panda_link0")
        self.target_pub.publish(msg)