diff --git a/blender_util.py b/blender_util.py
index ad4ef05..88d7676 100644
--- a/blender_util.py
+++ b/blender_util.py
@@ -69,13 +69,10 @@ class BlenderUtils:
     @staticmethod
     def setup_scene(init_light_and_camera_config, table_model_path, binocular_vision):
         bpy.context.scene.render.engine = 'BLENDER_WORKBENCH'
-        bpy.context.scene.display.shading.light = 'FLAT'
-        bpy.context.scene.display.shading.color_type = 'MATERIAL'
         bpy.context.scene.display.shading.show_xray = False
         bpy.context.scene.display.shading.use_dof = False
         bpy.context.scene.display.render_aa = 'OFF'
         bpy.context.scene.view_settings.view_transform = 'Standard'
-
         BlenderUtils.init_light_and_camera(init_light_and_camera_config, binocular_vision)
 
         BlenderUtils.add_plane("plane_floor", location=(0,0,0), orientation=(0,0,0))
@@ -170,14 +167,24 @@ class BlenderUtils:
         min_z = min([v.z for v in vertices_world])
         return min_z
 
+    @staticmethod
+    def setup_render_mask():
+        bpy.context.scene.display.shading.light = 'FLAT'
+        bpy.context.scene.display.shading.color_type = 'MATERIAL'
+
+    @staticmethod
+    def setup_render_normal():
+        bpy.context.scene.display.shading.light = 'MATCAP'
+        bpy.context.scene.display.shading.studio_light = 'check_normal+y.exr'
+
     @staticmethod
     def render_and_save(output_dir, file_name, binocular_vision=False):
         target_cameras = [BlenderUtils.CAMERA_NAME]
         if binocular_vision:
             target_cameras.append(BlenderUtils.CAMERA_RIGHT_NAME)
+        BlenderUtils.setup_render_mask()
         for cam_name in target_cameras:
-            # Set the current camera
             bpy.context.scene.camera = BlenderUtils.get_obj(cam_name)
             bpy.context.scene.view_layers["ViewLayer"].use_pass_z = True
             cam_suffix = "L" if cam_name == BlenderUtils.CAMERA_NAME else "R"
 
@@ -187,8 +194,6 @@ class BlenderUtils:
 
             mask_dir = os.path.join(output_dir, "mask")
             if not os.path.exists(mask_dir):
                 os.makedirs(mask_dir)
-
-            # Modify the file name based on the camera
             scene.render.filepath = os.path.join(output_dir, mask_dir, f"{file_name}_{cam_suffix}.png")
             scene.render.image_settings.color_depth = '8'
@@ -226,7 +231,29 @@ class BlenderUtils:
 
             tree.links.new(map_range.outputs[0], output_depth.inputs[0])
 
             bpy.ops.render.render(write_still=True)
-        msg = "success"
+
+        BlenderUtils.setup_render_normal()
+        for cam_name in target_cameras:
+            bpy.context.scene.camera = BlenderUtils.get_obj(cam_name)
+            bpy.context.scene.view_layers["ViewLayer"].use_pass_normal = True
+            cam_suffix = "L" if cam_name == BlenderUtils.CAMERA_NAME else "R"
+            scene = bpy.context.scene
+            scene.render.filepath = ""
+
+            normal_dir = os.path.join(output_dir, "normal")
+            if not os.path.exists(normal_dir):
+                os.makedirs(normal_dir)
+
+            scene.render.filepath = os.path.join(output_dir, normal_dir, f"{file_name}_{cam_suffix}.png")
+            scene.render.image_settings.color_depth = '8'
+            scene.render.resolution_percentage = 100
+            scene.render.use_overwrite = False
+            scene.render.use_file_extension = False
+            scene.render.use_placeholder = False
+
+            bpy.ops.render.render(write_still=True)
+
+        msg = "success"
         return msg
diff --git a/view_sample_util.py b/view_sample_util.py
index a39a622..7d69e71 100644
--- a/view_sample_util.py
+++ b/view_sample_util.py
@@ -40,7 +40,7 @@ class ViewSampleUtil:
         return np.array(downsampled_points), downsampled_indices
 
     @staticmethod
-    def sample_view_data(obj, distance_range:tuple = (0.2,0.4), voxel_size:float = 0.005, max_views: int = 1) -> dict:
+    def sample_view_data(obj, distance_range:tuple = (0.2,0.4), voxel_size:float = 0.005, max_views: int = 1, pertube_repeat:int = 1) -> dict:
         view_data = {
             "look_at_points": [],
             "cam_positions": [],
@@ -69,16 +69,18 @@ class ViewSampleUtil:
 
             if np.dot(normal, look_at_point) < 0:
                 normal = -normal
             normals.append(normal)
-            perturb_angle = np.radians(np.random.uniform(0, 30))
-            perturb_axis = np.random.normal(size=3)
-            perturb_axis /= np.linalg.norm(perturb_axis)
-            rotation_matrix = R.from_rotvec(perturb_angle * perturb_axis).as_matrix()
-            perturbed_normal = np.dot(rotation_matrix, normal)
+
+            for _ in range(pertube_repeat):
+                perturb_angle = np.radians(np.random.uniform(0, 30))
+                perturb_axis = np.random.normal(size=3)
+                perturb_axis /= np.linalg.norm(perturb_axis)
+                rotation_matrix = R.from_rotvec(perturb_angle * perturb_axis).as_matrix()
+                perturbed_normal = np.dot(rotation_matrix, normal)
 
-            distance = np.random.uniform(*distance_range)
-            cam_position = look_at_point + distance * perturbed_normal
-            look_at_points.append(look_at_point)
-            cam_positions.append(cam_position)
+                distance = np.random.uniform(*distance_range)
+                cam_position = look_at_point + distance * perturbed_normal
+                look_at_points.append(look_at_point)
+                cam_positions.append(cam_position)
 
         bm.free()