"""Live depth viewer for an Intel RealSense camera.

Streams 640x480 @ 30 fps 16-bit depth frames, scales them to 8-bit,
and shows them in an OpenCV window until 'q' or ESC is pressed.
"""

import pyrealsense2.pyrealsense2 as rs
import numpy as np
import cv2

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()

# Resolve the configuration against the attached device so we fail early
# (with a clear error) when no compatible RealSense camera is connected.
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
# NOTE(review): product line is queried but not used below; kept because the
# resolve step above validates the device. Remove if that check is unwanted.
device_product_line = str(device.get_info(rs.camera_info.product_line))

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

# Start streaming
pipeline.start(config)

try:
    while True:
        # Block until a coherent frame set arrives; skip sets without depth.
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        if not depth_frame:
            continue

        # Convert the depth frame to a numpy array (uint16 depth units).
        depth_image = np.asanyarray(depth_frame.get_data())

        # Apply colormap on depth image (image must be converted to 8-bit
        # per pixel first). alpha=0.2 means depth*0.2 is clamped to 0-255,
        # so values >= 1275 depth units saturate.
        depth_colormap_c = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.2), cv2.COLORMAP_BONE
        )
        # Collapse the (near-grayscale) BONE colormap back to one channel.
        depth_colormap = cv2.cvtColor(depth_colormap_c, cv2.COLOR_BGR2GRAY)

        # Show images
        cv2.imshow('RealSense', depth_colormap)

        # BUGFIX: the original discarded waitKey's result, leaving no clean
        # way to stop the loop. Exit on 'q' or ESC instead.
        key = cv2.waitKey(1) & 0xFF
        if key in (ord('q'), 27):
            break
finally:
    # Stop streaming and release the display window.
    pipeline.stop()
    cv2.destroyAllWindows()