
How to get Intel RealSense D435i camera serial numbers from frames for multiple cameras?


I have initialized one pipeline for two cameras and I am getting color and depth images from it.

The problem is that I cannot find the camera serial numbers for the corresponding frames, so I cannot determine which camera captured which frames.

Below is my code:

import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time

# Configure depth and color streams...
pipeline_1 = rs.pipeline()
config_1 = rs.config()
config_1.enable_device('938422072752')
config_1.enable_device('902512070386')
config_1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config_1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)



# Start streaming from both cameras
pipeline_1.start(config_1)

try:
    while True:

        # Camera 1
        # Wait for a coherent pair of frames: depth and color
        frames_1 = pipeline_1.wait_for_frames()
        depth_frame_1 = frames_1.get_depth_frame()
        color_frame_1 = frames_1.get_color_frame()
        if not depth_frame_1 or not color_frame_1:
            continue
        # Convert images to numpy arrays
        depth_image_1 = np.asanyarray(depth_frame_1.get_data())
        color_image_1 = np.asanyarray(color_frame_1.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_1 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_1, alpha=0.5), cv2.COLORMAP_JET)

        # Camera 2
        # Wait for a coherent pair of frames: depth and color
        frames_2 = pipeline_1.wait_for_frames()
        depth_frame_2 = frames_2.get_depth_frame()
        color_frame_2 = frames_2.get_color_frame()
    
        if not depth_frame_2 or not color_frame_2:
            continue
        # Convert images to numpy arrays

        depth_image_2 = np.asanyarray(depth_frame_2.get_data())
        color_image_2 = np.asanyarray(color_frame_2.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_2 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_2, alpha=0.5), cv2.COLORMAP_JET)

        # Stack all images horizontally
        images = np.hstack((color_image_1, depth_colormap_1, color_image_2, depth_colormap_2))

        # Show images from both cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', images)
        
        cv2.waitKey(20)

       
finally:
    pipeline_1.stop()

How can I find the camera serial numbers after wait_for_frames() to determine which camera captured the depth and color images?


Solution

  • I adapted your code and combined it with the C++ example posted by nayab to compose the following code, which grabs the color image (only) of multiple RealSense cameras and stacks them horizontally (a note on reading back each pipeline's serial number follows the code):

    import pyrealsense2 as rs
    import numpy as np
    import cv2
    import logging
    import time
    
    realsense_ctx = rs.context()  # The context encapsulates all connected devices and sensors and provides additional functionality.
    connected_devices = []
    
    # get serial numbers of connected devices:
    for i in range(len(realsense_ctx.devices)):
        detected_camera = realsense_ctx.devices[i].get_info(
            rs.camera_info.serial_number)
        connected_devices.append(detected_camera)
    
    pipelines = []
    configs = []
    for i in range(len(realsense_ctx.devices)):
        pipelines.append(rs.pipeline())  # one pipeline for each device
        configs.append(rs.config())      # one config for each device
        configs[i].enable_device(connected_devices[i])
        configs[i].enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
        pipelines[i].start(configs[i])
    
    try:
        while True:
            images = []
            for i in range(len(pipelines)):
                print("waiting for frame at cam", i)
                frames = pipelines[i].wait_for_frames()
                color_frame = frames.get_color_frame()
                images.append(np.asanyarray(color_frame.get_data()))
    
            # Stack all images horizontally
            image_composite = images[0]
            for i in range(1, len(images)):
                image_composite = np.hstack((image_composite, images[i]))
    
            # Show images from both cameras
            cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
            cv2.imshow('RealSense', image_composite)
    
            cv2.waitKey(20)
    
    finally:
        for i in range(len(pipelines)):
            pipelines[i].stop()
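
  • Since each pipeline above is started for exactly one serial number, the frames returned by pipelines[i].wait_for_frames() belong to the camera whose serial is connected_devices[i], so you can attach the serial number to every frame set directly. If you would rather read the serial back from a running pipeline itself, here is a minimal sketch (the helper name serial_of is my own, not part of pyrealsense2; it only relies on pipeline.get_active_profile(), pipeline_profile.get_device() and device.get_info(rs.camera_info.serial_number)):

    import pyrealsense2 as rs

    def serial_of(pipeline):
        # Return the serial number of the device behind an already-started pipeline.
        device = pipeline.get_active_profile().get_device()
        return device.get_info(rs.camera_info.serial_number)

    # Usage inside the capture loop above:
    #   frames = pipelines[i].wait_for_frames()
    #   serial = connected_devices[i]      # known from the start-up loop, or
    #   serial = serial_of(pipelines[i])   # read back from the pipeline at run time
    #   print("frame set from camera", serial)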