Tags: c, gstreamer

How to use a tee to split a video device's output in C?


I'm at my wits' end. I'm trying to use GStreamer in C to "split" the output of an H264 webcam dynamically.

#include <stdio.h>
#include <string.h>
#include <gst/gst.h>

typedef struct _CameraData
{
    GstElement *pipeline;
    GstElement *video_source;
    GstElement *audio_source;
    GstElement *video_capsfilter;
    GstElement *audio_capsfilter;
    GstElement *h264parse;
    GstElement *audio_convert;
    GstElement *audio_resample;
    GstElement *mux;
    GstElement *sink;
    gboolean is_recording;
} CameraData;

CameraData camera_data;

void StartRecording(const char *filename)
{
    if (camera_data.is_recording)
    {
        g_print("Recording is already in progress.\n");
        return;
    }

    // Set the output file location to the user-provided filename
    g_object_set(G_OBJECT(camera_data.sink), "location", filename, NULL);

    GstStateChangeReturn result;

    // Set pipeline state to PLAYING
    result = gst_element_set_state(camera_data.pipeline, GST_STATE_PLAYING);
    if (result == GST_STATE_CHANGE_FAILURE)
    {
        g_printerr("Failed to set pipeline to PLAYING state.\n");

        // Get additional error details
        GstBus *bus = gst_element_get_bus(camera_data.pipeline);
        GstMessage *msg = gst_bus_poll(bus, GST_MESSAGE_ERROR, -1); // Wait indefinitely for an error message

        if (msg != NULL)
        {
            GError *err;
            gchar *debug_info;

            gst_message_parse_error(msg, &err, &debug_info);
            g_printerr("Error received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message);
            g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");

            g_clear_error(&err);
            g_free(debug_info);
            gst_message_unref(msg);
        }

        gst_object_unref(bus);
        return;
    }

    // Wait for the state change to complete
    GstBus *bus = gst_element_get_bus(camera_data.pipeline);
    GstMessage *msg;

    g_print("Starting recording to file: %s\n", filename);

    msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
                                     GST_MESSAGE_ERROR | GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_EOS);

    if (msg != NULL)
    {
        GError *err;
        gchar *debug_info;

        switch (GST_MESSAGE_TYPE(msg))
        {
            case GST_MESSAGE_ERROR:
                gst_message_parse_error(msg, &err, &debug_info);
                g_printerr("Error received from element %s: %s\n", GST_OBJECT_NAME(msg->src), err->message);
                g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
                g_clear_error(&err);
                g_free(debug_info);
                break;
            case GST_MESSAGE_ASYNC_DONE:
                g_print("Pipeline is now PLAYING.\n");
                camera_data.is_recording = TRUE;
                break;
            case GST_MESSAGE_EOS:
                g_print("End-Of-Stream reached.\n");
                break;
            default:
                g_printerr("Unexpected message received.\n");
                break;
        }
        gst_message_unref(msg);
    }

    gst_object_unref(bus);
}

void StopRecording(void)
{
    if (!camera_data.is_recording)
    {
        g_print("Recording is not in progress.\n");
        return;
    }

    // Set pipeline state to NULL to stop recording
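    // Note: for muxed output it is generally safer to send EOS first
    // (gst_element_send_event(pipeline, gst_event_new_eos())) and wait for
    // the EOS message on the bus, so the muxer can finalize the file before
    // the pipeline is torn down.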
    gst_element_set_state(camera_data.pipeline, GST_STATE_NULL);
    g_print("Recording stopped.\n");
    camera_data.is_recording = FALSE;
}

void InitializeGStreamerPipeline(void)
{
    // Initialize GStreamer
    gst_init(NULL, NULL);

    // Create the GStreamer elements
    camera_data.video_source = gst_element_factory_make("v4l2src", "video_source");
    camera_data.audio_source = gst_element_factory_make("alsasrc", "audio_source");
    camera_data.video_capsfilter = gst_element_factory_make("capsfilter", "video_filter");
    camera_data.audio_capsfilter = gst_element_factory_make("capsfilter", "audio_filter");
    camera_data.h264parse = gst_element_factory_make("h264parse", "h264parse");
    camera_data.audio_convert = gst_element_factory_make("audioconvert", "audio_convert");
    camera_data.audio_resample = gst_element_factory_make("audioresample", "audio_resample");
    camera_data.mux = gst_element_factory_make("matroskamux", "mux");
    camera_data.sink = gst_element_factory_make("filesink", "sink");

    // Check if elements are created successfully
    if (!camera_data.video_source || !camera_data.audio_source || !camera_data.video_capsfilter ||
        !camera_data.audio_capsfilter || !camera_data.h264parse || !camera_data.audio_convert ||
        !camera_data.audio_resample || !camera_data.mux || !camera_data.sink)
    {
        g_printerr("Failed to create GStreamer elements.\n");
        return;
    }

    // Create a GStreamer pipeline
    camera_data.pipeline = gst_pipeline_new("webcam-pipeline");

    if (!camera_data.pipeline)
    {
        g_printerr("Failed to create GStreamer pipeline.\n");
        return;
    }

    // Set properties for the elements
    g_object_set(G_OBJECT(camera_data.video_source), "device", "/dev/video2", NULL); // Change to your webcam device
    g_object_set(G_OBJECT(camera_data.audio_source), "device", "hw:1,0", NULL);      // Audio device

    // Set caps for H264 capture from the camera
    GstCaps *video_caps = gst_caps_new_simple("video/x-h264",
                                              "framerate", GST_TYPE_FRACTION, 30, 1,  // Set to 30 fps
                                              "width", G_TYPE_INT, 1920,              // Adjust width as needed
                                              "height", G_TYPE_INT, 1080,             // Adjust height as needed
                                              NULL);

    g_object_set(G_OBJECT(camera_data.video_capsfilter), "caps", video_caps, NULL);
    gst_caps_unref(video_caps);

    // Set caps for the audio capture to match S16_LE format and 48 kHz
    GstCaps *audio_caps = gst_caps_new_simple("audio/x-raw",
                                              "format", G_TYPE_STRING, "S16LE",
                                              "rate", G_TYPE_INT, 48000,
                                              "channels", G_TYPE_INT, 2, // Stereo
                                              NULL);

    g_object_set(G_OBJECT(camera_data.audio_capsfilter), "caps", audio_caps, NULL);
    gst_caps_unref(audio_caps);

    // Add elements to the pipeline
    gst_bin_add_many(GST_BIN(camera_data.pipeline), camera_data.video_source, camera_data.audio_source,
                     camera_data.video_capsfilter, camera_data.audio_capsfilter, camera_data.h264parse,
                     camera_data.audio_convert, camera_data.audio_resample, camera_data.mux, camera_data.sink, NULL);

    // Link video elements in the pipeline
    if (!gst_element_link_many(camera_data.video_source, camera_data.video_capsfilter, camera_data.h264parse, camera_data.mux, NULL))
    {
        g_printerr("Failed to link video elements in the pipeline.\n");
        gst_object_unref(camera_data.pipeline);
        return;
    }

    // Link audio elements in the pipeline
    if (!gst_element_link_many(camera_data.audio_source, camera_data.audio_capsfilter, camera_data.audio_convert, camera_data.audio_resample, camera_data.mux, NULL))
    {
        g_printerr("Failed to link audio elements in the pipeline.\n");
        gst_object_unref(camera_data.pipeline);
        return;
    }

    // Link muxer to sink
    if (!gst_element_link(camera_data.mux, camera_data.sink))
    {
        g_printerr("Failed to link muxer to sink.\n");
        gst_object_unref(camera_data.pipeline);
        return;
    }

    camera_data.is_recording = FALSE;
    g_print("GStreamer pipeline initialized.\n");
}

int main(void)
{
    InitializeGStreamerPipeline();

    char input[100];
    while (1)
    {
        printf("Enter 'start <filename>' to begin recording, 'stop' to end recording, or 'quit' to exit: ");
        if (!fgets(input, sizeof(input), stdin))
        {
            StopRecording(); // treat EOF like 'quit' instead of looping forever
            break;
        }

        if (strncmp(input, "start", 5) == 0)
        {
            char *filename = strtok(input + 6, "\n"); // Get filename from input
            if (filename)
            {
                StartRecording(filename);
            }
            else
            {
                printf("Please provide a filename.\n");
            }
        }
        else if (strncmp(input, "stop", 4) == 0)
        {
            StopRecording();
        }
        else if (strncmp(input, "quit", 4) == 0)
        {
            StopRecording(); // Ensure the pipeline is properly stopped before quitting
            break;
        }
        else
        {
            printf("Invalid command. Please enter 'start <filename>', 'stop', or 'quit'.\n");
        }
    }

    return 0;
}

I got it working for a simple input (/dev/video2 + ALSA) to a filesink.

Because the camera provides H264 directly, I use h264parse, so no encoding is necessary.

However, I have no clue whatsoever how to insert a tee into the equation, and all my attempts have met with a disastrous amount of frustration (I now understand the goose farmer meme).

My end goal is to dynamically add/remove queues on a tee. But for now, I'd settle for simply setting one up and saving the output to two different files. No, I can't use multifilesink, because I also want to use tcpserversinks.

Thanks, guys!


Solution

  • You can use a tee like this:

    gst-launch-1.0 v4l2src ! video/x-h264 ! tee name=t \
    t. ! queue ! h264parse ! qtmux ! filesink location=vid0.mp4 async=true sync=false \
    t. ! queue ! h264parse ! qtmux ! filesink location=vid1.mp4 async=true sync=false -e
    

    It's important to add a queue at the start of each branch after the tee; without the queues, the branches block each other. The -e flag makes gst-launch send EOS on shutdown so that qtmux can finalize the files.
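
  • In C, the same topology looks roughly like the sketch below. This is a
    minimal outline, not a drop-in implementation (it reuses the device and
    caps from your code and omits all error checking and bus handling). The
    key point is that tee exposes request pads named "src_%u", so every
    branch you attach starts by requesting one.

    #include <gst/gst.h>

    /* Build one recording branch (queue ! h264parse ! matroskamux ! filesink)
     * and hang it onto a new request pad of the tee. */
    static GstElement *add_branch(GstElement *pipeline, GstElement *tee,
                                  const char *filename)
    {
        GstElement *queue = gst_element_factory_make("queue", NULL);
        GstElement *parse = gst_element_factory_make("h264parse", NULL);
        GstElement *mux   = gst_element_factory_make("matroskamux", NULL);
        GstElement *sink  = gst_element_factory_make("filesink", NULL);
        GstPad *tee_src, *queue_sink;

        g_object_set(sink, "location", filename, NULL);
        gst_bin_add_many(GST_BIN(pipeline), queue, parse, mux, sink, NULL);
        gst_element_link_many(queue, parse, mux, sink, NULL);

        /* Ask the tee for a fresh src pad and link it to the queue.
         * (On GStreamer < 1.20 use gst_element_get_request_pad instead.) */
        tee_src = gst_element_request_pad_simple(tee, "src_%u");
        queue_sink = gst_element_get_static_pad(queue, "sink");
        gst_pad_link(tee_src, queue_sink);
        gst_object_unref(queue_sink);

        return queue; /* keep a handle if you want to remove the branch later */
    }

    int main(int argc, char *argv[])
    {
        gst_init(&argc, &argv);

        GstElement *pipeline = gst_pipeline_new("tee-pipeline");
        GstElement *src  = gst_element_factory_make("v4l2src", NULL);
        GstElement *filt = gst_element_factory_make("capsfilter", NULL);
        GstElement *tee  = gst_element_factory_make("tee", NULL);

        GstCaps *caps = gst_caps_new_simple("video/x-h264",
                                            "width", G_TYPE_INT, 1920,
                                            "height", G_TYPE_INT, 1080,
                                            "framerate", GST_TYPE_FRACTION, 30, 1,
                                            NULL);
        g_object_set(src, "device", "/dev/video2", NULL);
        g_object_set(filt, "caps", caps, NULL);
        gst_caps_unref(caps);

        gst_bin_add_many(GST_BIN(pipeline), src, filt, tee, NULL);
        gst_element_link_many(src, filt, tee, NULL);

        /* Two independent file branches fed by the same tee; a third call
         * could just as well build a tcpserversink branch instead. */
        add_branch(pipeline, tee, "vid0.mkv");
        add_branch(pipeline, tee, "vid1.mkv");

        gst_element_set_state(pipeline, GST_STATE_PLAYING);

        GMainLoop *loop = g_main_loop_new(NULL, FALSE);
        g_main_loop_run(loop);
        return 0;
    }

    For the dynamic end goal, the same add_branch can be called while the
    pipeline is PLAYING, provided you call gst_element_sync_state_with_parent
    on each newly added element. Removing a branch safely requires blocking
    the tee src pad first, usually with a pad probe along these lines (a
    sketch following the pattern from the GStreamer dynamic-pipelines
    documentation):

    static GstPadProbeReturn
    on_tee_pad_blocked(GstPad *tee_src, GstPadProbeInfo *info, gpointer data)
    {
        GstElement *queue = GST_ELEMENT(data);
        GstPad *queue_sink = gst_element_get_static_pad(queue, "sink");

        /* Dataflow on this pad is now blocked: unlink the branch and push
         * EOS into it so the muxer can finalize its file. */
        gst_pad_unlink(tee_src, queue_sink);
        gst_pad_send_event(queue_sink, gst_event_new_eos());
        gst_object_unref(queue_sink);
        return GST_PAD_PROBE_REMOVE;
    }

    /* in your control code: */
    gst_pad_add_probe(tee_src, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
                      on_tee_pad_blocked, queue, NULL);

    Once the EOS has drained through the branch, set its elements to NULL,
    remove them from the bin, and release the tee pad with
    gst_element_release_request_pad.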