I am trying to scale a video from 1080p to 480p. For that, I have setup swscaler context as:
encoder_sc->sws_ctx = sws_getContext(1920, 1080,
AV_PIX_FMT_YUV420P,
854, 480, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );
However, when I am calling the scale frame function as
sws_scale_frame(encoder->sws_ctx, input_frame, input_frame);
When I do that, I get the error "Slice parameters 0, 1080 are invalid"
. I am very new to FFmpeg and to video processing in general. I could not find any solution while searching. Any help is greatly appreciated.
EDIT: I am including the entire source code because I cannot seem to solve the issue.
/* Bundles everything needed to work with one media file (input or output):
 * the container context, the codec state for one video and one audio
 * stream, and the scaler used when resizing video frames. */
typedef struct StreamingContext{
AVFormatContext* avfc;        /* container (demuxer/muxer) context */
AVCodec *video_avc;           /* video codec in use */
AVCodec *audio_avc;           /* audio codec in use */
AVStream *video_avs;          /* video stream inside avfc */
AVStream *audio_avs;          /* audio stream inside avfc */
AVCodecContext *video_avcc;   /* video encoder/decoder context */
AVCodecContext *audio_avcc;   /* audio encoder/decoder context */
int video_index;              /* index of the video stream in avfc */
int audio_index;              /* index of the audio stream in avfc */
char* filename;               /* path of the media file */
struct SwsContext *sws_ctx;   /* scaler: decoder size/fmt -> encoder size/fmt */
}StreamingContext;
/* User-selected transcoding options; all pointers may be NULL when the
 * corresponding option is unused. */
typedef struct StreamingParams{
char copy_video;          /* non-zero: remux video without re-encoding */
char copy_audio;          /* non-zero: remux audio without re-encoding */
char *output_extension;   /* forced output container extension, or NULL */
char *muxer_opt_key;      /* single muxer private option key, or NULL */
char *muxer_opt_value;    /* value for muxer_opt_key */
char *video_codec;        /* encoder name for avcodec_find_encoder_by_name() */
char *audio_codec;        /* audio encoder name */
char *codec_priv_key;     /* single codec private option key, or NULL */
char *codec_priv_value;   /* value for codec_priv_key */
}StreamingParams;
/*
 * Configure the output video stream and encoder, and create the scaler
 * that downsizes decoded frames to 854x480 before encoding.
 *
 * encoder_sc      - output context; encoder_sc->avfc must already exist
 * decoder_ctx     - opened decoder context of the input video stream
 * input_framerate - input frame rate; its inverse becomes the time base
 * sp              - user-selected codec name and private options
 *
 * Returns 0 on success, -1 on any failure (a message is logged).
 */
int prepare_video_encoder(StreamingContext *encoder_sc, AVCodecContext *decoder_ctx, AVRational input_framerate,
                          StreamingParams sp)
{
    /* Target (post-scaling) frame size. The encoder must be configured with
     * the size of the frames it will actually receive. */
    const int out_width = 854;
    const int out_height = 480;

    encoder_sc->video_avs = avformat_new_stream(encoder_sc->avfc, NULL);
    if (!encoder_sc->video_avs)
    {
        logging("Could not create the output video stream.");
        return -1;
    }
    encoder_sc->video_avc = avcodec_find_encoder_by_name(sp.video_codec);
    if (!encoder_sc->video_avc)
    {
        logging("Cannot find the Codec.");
        return -1;
    }
    encoder_sc->video_avcc = avcodec_alloc_context3(encoder_sc->video_avc);
    if (!encoder_sc->video_avcc)
    {
        logging("Could not allocate memory for Codec Context.");
        return -1;
    }
    av_opt_set(encoder_sc->video_avcc->priv_data, "preset", "fast", 0);
    if (sp.codec_priv_key && sp.codec_priv_value)
        av_opt_set(encoder_sc->video_avcc->priv_data, sp.codec_priv_key, sp.codec_priv_value, 0);

    /* BUG FIX: the original copied the decoder's 1920x1080 here while feeding
     * the encoder 854x480 scaled frames, a size mismatch the encoder rejects. */
    encoder_sc->video_avcc->height = out_height;
    encoder_sc->video_avcc->width = out_width;
    encoder_sc->video_avcc->sample_aspect_ratio = decoder_ctx->sample_aspect_ratio;
    if (encoder_sc->video_avc->pix_fmts)
        encoder_sc->video_avcc->pix_fmt = encoder_sc->video_avc->pix_fmts[0];
    else
        encoder_sc->video_avcc->pix_fmt = decoder_ctx->pix_fmt;

    encoder_sc->video_avcc->bit_rate = 2 * 1000 * 1000;
    encoder_sc->video_avcc->rc_buffer_size = 4 * 1000 * 1000;
    /* BUG FIX: the original set rc_min_rate (2.5M) above rc_max_rate (2M);
     * the minimum rate must not exceed the maximum. */
    encoder_sc->video_avcc->rc_max_rate = 2.5 * 1000 * 1000;
    encoder_sc->video_avcc->rc_min_rate = 2 * 1000 * 1000;

    encoder_sc->video_avcc->time_base = av_inv_q(input_framerate);
    encoder_sc->video_avs->time_base = encoder_sc->video_avcc->time_base;

    /* Scaler: real decoder dimensions/format -> encoder dimensions/format.
     * (The original hard-coded 1920x1080, breaking on other input sizes.) */
    encoder_sc->sws_ctx = sws_getContext(decoder_ctx->width, decoder_ctx->height,
                                         decoder_ctx->pix_fmt,
                                         out_width, out_height,
                                         encoder_sc->video_avcc->pix_fmt,
                                         SWS_BICUBIC, NULL, NULL, NULL);
    if (!encoder_sc->sws_ctx)
    {
        logging("Cannot Create Scaling Context.");
        return -1;
    }

    if (avcodec_open2(encoder_sc->video_avcc, encoder_sc->video_avc, NULL) < 0)
    {
        logging("Could not open the Codec.");
        return -1;
    }
    if (avcodec_parameters_from_context(encoder_sc->video_avs->codecpar, encoder_sc->video_avcc) < 0)
    {
        logging("Could not copy encoder parameters to the output stream.");
        return -1;
    }
    return 0;
}
/*
 * Decode all frames carried by input_packet, downscale each one to
 * 854x480 via encoder->sws_ctx and pass the scaled frame to the encoder.
 *
 * Returns 0 on success, a negative AVERROR code on decode/scale failure,
 * or -1 when encode_video() fails.
 */
int transcode_video(StreamingContext *decoder, StreamingContext *encoder, AVPacket *input_packet, AVFrame *input_frame, AVFrame *scaled_frame)
{
    int response = avcodec_send_packet(decoder->video_avcc, input_packet);
    if (response < 0)
    {
        logging("Error while sending the Packet to Decoder: %s", av_err2str(response));
        return response;
    }
    while (response >= 0)
    {
        response = avcodec_receive_frame(decoder->video_avcc, input_frame);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
        {
            break;
        }
        else if (response < 0)
        {
            logging("Error while receiving frame from Decoder: %s", av_err2str(response));
            return response;
        }

        /* Describe the destination frame; sws_scale_frame() allocates the
         * data buffers itself when the frame has none yet. */
        scaled_frame->format = encoder->video_avcc->pix_fmt;
        scaled_frame->width = 854;
        scaled_frame->height = 480;

        /* BUG FIX: the original ignored the scaler's return value, so a
         * scaling failure went unnoticed and an unwritten frame was encoded. */
        response = sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
        if (response < 0)
        {
            logging("Error while scaling the frame: %s", av_err2str(response));
            av_frame_unref(input_frame);
            return response;
        }
        /* Carry the decoder's timestamp over; without it the encoder sees
         * unset/non-monotonic PTS values. */
        scaled_frame->pts = input_frame->pts;

        if (encode_video(decoder, encoder, scaled_frame))
            return -1;

        av_frame_unref(input_frame);
    }
    return 0;
}
In the small piece of code you posted, one issue is visible immediately:
you are passing input_frame
as both the destination and the source —
it should be output_frame, input_frame.
Replace sws_scale_frame(encoder->sws_ctx, input_frame, input_frame);
with:
sws_scale_frame(encoder->sws_ctx, output_frame, input_frame);
When executing sws_scale_frame
, the function verifies that the dimensions and format of the input and the output matches the dimensions and format defined in sws_getContext
.
In our case:
The width and height input must be 1920 and 1080.
The width and height output must be 854 and 480.
When the dimensions don't match, the function fails and returns an error (a negative value).
In our case, the expected height of the output is 480
, but the given frame's height is 1080
(hence the "0, 1080" in the error message).
I don't know if this is the only issue...
I created a complete reproducible example that demonstrate the usage of sws_scale_frame
- scaling a single frame.
Code sample:
extern "C" //extern "C" is required because we are using C++ code.
{
#include <libswscale/swscale.h>
}
#include <cassert>
/*
 * Minimal reproducible example: scale one raw 1920x1080 YUV420P frame
 * (read from input_frame.bin) down to 854x480 with sws_scale_frame()
 * and write the result to output_frame.bin.
 *
 * Errors are handled with assert() for brevity — this is demo code only.
 */
int main()
{
    //Allocate buffer for input frame
    ////////////////////////////////////////////////////////////////////////////
    const int in_width = 1920;
    const int in_height = 1080;
    AVFrame *input_frame = av_frame_alloc();
    assert(input_frame != nullptr);   //Don't dereference a failed allocation.
    input_frame->format = AV_PIX_FMT_YUV420P;
    input_frame->width = in_width;
    input_frame->height = in_height;
    int sts = av_frame_get_buffer(input_frame, 0);
    assert(sts >= 0);
    //Make sure Y,U,V buffers are continuous in memory (this is not part of the solution, we need it only due to the way we are reading the frame from file).
    assert((input_frame->linesize[0] == in_width) && (input_frame->linesize[1] == in_width/2) && (input_frame->linesize[2] == in_width/2));
    ////////////////////////////////////////////////////////////////////////////

    //Read input_frame from binary file: input_frame.bin
    //The binary file was created using FFmpeg CLI (for testing):
    //ffmpeg -y -f lavfi -i testsrc=size=1920x1080:rate=1 -vcodec rawvideo -pix_fmt yuv420p -frames 1 -f rawvideo input_frame.bin
    ////////////////////////////////////////////////////////////////////////////
    FILE *f = fopen("input_frame.bin", "rb");
    assert(f != nullptr);
    //BUG FIX: verify that each fread actually read a full plane; a short
    //read would silently leave the frame partly uninitialized.
    size_t nread = fread(input_frame->data[0], 1, (size_t)in_width*in_height, f);     //Read Y channel
    assert(nread == (size_t)in_width*in_height);
    nread = fread(input_frame->data[1], 1, (size_t)in_width*in_height/4, f);          //Read U channel
    assert(nread == (size_t)in_width*in_height/4);
    nread = fread(input_frame->data[2], 1, (size_t)in_width*in_height/4, f);          //Read V channel
    assert(nread == (size_t)in_width*in_height/4);
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////

    //Allocate buffer for output frame
    ////////////////////////////////////////////////////////////////////////////
    const int out_width = 854;
    const int out_height = 480;
    AVFrame *output_frame = av_frame_alloc();
    assert(output_frame != nullptr);
    output_frame->format = AV_PIX_FMT_YUV420P;
    output_frame->width = out_width;
    output_frame->height = out_height;
    sts = av_frame_get_buffer(output_frame, 0);
    assert(sts >= 0);
    //Note: the output planes may be padded (linesize > width), so no
    //continuity assert here — we write the output line by line instead.
    ////////////////////////////////////////////////////////////////////////////

    //Allocate SwsContext
    ////////////////////////////////////////////////////////////////////////////
    SwsContext *sws_ctx = sws_getContext(input_frame->width,
                                         input_frame->height,
                                         (AVPixelFormat)input_frame->format,
                                         output_frame->width,
                                         output_frame->height,
                                         (AVPixelFormat)output_frame->format,
                                         SWS_BICUBIC,
                                         nullptr,
                                         nullptr,
                                         nullptr);
    assert(sws_ctx != nullptr);
    ////////////////////////////////////////////////////////////////////////////

    //Scale 1920x1080 input_frame and store the result in 854x480 output_frame.
    ////////////////////////////////////////////////////////////////////////////
    //sts = sws_scale_frame(sws_ctx, input_frame, input_frame); //Return error code -22 "Slice parameters 0, 1080 are invalid" (input_frame, input_frame is a bug).
    sts = sws_scale_frame(sws_ctx, output_frame, input_frame);
    if (sts < 0)
    {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(sts, errbuf, sizeof(errbuf));
        fprintf(stderr, "sws_scale_frame error: %s\n", errbuf);
        return -1;
    }
    ////////////////////////////////////////////////////////////////////////////

    //Write output_frame to binary file - write line by line because output_frame channels are not continuous in memory
    //After saving the output, we may convert it to PNG image using FFmpeg CLI (for testing):
    //ffmpeg -y -f rawvideo -video_size 854x480 -pixel_format yuv420p -i output_frame.bin output_frame.png
    ////////////////////////////////////////////////////////////////////////////
    f = fopen("output_frame.bin", "wb");
    assert(f != nullptr);
    for (int y = 0; y < output_frame->height; y++)
    {
        fwrite(output_frame->data[0] + (size_t)y * output_frame->linesize[0], 1, output_frame->width, f);   //Write Y channel (line by line).
    }
    for (int y = 0; y < output_frame->height/2; y++)
    {
        fwrite(output_frame->data[1] + (size_t)y * output_frame->linesize[1], 1, output_frame->width/2, f); //Write U channel (line by line).
    }
    for (int y = 0; y < output_frame->height/2; y++)
    {
        fwrite(output_frame->data[2] + (size_t)y * output_frame->linesize[2], 1, output_frame->width/2, f); //Write V channel (line by line).
    }
    fclose(f);
    ////////////////////////////////////////////////////////////////////////////

    //Free allocated resources.
    ////////////////////////////////////////////////////////////////////////////
    sws_freeContext(sws_ctx);
    av_frame_free(&input_frame);
    av_frame_free(&output_frame);
    ////////////////////////////////////////////////////////////////////////////
    return 0;
}
Output image after converting from YUV420p to PNG image (downscaled):