I am first scaling each frame and then sending it to the encoder, as shown below:
scaled_frame->pts = input_frame->pts;
scaled_frame->pkt_dts = input_frame->pkt_dts;
scaled_frame->pict_type = input_frame->pict_type;
sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
if (encode_video(decoder, encoder, scaled_frame))
return -1;
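For completeness, the same scaling step with its return value checked would look roughly like this (a sketch only, assuming sws_scale_frame reports failure with a negative AVERROR code):
// Sketch: same call as above, but with the scaler's result checked
// (assumption: sws_scale_frame returns a negative AVERROR code on failure).
int ret = sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
if (ret < 0)
{
    logging("Error while scaling frame: %s", av_err2str(ret));
    return ret;
}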
The scaling context is configured as:
scaled_frame->width = 854;
scaled_frame->height = 480;
encoder->sws_ctx = sws_getContext(1920, 1080, decoder->video_avcc->pix_fmt,
                                  scaled_frame->width, scaled_frame->height,
                                  decoder->video_avcc->pix_fmt,
                                  SWS_BICUBIC, NULL, NULL, NULL);
if (!encoder->sws_ctx){logging("Cannot Create Scaling Context."); return -1;}
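For comparison, the same context could be built from the decoder's reported dimensions instead of the hard-coded 1920x1080 (a sketch, assuming decoder->video_avcc has already been filled from the input stream):
// Sketch: take the source size from the opened decoder context
// instead of hard-coding 1920x1080.
encoder->sws_ctx = sws_getContext(decoder->video_avcc->width,
                                  decoder->video_avcc->height,
                                  decoder->video_avcc->pix_fmt,
                                  scaled_frame->width, scaled_frame->height,
                                  decoder->video_avcc->pix_fmt,
                                  SWS_BICUBIC, NULL, NULL, NULL);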
The encoder is configured as:
encoder_sc->video_avcc->height = decoder_ctx->height; //1080
encoder_sc->video_avcc->width = decoder_ctx->width; //1920
encoder_sc->video_avcc->bit_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_buffer_size = 4 * 1000 * 1000;
encoder_sc->video_avcc->rc_max_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_min_rate = 2.5 * 1000 * 1000;
encoder_sc->video_avcc->time_base = av_inv_q(input_framerate);
encoder_sc->video_avs->time_base = encoder_sc->video_avcc->time_base;
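As a small worked example of the time_base line (illustration only, assuming the input is reported as 60 fps):
// Illustration: for a 60 fps input, av_guess_frame_rate() typically reports 60/1,
// and av_inv_q() simply swaps numerator and denominator.
AVRational fps = (AVRational){60, 1};
AVRational tb  = av_inv_q(fps);   // tb.num == 1, tb.den == 60, i.e. a 1/60 time base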
When I get the output, the video is still 1080p and it shows visible glitches.
I also changed the encoder avcc resolution to 480p (854x480). However, that causes the video to get sliced to the top quarter of the original frame. I am new to FFmpeg and video processing in general.
EDIT: I am adding a minimal reproducible code sample. It is quite long because it has to include the decoding, scaling and encoding code, since the possible error is either in the scaling or the encoding:
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/timestamp.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <string.h>
#include <inttypes.h>
typedef struct StreamingContext{
AVFormatContext* avfc;
AVCodec *video_avc;
AVCodec *audio_avc;
AVStream *video_avs;
AVStream *audio_avs;
AVCodecContext *video_avcc;
AVCodecContext *audio_avcc;
int video_index;
int audio_index;
char* filename;
struct SwsContext *sws_ctx;
}StreamingContext;
typedef struct StreamingParams{
char copy_video;
char copy_audio;
char *output_extension;
char *muxer_opt_key;
char *muxer_opt_value;
char *video_codec;
char *audio_codec;
char *codec_priv_key;
char *codec_priv_value;
}StreamingParams;
void logging(const char *fmt, ...)
{
va_list args;
fprintf(stderr, "LOG: ");
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fprintf(stderr, "\n");
}
int fill_stream_info(AVStream *avs, AVCodec **avc, AVCodecContext **avcc)
{
*avc = avcodec_find_decoder(avs->codecpar->codec_id);
if (!*avc)
{
logging("Failed to find the codec.\n");
return -1;
}
*avcc = avcodec_alloc_context3(*avc);
if (!*avcc)
{
logging("Failed to alloc memory for codec context.");
return -1;
}
if (avcodec_parameters_to_context(*avcc, avs->codecpar) < 0)
{
logging("Failed to fill Codec Context.");
return -1;
}
if (avcodec_open2(*avcc, *avc, NULL) < 0)
{
logging("Failed to open Codec.");
return -1;
}
return 0;
}
int open_media(const char *in_filename, AVFormatContext **avfc)
{
*avfc = avformat_alloc_context();
if (!*avfc)
{
logging("Failed to Allocate Memory for Format Context");
return -1;
}
if (avformat_open_input(avfc, in_filename, NULL, NULL) != 0)
{
logging("Failed to open input file %s", in_filename);
return -1;
}
if (avformat_find_stream_info(*avfc, NULL) < 0)
{
logging("Failed to get Stream Info.");
return -1;
}
return 0;
}
int prepare_decoder(StreamingContext *sc)
{
for (int i = 0; i < sc->avfc->nb_streams; i++)
{
if (sc->avfc->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
sc->video_avs = sc->avfc->streams[i];
sc->video_index = i;
if (fill_stream_info(sc->video_avs, &sc->video_avc, &sc->video_avcc))
{
return -1;
}
}
else
{
logging("Skipping Streams other than Video.");
}
}
return 0;
}
int prepare_video_encoder(StreamingContext *encoder_sc, AVCodecContext *decoder_ctx, AVRational input_framerate,
StreamingParams sp)
{
encoder_sc->video_avs = avformat_new_stream(encoder_sc->avfc, NULL);
encoder_sc->video_avc = avcodec_find_encoder_by_name(sp.video_codec);
if (!encoder_sc->video_avc)
{
logging("Cannot find the Codec.");
return -1;
}
encoder_sc->video_avcc = avcodec_alloc_context3(encoder_sc->video_avc);
if (!encoder_sc->video_avcc)
{
logging("Could not allocate memory for Codec Context.");
return -1;
}
av_opt_set(encoder_sc->video_avcc->priv_data, "preset", "fast", 0);
if (sp.codec_priv_key && sp.codec_priv_value)
av_opt_set(encoder_sc->video_avcc->priv_data, sp.codec_priv_key, sp.codec_priv_value, 0);
encoder_sc->video_avcc->height = decoder_ctx->height;
encoder_sc->video_avcc->width = decoder_ctx->width;
encoder_sc->video_avcc->sample_aspect_ratio = decoder_ctx->sample_aspect_ratio;
if (encoder_sc->video_avc->pix_fmts)
encoder_sc->video_avcc->pix_fmt = encoder_sc->video_avc->pix_fmts[0];
else
encoder_sc->video_avcc->pix_fmt = decoder_ctx->pix_fmt;
encoder_sc->video_avcc->bit_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_buffer_size = 4 * 1000 * 1000;
encoder_sc->video_avcc->rc_max_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_min_rate = 2.5 * 1000 * 1000;
encoder_sc->video_avcc->time_base = av_inv_q(input_framerate);
encoder_sc->video_avs->time_base = encoder_sc->video_avcc->time_base;
if (avcodec_open2(encoder_sc->video_avcc, encoder_sc->video_avc, NULL) < 0)
{
logging("Could not open the Codec.");
return -1;
}
avcodec_parameters_from_context(encoder_sc->video_avs->codecpar, encoder_sc->video_avcc);
return 0;
}
int encode_video(StreamingContext *decoder, StreamingContext *encoder, AVFrame *input_frame)
{
if (input_frame)
input_frame->pict_type = AV_PICTURE_TYPE_NONE;
AVPacket *output_packet = av_packet_alloc();
if (!output_packet)
{
logging("Could not allocate memory for Output Packet.");
return -1;
}
int response = avcodec_send_frame(encoder->video_avcc, input_frame);
while (response >= 0)
{
response = avcodec_receive_packet(encoder->video_avcc, output_packet);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
{
break;
}
else if (response < 0)
{
logging("Error while receiving packet from encoder: %s", av_err2str(response));
return -1;
}
output_packet->stream_index = decoder->video_index;
output_packet->duration = encoder->video_avs->time_base.den / encoder->video_avs->time_base.num / decoder->video_avs->avg_frame_rate.num * decoder->video_avs->avg_frame_rate.den;
av_packet_rescale_ts(output_packet, decoder->video_avs->time_base, encoder->video_avs->time_base);
response = av_interleaved_write_frame(encoder->avfc, output_packet);
if (response != 0)
{
logging("Error %d while receiving packet from decoder: %s", response, av_err2str(response));
return -1;
}
}
av_packet_unref(output_packet);
av_packet_free(&output_packet);
return 0;
}
int transcode_video(StreamingContext *decoder, StreamingContext *encoder, AVPacket *input_packet, AVFrame *input_frame, AVFrame *scaled_frame)
{
int response = avcodec_send_packet(decoder->video_avcc, input_packet);
if (response < 0)
{
logging("Error while sending the Packet to Decoder: %s", av_err2str(response));
return response;
}
while (response >= 0)
{
response = avcodec_receive_frame(decoder->video_avcc, input_frame);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
{
break;
}
else if (response < 0)
{
logging("Error while receiving frame from Decoder: %s", av_err2str(response));
return response;
}
if (response >= 0)
{
scaled_frame->pts = input_frame->pts;
scaled_frame->pkt_dts = input_frame->pkt_dts;
scaled_frame->pict_type = input_frame->pict_type;
sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
if (encode_video(decoder, encoder, scaled_frame))
return -1;
}
av_frame_unref(input_frame);
}
return 0;
}
int main(int argc, char *argv[])
{
StreamingParams sp = {0};
sp.copy_audio = 1;
sp.copy_video = 0;
sp.video_codec = "libx265";
StreamingContext *decoder = (StreamingContext *)calloc(1, sizeof(StreamingContext));
decoder->filename = argv[1];
StreamingContext *encoder = (StreamingContext *)calloc(1, sizeof(StreamingContext));
encoder->filename = argv[2];
if (sp.output_extension)
{
strcat(encoder->filename, sp.output_extension);
}
if (open_media(decoder->filename, &decoder->avfc))
return -1;
if (prepare_decoder(decoder))
return -1;
avformat_alloc_output_context2(&encoder->avfc, NULL, NULL, encoder->filename);
if (!encoder->avfc)
{
logging("Could not allocate memory for output Format Context.");
return -1;
}
AVRational input_framerate = av_guess_frame_rate(decoder->avfc, decoder->video_avs, NULL);
prepare_video_encoder(encoder, decoder->video_avcc, input_framerate, sp);
if (encoder->avfc->oformat->flags & AVFMT_GLOBALHEADER)
encoder->avfc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (!(encoder->avfc->oformat->flags & AVFMT_NOFILE))
{
if (avio_open(&encoder->avfc->pb, encoder->filename, AVIO_FLAG_WRITE) < 0)
{
logging("could not open the output file");
return -1;
}
}
AVDictionary *muxer_opts = NULL;
if (sp.muxer_opt_key && sp.muxer_opt_value)
{
av_dict_set(&muxer_opts, sp.muxer_opt_key, sp.muxer_opt_value, 0);
}
if (avformat_write_header(encoder->avfc, &muxer_opts) < 0)
{
logging("an error occurred when opening output file");
return -1;
}
AVFrame *input_frame = av_frame_alloc();
AVFrame *scaled_frame = av_frame_alloc();
if (!input_frame || !scaled_frame)
{
logging("Failed to allocate memory for AVFrame");
return -1;
}
// scaled_frame->format = AV_PIX_FMT_YUV420P;
scaled_frame->width = 854;
scaled_frame->height = 480;
//Creating Scaling Context
encoder->sws_ctx = sws_getContext(1920, 1080, decoder->video_avcc->pix_fmt,
                                  scaled_frame->width, scaled_frame->height,
                                  decoder->video_avcc->pix_fmt,
                                  SWS_BICUBIC, NULL, NULL, NULL);
if (!encoder->sws_ctx){logging("Cannot Create Scaling Context."); return -1;}
AVPacket *input_packet = av_packet_alloc();
if (!input_packet)
{
logging("Failed to allocate memory for AVPacket.");
return -1;
}
while (av_read_frame(decoder->avfc, input_packet) >= 0)
{
if (decoder->avfc->streams[input_packet->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
if (transcode_video(decoder, encoder, input_packet, input_frame, scaled_frame))
return -1;
av_packet_unref(input_packet);
}
else
{
logging("Ignoring all nonvideo packets.");
}
}
if (encode_video(decoder, encoder, NULL))
return -1;
av_write_trailer(encoder->avfc);
if (muxer_opts != NULL)
{
av_dict_free(&muxer_opts);
muxer_opts = NULL;
}
if (input_frame != NULL)
{
av_frame_free(&input_frame);
input_frame = NULL;
}
if (input_packet != NULL)
{
av_packet_free(&input_packet);
input_packet = NULL;
}
avformat_close_input(&decoder->avfc);
avformat_free_context(decoder->avfc);
decoder->avfc = NULL;
avformat_free_context(encoder->avfc);
encoder->avfc = NULL;
avcodec_free_context(&decoder->video_avcc);
decoder->video_avcc = NULL;
avcodec_free_context(&decoder->audio_avcc);
decoder->audio_avcc = NULL;
free(decoder);
decoder = NULL;
free(encoder);
encoder = NULL;
return 0;
}
The video I am using for testing is available at the repo: https://github.com/leandromoreira/ffmpeg-libav-tutorial
The file name is small_bunny_1080p_60fps.mp4
The current resolution of the output video is 1920x1080 instead of 854x480.
The reason is that the width and height are copied from the decoder to the encoder in the function prepare_video_encoder(...):
encoder_sc->video_avcc->height = decoder_ctx->height;
encoder_sc->video_avcc->width = decoder_ctx->width;
We have to set width and height according to the scaled resolution:
encoder_sc->video_avcc->height = 480;
encoder_sc->video_avcc->width = 854;
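To keep the scaler output and the encoder in agreement, it helps to define the target size once and reuse it in both places; the updated code below does exactly that with scaled_frame_width / scaled_frame_height. A minimal sketch (the names dst_width / dst_height are illustrative):
const int dst_width  = 854;
const int dst_height = 480;

encoder_sc->video_avcc->width  = dst_width;   // encoder output size
encoder_sc->video_avcc->height = dst_height;
scaled_frame->width  = dst_width;             // sws destination frame size
scaled_frame->height = dst_height;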
Updated code:
extern "C" //Building as C++
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/timestamp.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
#include <string.h>
#include <inttypes.h>
typedef struct StreamingContext{
AVFormatContext* avfc;
const AVCodec *video_avc;
const AVCodec *audio_avc;
AVStream *video_avs;
AVStream *audio_avs;
AVCodecContext *video_avcc;
AVCodecContext *audio_avcc;
int video_index;
int audio_index;
const char* filename;
struct SwsContext *sws_ctx;
}StreamingContext;
typedef struct StreamingParams{
char copy_video;
char copy_audio;
char *output_extension;
char *muxer_opt_key;
char *muxer_opt_value;
const char *video_codec;
char *audio_codec;
char *codec_priv_key;
char *codec_priv_value;
}StreamingParams;
//C++ replacement for the av_err2str macro (its compound literal is not valid C++)
static char static_errbuf[AV_ERROR_MAX_STRING_SIZE];
static const char *av_err2str_cpp(int sts)
{
av_make_error_string(static_errbuf, AV_ERROR_MAX_STRING_SIZE, sts);
return static_errbuf;
}
void logging(const char *fmt, ...)
{
va_list args;
fprintf(stderr, "LOG: ");
va_start(args, fmt);
vfprintf(stderr, fmt, args);
va_end(args);
fprintf(stderr, "\n");
}
int fill_stream_info(AVStream *avs, const AVCodec **avc, AVCodecContext **avcc)
{
*avc = avcodec_find_decoder(avs->codecpar->codec_id);
if (!*avc)
{
logging("Failed to find the codec.\n");
return -1;
}
*avcc = avcodec_alloc_context3(*avc);
if (!*avcc)
{
logging("Failed to alloc memory for codec context.");
return -1;
}
if (avcodec_parameters_to_context(*avcc, avs->codecpar) < 0)
{
logging("Failed to fill Codec Context.");
return -1;
}
if (avcodec_open2(*avcc, *avc, NULL) < 0)
{
logging("Failed to open Codec.");
return -1;
}
return 0;
}
int open_media(const char *in_filename, AVFormatContext **avfc)
{
*avfc = avformat_alloc_context();
if (!*avfc)
{
logging("Failed to Allocate Memory for Format Context");
return -1;
}
if (avformat_open_input(avfc, in_filename, NULL, NULL) != 0)
{
logging("Failed to open input file %s", in_filename);
return -1;
}
if (avformat_find_stream_info(*avfc, NULL) < 0)
{
logging("Failed to get Stream Info.");
return -1;
}
return 0;
}
int prepare_decoder(StreamingContext *sc)
{
for (int i = 0; i < (int)sc->avfc->nb_streams; i++)
{
if (sc->avfc->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
sc->video_avs = sc->avfc->streams[i];
sc->video_index = i;
if (fill_stream_info(sc->video_avs, &sc->video_avc, &sc->video_avcc))
{
return -1;
}
}
else
{
logging("Skipping Streams other than Video.");
}
}
return 0;
}
//Adding arguments dst_width and dst_height
int prepare_video_encoder(StreamingContext *encoder_sc, AVCodecContext *decoder_ctx, AVRational input_framerate,
StreamingParams sp, int dst_width, int dst_height)
{
encoder_sc->video_avs = avformat_new_stream(encoder_sc->avfc, NULL);
encoder_sc->video_avc = avcodec_find_encoder_by_name(sp.video_codec);
if (!encoder_sc->video_avc)
{
logging("Cannot find the Codec.");
return -1;
}
encoder_sc->video_avcc = avcodec_alloc_context3(encoder_sc->video_avc);
if (!encoder_sc->video_avcc)
{
logging("Could not allocate memory for Codec Context.");
return -1;
}
av_opt_set(encoder_sc->video_avcc->priv_data, "preset", "fast", 0);
if (sp.codec_priv_key && sp.codec_priv_value)
av_opt_set(encoder_sc->video_avcc->priv_data, sp.codec_priv_key, sp.codec_priv_value, 0);
//We have to set the scaled resolution (854x480) instead of copying the resolution from the decoder to the encoder.
////////////////////////////////////////////////////////////////////////////
//encoder_sc->video_avcc->height = decoder_ctx->height;
//encoder_sc->video_avcc->width = decoder_ctx->width;
encoder_sc->video_avcc->width = dst_width;
encoder_sc->video_avcc->height = dst_height;
////////////////////////////////////////////////////////////////////////////
encoder_sc->video_avcc->sample_aspect_ratio = decoder_ctx->sample_aspect_ratio;
if (encoder_sc->video_avc->pix_fmts)
encoder_sc->video_avcc->pix_fmt = encoder_sc->video_avc->pix_fmts[0];
else
encoder_sc->video_avcc->pix_fmt = decoder_ctx->pix_fmt;
encoder_sc->video_avcc->bit_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_buffer_size = 4 * 1000 * 1000;
encoder_sc->video_avcc->rc_max_rate = 2 * 1000 * 1000;
encoder_sc->video_avcc->rc_min_rate = (int64_t)(2.5 * 1000 * 1000);
encoder_sc->video_avcc->time_base = av_inv_q(input_framerate);
encoder_sc->video_avs->time_base = encoder_sc->video_avcc->time_base;
if (avcodec_open2(encoder_sc->video_avcc, encoder_sc->video_avc, NULL) < 0)
{
logging("Could not open the Codec.");
return -1;
}
avcodec_parameters_from_context(encoder_sc->video_avs->codecpar, encoder_sc->video_avcc);
return 0;
}
int encode_video(StreamingContext *decoder, StreamingContext *encoder, AVFrame *input_frame)
{
if (input_frame)
input_frame->pict_type = AV_PICTURE_TYPE_NONE;
AVPacket *output_packet = av_packet_alloc();
if (!output_packet)
{
logging("Could not allocate memory for Output Packet.");
return -1;
}
int response = avcodec_send_frame(encoder->video_avcc, input_frame);
while (response >= 0)
{
response = avcodec_receive_packet(encoder->video_avcc, output_packet);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
{
break;
}
else if (response < 0)
{
logging("Error while receiving packet from encoder: %s", av_err2str_cpp(response));
return -1;
}
output_packet->stream_index = decoder->video_index;
output_packet->duration = encoder->video_avs->time_base.den / encoder->video_avs->time_base.num / decoder->video_avs->avg_frame_rate.num * decoder->video_avs->avg_frame_rate.den;
av_packet_rescale_ts(output_packet, decoder->video_avs->time_base, encoder->video_avs->time_base);
response = av_interleaved_write_frame(encoder->avfc, output_packet);
if (response != 0)
{
logging("Error %d while receiving packet from decoder: %s", response, av_err2str_cpp(response));
return -1;
}
}
av_packet_unref(output_packet);
av_packet_free(&output_packet);
return 0;
}
int transcode_video(StreamingContext *decoder, StreamingContext *encoder, AVPacket *input_packet, AVFrame *input_frame, AVFrame *scaled_frame)
{
int response = avcodec_send_packet(decoder->video_avcc, input_packet);
if (response < 0)
{
logging("Error while sending the Packet to Decoder: %s", av_err2str_cpp(response));
return response;
}
while (response >= 0)
{
response = avcodec_receive_frame(decoder->video_avcc, input_frame);
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
{
break;
}
else if (response < 0)
{
logging("Error while receiving frame from Decoder: %s", av_err2str_cpp(response));
return response;
}
if (response >= 0)
{
scaled_frame->pts = input_frame->pts;
scaled_frame->pkt_dts = input_frame->pkt_dts;
scaled_frame->pict_type = input_frame->pict_type;
sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
if (encode_video(decoder, encoder, scaled_frame))
return -1;
}
av_frame_unref(input_frame);
}
return 0;
}
//Building the input file using the FFmpeg CLI:
//ffmpeg -y -ss 30 -i bunny_1080p_60fps.mp4 -c:v copy -c:a aac -map v:0 -map a:0 -t 5 short_bunny_streo.mp4
int main(int argc, char *argv[])
{
const int scaled_frame_width = 854;
const int scaled_frame_height = 480;
StreamingParams sp = {0};
sp.copy_audio = 1;
sp.copy_video = 0;
sp.video_codec = "libx265";
StreamingContext *decoder = (StreamingContext *)calloc(1, sizeof(StreamingContext));
//decoder->filename = argv[1];
decoder->filename = "short_bunny_streo.mp4";
StreamingContext *encoder = (StreamingContext *)calloc(1, sizeof(StreamingContext));
//encoder->filename = argv[2];
encoder->filename = "downscaled_short_bunny_streo.mp4";
//if (sp.output_extension)
//{
// strcat(encoder->filename, sp.output_extension);
//}
if (open_media(decoder->filename, &decoder->avfc))
return -1;
if (prepare_decoder(decoder))
return -1;
avformat_alloc_output_context2(&encoder->avfc, NULL, NULL, encoder->filename);
if (!encoder->avfc)
{
logging("Could not allocate memory for output Format Context.");
return -1;
}
AVRational input_framerate = av_guess_frame_rate(decoder->avfc, decoder->video_avs, NULL);
prepare_video_encoder(encoder, decoder->video_avcc, input_framerate, sp, scaled_frame_width, scaled_frame_height);
if (encoder->avfc->oformat->flags & AVFMT_GLOBALHEADER)
encoder->avfc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if (!(encoder->avfc->oformat->flags & AVFMT_NOFILE))
{
if (avio_open(&encoder->avfc->pb, encoder->filename, AVIO_FLAG_WRITE) < 0)
{
logging("could not open the output file");
return -1;
}
}
AVDictionary *muxer_opts = NULL;
if (sp.muxer_opt_key && sp.muxer_opt_value)
{
av_dict_set(&muxer_opts, sp.muxer_opt_key, sp.muxer_opt_value, 0);
}
if (avformat_write_header(encoder->avfc, &muxer_opts) < 0)
{
logging("an error occurred when opening output file");
return -1;
}
AVFrame *input_frame = av_frame_alloc();
AVFrame *scaled_frame = av_frame_alloc();
if (!input_frame || !scaled_frame)
{
logging("Failed to allocate memory for AVFrame");
return -1;
}
// scaled_frame->format = AV_PIX_FMT_YUV420P;
scaled_frame->width = scaled_frame_width;
scaled_frame->height = scaled_frame_height;
//Creating Scaling Context
encoder->sws_ctx = sws_getContext(1920, 1080, decoder->video_avcc->pix_fmt,
                                  scaled_frame->width, scaled_frame->height,
                                  decoder->video_avcc->pix_fmt,
                                  SWS_BICUBIC, NULL, NULL, NULL);
if (!encoder->sws_ctx){logging("Cannot Create Scaling Context."); return -1;}
AVPacket *input_packet = av_packet_alloc();
if (!input_packet)
{
logging("Failed to allocate memory for AVPacket.");
return -1;
}
while (av_read_frame(decoder->avfc, input_packet) >= 0)
{
if (decoder->avfc->streams[input_packet->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
if (transcode_video(decoder, encoder, input_packet, input_frame, scaled_frame))
return -1;
av_packet_unref(input_packet);
}
else
{
logging("Ignoring all nonvideo packets.");
}
}
if (encode_video(decoder, encoder, NULL))
return -1;
av_write_trailer(encoder->avfc);
if (muxer_opts != NULL)
{
av_dict_free(&muxer_opts);
muxer_opts = NULL;
}
if (input_frame != NULL)
{
av_frame_free(&input_frame);
input_frame = NULL;
}
if (input_packet != NULL)
{
av_packet_free(&input_packet);
input_packet = NULL;
}
avformat_close_input(&decoder->avfc);
avformat_free_context(decoder->avfc);
decoder->avfc = NULL;
avformat_free_context(encoder->avfc);
encoder->avfc = NULL;
avcodec_free_context(&decoder->video_avcc);
decoder->video_avcc = NULL;
avcodec_free_context(&decoder->audio_avcc);
decoder->audio_avcc = NULL;
free(decoder);
decoder = NULL;
free(encoder);
encoder = NULL;
return 0;
}