/* NOTE: "esc" is most likely a holdover from a past revision and has no use
   in the rearranged implementation. */
/*
* Hedgewars, a free turn based strategy game
* Copyright (c) 2004-2012 Andrey Korotaev <unC0Rr@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdarg.h>
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"
#ifndef AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE AVIO_WRONLY
#endif
// --- global recorder state: one recording at a time, shared by all functions ---
static AVFormatContext* g_pContainer;  // output (muxer) context
static AVOutputFormat* g_pFormat;      // chosen container format
static AVStream* g_pAStream;           // audio stream; NULL when audio is disabled
static AVStream* g_pVStream;           // video stream; NULL when video is disabled
static AVFrame* g_pAFrame;             // reusable audio frame (new encode API)
static AVFrame* g_pVFrame;             // reusable video frame; plane pointers set per call
static AVCodec* g_pACodec;             // audio encoder; NULL if lookup failed
static AVCodec* g_pVCodec;             // video encoder; NULL if lookup failed
static AVCodecContext* g_pAudio;       // audio codec context (taken from g_pAStream->codec)
static AVCodecContext* g_pVideo;       // video codec context (taken from g_pVStream->codec)
static int g_Width, g_Height;          // requested video dimensions in pixels
static uint32_t g_Frequency, g_Channels; // read from the sound dump file header
static int g_VQuality;                 // <= 100: qscale quality; > 100: bitrate in bit/s
static AVRational g_Framerate;         // video frame rate as a rational
static FILE* g_pSoundFile;             // raw sound dump produced by the engine
static int16_t* g_pSamples;            // staging buffer for one audio frame of samples
static int g_NumSamples;               // samples per audio frame read from the dump
#if LIBAVCODEC_VERSION_MAJOR < 54
// static output buffer used only by the legacy (pre-54) encode API
#define OUTBUFFER_SIZE 200000
static uint8_t g_OutBuffer[OUTBUFFER_SIZE];
#endif
// pointer to function from hwengine (uUtils.pas)
static void (*AddFileLogRaw)(const char* pString);
// Formats a printf-style message, writes it to the engine log with an
// "Error in av-wrapper" prefix and terminates the process.
// Never returns.
static void FatalError(const char* pFmt, ...)
{
    char Message[1024];
    va_list Args;

    va_start(Args, pFmt);
    vsnprintf(Message, sizeof(Message), pFmt, Args);
    va_end(Args);

    AddFileLogRaw("Error in av-wrapper: ");
    AddFileLogRaw(Message);
    AddFileLogRaw("\n");
    exit(1);
}
// Function to be called from libav for logging.
// Note: libav can call LogCallback from different threads
// (there is mutex in AddFileLogRaw).
// Log sink handed to libav via av_log_set_callback.
// Note: libav can call LogCallback from different threads
// (there is mutex in AddFileLogRaw).
static void LogCallback(void* p, int Level, const char* pFmt, va_list VaArgs)
{
    char Message[1024];

    vsnprintf(Message, sizeof(Message), pFmt, VaArgs);
    AddFileLogRaw(Message);
}
// Formats a printf-style message and forwards it to the engine log.
static void Log(const char* pFmt, ...)
{
    char Message[1024];
    va_list Args;

    va_start(Args, pFmt);
    vsnprintf(Message, sizeof(Message), pFmt, Args);
    va_end(Args);

    AddFileLogRaw(Message);
}
// Creates the audio stream in the container and configures its encoder:
// 16-bit samples at the rate/channel count read from the sound dump,
// 160 kbit/s plus variable-bitrate quality 1 where supported. Opens the
// codec and allocates the per-frame sample buffer and AVFrame.
// On failure it only logs and returns, so recording continues without audio.
// NOTE(review): if the codec fails to open, g_pAStream is left non-NULL,
// so WriteAudioFrame will still be attempted on an unopened codec — verify.
static void AddAudioStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 53
g_pAStream = avformat_new_stream(g_pContainer, g_pACodec);
#else
g_pAStream = av_new_stream(g_pContainer, 1);
#endif
if(!g_pAStream)
{
Log("Could not allocate audio stream\n");
return;
}
g_pAStream->id = 1;
// the stream owns the codec context; we just configure it
g_pAudio = g_pAStream->codec;
avcodec_get_context_defaults3(g_pAudio, g_pACodec);
g_pAudio->codec_id = g_pACodec->id;
// put parameters
g_pAudio->sample_fmt = AV_SAMPLE_FMT_S16;
g_pAudio->sample_rate = g_Frequency;
g_pAudio->channels = g_Channels;
// set quality
g_pAudio->bit_rate = 160000;
// for codecs that support variable bitrate use it, it should be better
g_pAudio->flags |= CODEC_FLAG_QSCALE;
g_pAudio->global_quality = 1*FF_QP2LAMBDA;
// some formats want stream headers to be separate
if (g_pFormat->flags & AVFMT_GLOBALHEADER)
g_pAudio->flags |= CODEC_FLAG_GLOBAL_HEADER;
// open it
#if LIBAVCODEC_VERSION_MAJOR >= 53
if (avcodec_open2(g_pAudio, g_pACodec, NULL) < 0)
#else
if (avcodec_open(g_pAudio, g_pACodec) < 0)
#endif
{
Log("Could not open audio codec %s\n", g_pACodec->long_name);
return;
}
// pick the frame size: codecs with a fixed frame_size dictate it; for
// variable-frame-size codecs we feed 4096 samples per call
#if LIBAVCODEC_VERSION_MAJOR >= 54
if (g_pACodec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
#else
if (g_pAudio->frame_size == 0)
#endif
g_NumSamples = 4096;
else
g_NumSamples = g_pAudio->frame_size;
// staging buffer: g_NumSamples interleaved 16-bit samples per channel
g_pSamples = (int16_t*)av_malloc(g_NumSamples*g_Channels*sizeof(int16_t));
g_pAFrame = avcodec_alloc_frame();
if (!g_pAFrame)
{
Log("Could not allocate frame\n");
return;
}
}
// returns non-zero if there is more sound
// Reads one frame's worth of samples from the sound dump, encodes it and
// muxes the packet into the container.
// returns non-zero if there is more sound
static int WriteAudioFrame()
{
if (!g_pAStream)
return 0;
AVPacket Packet = { 0 };
av_init_packet(&Packet);
// each sample is 2 bytes per channel; NumSamples is how many we actually got
int NumSamples = fread(g_pSamples, 2*g_Channels, g_NumSamples, g_pSoundFile);
#if LIBAVCODEC_VERSION_MAJOR >= 53
AVFrame* pFrame = NULL;
if (NumSamples > 0)
{
g_pAFrame->nb_samples = NumSamples;
avcodec_fill_audio_frame(g_pAFrame, g_Channels, AV_SAMPLE_FMT_S16,
(uint8_t*)g_pSamples, NumSamples*2*g_Channels, 1);
pFrame = g_pAFrame;
}
// when NumSamples == 0 we still need to call encode_audio2 to flush
int got_packet;
if (avcodec_encode_audio2(g_pAudio, &Packet, pFrame, &got_packet) != 0)
FatalError("avcodec_encode_audio2 failed");
if (!got_packet)
return 0;
#else
// legacy API: no flush semantics, stop as soon as the dump is exhausted
if (NumSamples == 0)
return 0;
int BufferSize = OUTBUFFER_SIZE;
if (g_pAudio->frame_size == 0)
BufferSize = NumSamples*g_Channels*2;
Packet.size = avcodec_encode_audio(g_pAudio, g_OutBuffer, BufferSize, g_pSamples);
// size 0 means the encoder buffered the data; report "more to come"
if (Packet.size == 0)
return 1;
// rescale the encoder timestamp into the stream time base
if (g_pAudio->coded_frame && g_pAudio->coded_frame->pts != AV_NOPTS_VALUE)
Packet.pts = av_rescale_q(g_pAudio->coded_frame->pts, g_pAudio->time_base, g_pAStream->time_base);
Packet.flags |= AV_PKT_FLAG_KEY;
Packet.data = g_OutBuffer;
#endif
// Write the compressed frame to the media file.
Packet.stream_index = g_pAStream->index;
if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
FatalError("Error while writing audio frame");
return 1;
}
// add a video output stream
// add a video output stream
// Configures size (forced even for YUV420), time base from g_Framerate,
// quality (qscale vs bitrate per g_VQuality), x264-specific settings for
// old libav versions, opens the codec and allocates the reusable frame.
// Any failure here is fatal (video was explicitly requested).
static void AddVideoStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 53
g_pVStream = avformat_new_stream(g_pContainer, g_pVCodec);
#else
g_pVStream = av_new_stream(g_pContainer, 0);
#endif
if (!g_pVStream)
FatalError("Could not allocate video stream");
// the stream owns the codec context; we just configure it
g_pVideo = g_pVStream->codec;
avcodec_get_context_defaults3(g_pVideo, g_pVCodec);
g_pVideo->codec_id = g_pVCodec->id;
// put parameters
// resolution must be a multiple of two
g_pVideo->width = g_Width & ~1; // make even (dimensions should be even)
g_pVideo->height = g_Height & ~1; // make even
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
// time base is the reciprocal of the frame rate, hence num/den swapped
g_pVideo->time_base.den = g_Framerate.num;
g_pVideo->time_base.num = g_Framerate.den;
//g_pVideo->gop_size = 12; /* emit one intra frame every twelve frames at most */
g_pVideo->pix_fmt = PIX_FMT_YUV420P;
// set quality: values above 100 are treated as a bitrate, otherwise qscale
if (g_VQuality > 100)
g_pVideo->bit_rate = g_VQuality;
else
{
g_pVideo->flags |= CODEC_FLAG_QSCALE;
g_pVideo->global_quality = g_VQuality*FF_QP2LAMBDA;
}
// some formats want stream headers to be separate
if (g_pFormat->flags & AVFMT_GLOBALHEADER)
g_pVideo->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_MAJOR < 53
// for some versions of ffmpeg x264 options must be set explicitly
// (these mirror x264's "medium" preset defaults)
if (strcmp(g_pVCodec->name, "libx264") == 0)
{
g_pVideo->coder_type = FF_CODER_TYPE_AC;
g_pVideo->flags |= CODEC_FLAG_LOOP_FILTER;
g_pVideo->crf = 23;
g_pVideo->thread_count = 3;
g_pVideo->me_cmp = FF_CMP_CHROMA;
g_pVideo->partitions = X264_PART_I8X8 | X264_PART_I4X4 | X264_PART_P8X8 | X264_PART_B8X8;
g_pVideo->me_method = ME_HEX;
g_pVideo->me_subpel_quality = 7;
g_pVideo->me_range = 16;
g_pVideo->gop_size = 250;
g_pVideo->keyint_min = 25;
g_pVideo->scenechange_threshold = 40;
g_pVideo->i_quant_factor = 0.71;
g_pVideo->b_frame_strategy = 1;
g_pVideo->qcompress = 0.6;
g_pVideo->qmin = 10;
g_pVideo->qmax = 51;
g_pVideo->max_qdiff = 4;
g_pVideo->max_b_frames = 3;
g_pVideo->refs = 3;
g_pVideo->directpred = 1;
g_pVideo->trellis = 1;
g_pVideo->flags2 = CODEC_FLAG2_BPYRAMID | CODEC_FLAG2_MIXED_REFS | CODEC_FLAG2_WPRED | CODEC_FLAG2_8X8DCT | CODEC_FLAG2_FASTPSKIP;
g_pVideo->weighted_p_pred = 2;
}
#endif
// open the codec
#if LIBAVCODEC_VERSION_MAJOR >= 53
// newer libav takes x264 options as a dictionary instead
AVDictionary* pDict = NULL;
if (strcmp(g_pVCodec->name, "libx264") == 0)
av_dict_set(&pDict, "preset", "medium", 0);
if (avcodec_open2(g_pVideo, g_pVCodec, &pDict) < 0)
#else
if (avcodec_open(g_pVideo, g_pVCodec) < 0)
#endif
FatalError("Could not open video codec %s", g_pVCodec->long_name);
g_pVFrame = avcodec_alloc_frame();
if (!g_pVFrame)
FatalError("Could not allocate frame");
// YUV420: full-width luma plane, half-width chroma planes; the plane data
// pointers themselves are filled in per frame by AVWrapper_WriteFrame
g_pVFrame->linesize[0] = g_Width;
g_pVFrame->linesize[1] = g_Width/2;
g_pVFrame->linesize[2] = g_Width/2;
g_pVFrame->linesize[3] = 0;
}
// Encodes and muxes one video frame; pFrame == NULL flushes delayed frames
// (new encode API only). Before writing video it catches the audio stream
// up to the current video time so the two stay interleaved.
// Returns non-zero when a packet was written (used by the flush loop in
// AVWrapper_Close), zero otherwise.
static int WriteFrame(AVFrame* pFrame)
{
double AudioTime, VideoTime;
// write interleaved audio frame: pump audio until it reaches video time
if (g_pAStream)
{
VideoTime = (double)g_pVStream->pts.val*g_pVStream->time_base.num/g_pVStream->time_base.den;
do
AudioTime = (double)g_pAStream->pts.val*g_pAStream->time_base.num/g_pAStream->time_base.den;
while (AudioTime < VideoTime && WriteAudioFrame());
}
if (!g_pVStream)
return 0;
AVPacket Packet;
av_init_packet(&Packet);
Packet.data = NULL;
Packet.size = 0;
// monotonically increasing pts; starts at -1 (set in AVWrapper_Init)
g_pVFrame->pts++;
if (g_pFormat->flags & AVFMT_RAWPICTURE)
{
/* raw video case. The API will change slightly in the near
future for that. */
Packet.flags |= AV_PKT_FLAG_KEY;
Packet.stream_index = g_pVStream->index;
Packet.data = (uint8_t*)pFrame;
Packet.size = sizeof(AVPicture);
if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
FatalError("Error while writing video frame");
return 0;
}
else
{
#if LIBAVCODEC_VERSION_MAJOR >= 54
int got_packet;
if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
FatalError("avcodec_encode_video2 failed");
if (!got_packet)
return 0;
// rescale packet timestamps from codec to stream time base
if (Packet.pts != AV_NOPTS_VALUE)
Packet.pts = av_rescale_q(Packet.pts, g_pVideo->time_base, g_pVStream->time_base);
if (Packet.dts != AV_NOPTS_VALUE)
Packet.dts = av_rescale_q(Packet.dts, g_pVideo->time_base, g_pVStream->time_base);
#else
// legacy API: encode into the static buffer, timestamps via coded_frame
Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
if (Packet.size < 0)
FatalError("avcodec_encode_video failed");
if (Packet.size == 0)
return 0;
if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
if( g_pVideo->coded_frame->key_frame )
Packet.flags |= AV_PKT_FLAG_KEY;
Packet.data = g_OutBuffer;
#endif
// write the compressed frame in the media file
Packet.stream_index = g_pVStream->index;
if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
FatalError("Error while writing video frame");
return 1;
}
}
// Engine entry point: attach the three YUV420 plane buffers to the reusable
// frame object and hand it to the encoder/muxer.
void AVWrapper_WriteFrame(uint8_t* pY, uint8_t* pCb, uint8_t* pCr)
{
    uint8_t* pPlanes[3] = { pY, pCb, pCr };
    int i;

    for (i = 0; i < 3; i++)
        g_pVFrame->data[i] = pPlanes[i];
    WriteFrame(g_pVFrame);
}
void AVWrapper_Init(
void (*pAddFileLogRaw)(const char*),
const char* pFilename,
const char* pDesc,
const char* pSoundFile,
const char* pFormatName,
const char* pVCodecName,
const char* pACodecName,
int Width, int Height,
int FramerateNum, int FramerateDen,
int VQuality)
{
AddFileLogRaw = pAddFileLogRaw;
av_log_set_callback( &LogCallback );
g_Width = Width;
g_Height = Height;
g_Framerate.num = FramerateNum;
g_Framerate.den = FramerateDen;
g_VQuality = VQuality;
// initialize libav and register all codecs and formats
av_register_all();
// find format
g_pFormat = av_guess_format(pFormatName, NULL, NULL);
if (!g_pFormat)
FatalError("Format \"%s\" was not found", pFormatName);
// allocate the output media context
g_pContainer = avformat_alloc_context();
if (!g_pContainer)
FatalError("Could not allocate output context");
g_pContainer->oformat = g_pFormat;
// store description of file
av_dict_set(&g_pContainer->metadata, "comment", pDesc, 0);
// append extesnion to filename
char ext[16];
strncpy(ext, g_pFormat->extensions, 16);
ext[15] = 0;
ext[strcspn(ext,",")] = 0;
snprintf(g_pContainer->filename, sizeof(g_pContainer->filename), "%s.%s", pFilename, ext);
// find codecs
g_pVCodec = avcodec_find_encoder_by_name(pVCodecName);
g_pACodec = avcodec_find_encoder_by_name(pACodecName);
// add audio and video stream to container
g_pVStream = NULL;
g_pAStream = NULL;
if (g_pVCodec)
AddVideoStream();
else
Log("Video codec \"%s\" was not found; video will be ignored.\n", pVCodecName);
if (g_pACodec)
{
g_pSoundFile = fopen(pSoundFile, "rb");
if (g_pSoundFile)
{
fread(&g_Frequency, 4, 1, g_pSoundFile);
fread(&g_Channels, 4, 1, g_pSoundFile);
AddAudioStream();
}
else
Log("Could not open %s\n", pSoundFile);
}
else
Log("Audio codec \"%s\" was not found; audio will be ignored.\n", pACodecName);
if (!g_pAStream && !g_pVStream)
FatalError("No video, no audio, aborting...");
// write format info to log
av_dump_format(g_pContainer, 0, g_pContainer->filename, 1);
// open the output file, if needed
if (!(g_pFormat->flags & AVFMT_NOFILE))
{
if (avio_open(&g_pContainer->pb, g_pContainer->filename, AVIO_FLAG_WRITE) < 0)
FatalError("Could not open output file (%s)", g_pContainer->filename);
}
// write the stream header, if any
avformat_write_header(g_pContainer, NULL);
g_pVFrame->pts = -1;
}
// Finalizes the recording: flushes frames buffered inside the video encoder,
// drains any remaining audio, writes the container trailer, closes the output
// file and frees everything allocated by AVWrapper_Init.
void AVWrapper_Close()
{
    // output buffered frames; guard on g_pVStream so an audio-only recording
    // (video codec not found in Init) does not dereference a NULL g_pVCodec
    if (g_pVStream && (g_pVCodec->capabilities & CODEC_CAP_DELAY))
        while( WriteFrame(NULL) );

    // output any remaining audio
    while( WriteAudioFrame() );

    // write the trailer, if any.
    av_write_trailer(g_pContainer);

    // close the output file
    if (!(g_pFormat->flags & AVFMT_NOFILE))
        avio_close(g_pContainer->pb);

    // free everything
    if (g_pVStream)
    {
        avcodec_close(g_pVideo);
        av_free(g_pVideo);
        av_free(g_pVStream);
        av_free(g_pVFrame);
    }
    if (g_pAStream)
    {
        avcodec_close(g_pAudio);
        av_free(g_pAudio);
        av_free(g_pAStream);
        av_free(g_pAFrame);
        av_free(g_pSamples);
        fclose(g_pSoundFile);
    }
    av_free(g_pContainer);
}