Mirror of https://github.com/rwengine/openrw.git
wip! engine: ffmpeg 7.0 support
Signed-off-by: David Heidelberg <david@ixit.cz>
parent 51806f4ba6
commit 2e79b75224
2 changed files with 46 additions and 30 deletions
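
In short, this ports the sound loader to the FFmpeg 7.0 API surface: the avcodec_close() calls are dropped (avcodec_free_context() already closes the codec), the integer channel_layout/channels fields give way to AVChannelLayout and swr_alloc_set_opts2() behind a new HAVE_CH_LAYOUT gate keyed on libavutil 57.28.100, and the stack AVPacket set up with av_init_packet() becomes a heap AVPacket* from av_packet_alloc(). The snippet below is a minimal sketch of the resampler half of that split, not OpenRW code: the helper name makeStereoResampler and the hard-coded AV_SAMPLE_FMT_S16 output (standing in for the diff's kOutputFMT constant) are illustrative only.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/version.h>
#include <libswresample/swresample.h>
}

// Same version gate the commit introduces: AVChannelLayout and frame->ch_layout
// exist from libavutil 57.28.100 onwards.
#define HAVE_CH_LAYOUT (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100))

// Hypothetical helper, not part of SoundSource: build a stereo S16 resampler
// for a decoded frame on either side of the channel-layout API split.
static SwrContext* makeStereoResampler(const AVFrame* frame) {
    SwrContext* swr = nullptr;
#if HAVE_CH_LAYOUT
    // New API: pass AVChannelLayout pointers; swr_alloc_set_opts2() reports
    // failure through its return code instead of returning a null context.
    AVChannelLayout out_chlayout = AV_CHANNEL_LAYOUT_STEREO;
    if (swr_alloc_set_opts2(&swr, &out_chlayout, AV_SAMPLE_FMT_S16,
                            frame->sample_rate, &frame->ch_layout,
                            static_cast<AVSampleFormat>(frame->format),
                            frame->sample_rate, 0, nullptr) < 0) {
        return nullptr;
    }
#else
    // Legacy API: integer channel-layout masks; fall back to a default layout
    // when the frame does not carry one (the mono case the old code handles).
    int64_t inLayout = frame->channel_layout
                           ? static_cast<int64_t>(frame->channel_layout)
                           : av_get_default_channel_layout(frame->channels);
    swr = swr_alloc_set_opts(nullptr, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                             frame->sample_rate, inLayout,
                             static_cast<AVSampleFormat>(frame->format),
                             frame->sample_rate, 0, nullptr);
#endif
    return swr;
}

Keeping both branches behind the version check lets the tree still build against pre-5.1 FFmpeg while the new path is used on 7.0; the packet side of the port is simpler and just swaps the stack AVPacket for one obtained from av_packet_alloc(), as the hunks below show.
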
@@ -14,6 +14,8 @@ extern "C" {
 #define avio_context_free av_freep
 #endif
 
+#define HAVE_CH_LAYOUT (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100))
+
 constexpr int kNumOutputChannels = 2;
 constexpr AVSampleFormat kOutputFMT = AV_SAMPLE_FMT_S16;
 constexpr size_t kNrFramesToPreload = 50;
@@ -200,7 +202,6 @@ bool SoundSource::prepareCodecContext() {
     // Fill the codecCtx with the parameters of the codec used in the read file.
     if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) !=
         0) {
-        avcodec_close(codecContext);
         avcodec_free_context(&codecContext);
         avformat_close_input(&formatContext);
         RW_ERROR("Couldn't find parametrs for context");
@@ -209,7 +210,6 @@ bool SoundSource::prepareCodecContext() {
 
     // Initialize the decoder.
     if (avcodec_open2(codecContext, codec, nullptr) != 0) {
-        avcodec_close(codecContext);
         avcodec_free_context(&codecContext);
         avformat_close_input(&formatContext);
         RW_ERROR("Couldn't open the audio codec context");
@@ -243,7 +243,6 @@ bool SoundSource::prepareCodecContextSfx() {
         0) {
         av_free(formatContext->pb->buffer);
         avio_context_free(&formatContext->pb);
-        avcodec_close(codecContext);
         avcodec_free_context(&codecContext);
         avformat_close_input(&formatContext);
         RW_ERROR("Couldn't find parametrs for context");
@@ -254,7 +253,6 @@ bool SoundSource::prepareCodecContextSfx() {
     if (avcodec_open2(codecContext, codec, nullptr) != 0) {
         av_free(formatContext->pb->buffer);
         avio_context_free(&formatContext->pb);
-        avcodec_close(codecContext);
         avcodec_free_context(&codecContext);
         avformat_close_input(&formatContext);
         RW_ERROR("Couldn't open the audio codec context");
@@ -267,9 +265,9 @@ bool SoundSource::prepareCodecContextSfx() {
 #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
 void SoundSource::decodeFramesLegacy(size_t framesToDecode) {
     while ((framesToDecode == 0 || decodedFrames < framesToDecode) &&
-           av_read_frame(formatContext, &readingPacket) == 0) {
-        if (readingPacket.stream_index == audioStream->index) {
-            AVPacket decodingPacket = readingPacket;
+           av_read_frame(formatContext, readingPacket) == 0) {
+        if (readingPacket->stream_index == audioStream->index) {
+            AVPacket decodingPacket = *readingPacket;
 
             while (decodingPacket.size > 0) {
                 // Decode audio packet
@@ -299,7 +297,7 @@ void SoundSource::decodeFramesLegacy(size_t framesToDecode) {
                 }
             }
         }
-        av_free_packet(&readingPacket);
+        av_free_packet(readingPacket);
         ++decodedFrames;
     }
 }
@@ -315,9 +313,9 @@ void SoundSource::decodeFramesSfxWrap() {
 
 void SoundSource::decodeFrames(size_t framesToDecode) {
     while ((framesToDecode == 0 || decodedFrames < framesToDecode) &&
-           av_read_frame(formatContext, &readingPacket) == 0) {
-        if (readingPacket.stream_index == audioStream->index) {
-            AVPacket decodingPacket = readingPacket;
+           av_read_frame(formatContext, readingPacket) == 0) {
+        if (readingPacket->stream_index == audioStream->index) {
+            AVPacket decodingPacket = *readingPacket;
 
             int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
             int receiveFrame = 0;
@@ -342,7 +340,7 @@ void SoundSource::decodeFrames(size_t framesToDecode) {
                 }
             }
         }
-        av_packet_unref(&readingPacket);
+        av_packet_unref(readingPacket);
         ++decodedFrames;
     }
 }
@@ -359,21 +357,38 @@ void SoundSource::decodeAndResampleFrames(const std::filesystem::path& filePath,
                                           size_t framesToDecode) {
     RW_UNUSED(filePath); // it's used by macro
     AVFrame* resampled = av_frame_alloc();
+    int err = 0;
 
     while ((framesToDecode == 0 || decodedFrames < framesToDecode) &&
-           av_read_frame(formatContext, &readingPacket) == 0) {
-        if (readingPacket.stream_index == audioStream->index) {
-            int sendPacket = avcodec_send_packet(codecContext, &readingPacket);
-            av_packet_unref(&readingPacket);
+           av_read_frame(formatContext, readingPacket) == 0) {
+        if (readingPacket->stream_index == audioStream->index) {
+            int sendPacket = avcodec_send_packet(codecContext, readingPacket);
+            av_packet_unref(readingPacket);
             int receiveFrame = 0;
 
             while ((receiveFrame =
                         avcodec_receive_frame(codecContext, frame)) == 0) {
                 if (!swr) {
-                    if (frame->channels == 1 || frame->channel_layout == 0) {
+#if HAVE_CH_LAYOUT
+                    AVChannelLayout out_chlayout = AV_CHANNEL_LAYOUT_STEREO;
+                    err = swr_alloc_set_opts2(
+                        &swr, &out_chlayout, kOutputFMT, frame->sample_rate,
+                        &frame->ch_layout, // input channel layout
+                        static_cast<AVSampleFormat>(
+                            frame->format), // input format
+                        frame->sample_rate, // input sample rate
+                        0, nullptr);
+
+                    if (err < 0) {
+                        RW_ERROR(
+                            "Resampler has not been successfully allocated.");
+                        return;
+                    }
+#else
+                    if (frame->channels == 1 || frame->channel_layout == 0)
                         frame->channel_layout =
                             av_get_default_channel_layout(1);
-                    }
                     swr = swr_alloc_set_opts(
                         nullptr,
                         AV_CH_LAYOUT_STEREO, // output channel layout
@@ -384,6 +399,7 @@ void SoundSource::decodeAndResampleFrames(const std::filesystem::path& filePath,
                             frame->format), // input format
                         frame->sample_rate, // input sample rate
                         0, nullptr);
+#endif
                     if (!swr) {
                         RW_ERROR(
                             "Resampler has not been successfully allocated.");
@@ -400,10 +416,14 @@ void SoundSource::decodeAndResampleFrames(const std::filesystem::path& filePath,
                 // Decode audio packet
                 if (receiveFrame == 0 && sendPacket == 0) {
                     // Write samples to audio buffer
+#if HAVE_CH_LAYOUT
+                    resampled->ch_layout = AV_CHANNEL_LAYOUT_STEREO;
+#else
                     resampled->channel_layout = AV_CH_LAYOUT_STEREO;
+                    resampled->channels = kNumOutputChannels;
+#endif
                     resampled->sample_rate = frame->sample_rate;
                     resampled->format = kOutputFMT;
-                    resampled->channels = kNumOutputChannels;
 
                     swr_config_frame(swr, resampled, frame);
 
@@ -438,10 +458,6 @@ void SoundSource::cleanupAfterSoundLoading() {
     /// Free all data used by the frame.
     av_frame_free(&frame);
 
-    /// Close the context and free all data associated to it, but not the
-    /// context itself.
-    avcodec_close(codecContext);
-
     /// Free the context itself.
     avcodec_free_context(&codecContext);
 
@@ -453,10 +469,6 @@ void SoundSource::cleanupAfterSfxLoading() {
     /// Free all data used by the frame.
    av_frame_free(&frame);
 
-    /// Close the context and free all data associated to it, but not the
-    /// context itself.
-    avcodec_close(codecContext);
-
     /// Free the context itself.
     avcodec_free_context(&codecContext);
 
@@ -474,7 +486,11 @@ void SoundSource::exposeSoundMetadata() {
 }
 
 void SoundSource::exposeSfxMetadata(LoaderSDT& sdt) {
+#if HAVE_CH_LAYOUT
+    channels = static_cast<size_t>(codecContext->ch_layout.nb_channels);
+#else
     channels = static_cast<size_t>(codecContext->channels);
+#endif
     sampleRate = sdt.assetInfo.sampleRate;
 }
 
@@ -502,7 +518,7 @@ void SoundSource::loadFromFile(const std::filesystem::path& filePath, bool strea
     if (allocateAudioFrame() && allocateFormatContext(filePath) &&
         findAudioStream(filePath) && prepareCodecContextWrap()) {
         exposeSoundMetadata();
-        av_init_packet(&readingPacket);
+        readingPacket = av_packet_alloc();
 
         decodeFramesWrap(filePath);
 
@@ -521,7 +537,7 @@ void SoundSource::loadSfx(LoaderSDT& sdt, size_t index, bool asWave,
     if (allocateAudioFrame() && prepareFormatContextSfx(sdt, index, asWave) &&
         findAudioStreamSfx() && prepareCodecContextSfxWrap()) {
         exposeSfxMetadata(sdt);
-        av_init_packet(&readingPacket);
+        readingPacket = av_packet_alloc();
 
         decodeFramesSfxWrap();
 

@@ -93,7 +93,7 @@ private:
     const AVCodec* codec = nullptr;
     SwrContext* swr = nullptr;
     AVCodecContext* codecContext = nullptr;
-    AVPacket readingPacket;
+    AVPacket* readingPacket;
 
     // For sfx
     AVIOContext* avioContext;