// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <unordered_map>

#include "common/assert.h"
#include "common/file_util.h"
#include "common/logging/log.h"
#include "common/param_package.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/string_util.h"
#include "core/dumping/ffmpeg_backend.h"
#include "core/hw/gpu.h"
#include "video_core/renderer_base.h"
#include "video_core/video_core.h"

using namespace DynamicLibrary;

namespace VideoDumper {

void InitializeFFmpegLibraries() {
    static bool initialized = false;
    if (initialized) {
        return;
    }

    FFmpeg::avformat_network_init();
    initialized = true;
}
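
// Builds an AVDictionary from a serialized Common::ParamPackage of key/value
// pairs. The caller takes ownership of the returned dictionary.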
AVDictionary* ToAVDictionary(const std::string& serialized) {
    Common::ParamPackage param_package{serialized};
    AVDictionary* result = nullptr;
    for (const auto& [key, value] : param_package) {
        FFmpeg::av_dict_set(&result, key.c_str(), value.c_str(), 0);
    }
    return result;
}

FFmpegStream::~FFmpegStream() {
    Free();
}

bool FFmpegStream::Init(FFmpegMuxer& muxer) {
    InitializeFFmpegLibraries();

    format_context = muxer.format_context.get();
    format_context_mutex = &muxer.format_context_mutex;

    return true;
}

void FFmpegStream::Free() {
    codec_context.reset();
}

void FFmpegStream::Flush() {
    SendFrame(nullptr);
}

void FFmpegStream::WritePacket(AVPacket* packet) {
    FFmpeg::av_packet_rescale_ts(packet, codec_context->time_base, stream->time_base);
    packet->stream_index = stream->index;
    {
        std::lock_guard lock{*format_context_mutex};
        FFmpeg::av_interleaved_write_frame(format_context, packet);
    }
}
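
// The avcodec encode API is asynchronous: one avcodec_send_frame() call can
// yield zero or more packets from avcodec_receive_packet(), and sending a
// null frame flushes the encoder.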
void FFmpegStream::SendFrame(AVFrame* frame) {
    // Initialize packet
    AVPacket* packet = FFmpeg::av_packet_alloc();
    if (!packet) {
        LOG_ERROR(Render, "Frame dropped: av_packet_alloc failed");
        return;
    }
    SCOPE_EXIT({ FFmpeg::av_packet_free(&packet); });
    packet->data = nullptr;
    packet->size = 0;

    // Encode frame
    if (FFmpeg::avcodec_send_frame(codec_context.get(), frame) < 0) {
        LOG_ERROR(Render, "Frame dropped: could not send frame");
        return;
    }
    int error = 1;
    while (error >= 0) {
        error = FFmpeg::avcodec_receive_packet(codec_context.get(), packet);
        if (error == AVERROR(EAGAIN) || error == AVERROR_EOF)
            return;
        if (error < 0) {
            LOG_ERROR(Render, "Frame dropped: could not encode frame");
            return;
        } else {
            // Write frame to video file
            WritePacket(packet);
        }
    }
}

FFmpegVideoStream::~FFmpegVideoStream() {
    Free();
}

// This is modified from libavcodec/decode.c
// The original version was broken
static AVPixelFormat GetPixelFormat(AVCodecContext* avctx, const AVPixelFormat* fmt) {
    // Choose a software pixel format if any, preferring those in the front of the list
    for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
        const AVPixFmtDescriptor* desc = FFmpeg::av_pix_fmt_desc_get(fmt[i]);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
            return fmt[i];
        }
    }

    // Finally, traverse the list in order and choose the first entry
    // with no external dependencies (if there is no hardware configuration
    // information available then this just picks the first entry).
    for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
        const AVCodecHWConfig* config;
        for (int j = 0;; j++) {
            config = FFmpeg::avcodec_get_hw_config(avctx->codec, j);
            if (!config || config->pix_fmt == fmt[i]) {
                break;
            }
        }
        if (!config) {
            // No specific config available, so the decoder must be able
            // to handle this format without any additional setup.
            return fmt[i];
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
            // Usable with only internal setup.
            return fmt[i];
        }
    }

    // Nothing is usable, give up.
    return AV_PIX_FMT_NONE;
}

bool FFmpegVideoStream::Init(FFmpegMuxer& muxer, const Layout::FramebufferLayout& layout_) {
    InitializeFFmpegLibraries();

    if (!FFmpegStream::Init(muxer)) {
        return false;
    }

    layout = layout_;
    frame_count = 0;

    // Initialize video codec
    const AVCodec* codec =
        FFmpeg::avcodec_find_encoder_by_name(Settings::values.video_encoder.c_str());
    codec_context.reset(FFmpeg::avcodec_alloc_context3(codec));
    if (!codec || !codec_context) {
        LOG_ERROR(Render, "Could not find video encoder or allocate video codec context");
        return false;
    }

    // Configure video codec context
    codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
    codec_context->bit_rate = Settings::values.video_bitrate;
    codec_context->width = layout.width;
    codec_context->height = layout.height;
    // Use 60fps here, since the video is already filtered (resampled)
    codec_context->time_base.num = 1;
    codec_context->time_base.den = 60;
    codec_context->gop_size = 12;

    // Get pixel format for codec
    auto options = ToAVDictionary(Settings::values.video_encoder_options);
    auto pixel_format_opt = FFmpeg::av_dict_get(options, "pixel_format", nullptr, 0);
    if (pixel_format_opt) {
        sw_pixel_format = FFmpeg::av_get_pix_fmt(pixel_format_opt->value);
    } else if (codec->pix_fmts) {
        sw_pixel_format = GetPixelFormat(codec_context.get(), codec->pix_fmts);
    } else {
        sw_pixel_format = AV_PIX_FMT_YUV420P;
    }
    if (sw_pixel_format == AV_PIX_FMT_NONE) {
        // This encoder requires HW context configuration.
        if (!InitHWContext(codec)) {
            LOG_ERROR(Render, "Failed to initialize HW context");
            return false;
        }
    } else {
        requires_hw_frames = false;
        codec_context->pix_fmt = sw_pixel_format;
    }

    if (format_context->oformat->flags & AVFMT_GLOBALHEADER) {
        codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    if (FFmpeg::avcodec_open2(codec_context.get(), codec, &options) < 0) {
        LOG_ERROR(Render, "Could not open video codec");
        return false;
    }

    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
        char* buf = nullptr;
        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
        LOG_WARNING(Render, "Video encoder options not found: {}", buf);
    }

    // Create video stream
    stream = FFmpeg::avformat_new_stream(format_context, codec);
    if (!stream ||
        FFmpeg::avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
        LOG_ERROR(Render, "Could not create video stream");
        return false;
    }

    stream->time_base = codec_context->time_base;

    // Allocate frames
    current_frame.reset(FFmpeg::av_frame_alloc());
    filtered_frame.reset(FFmpeg::av_frame_alloc());

    if (requires_hw_frames) {
        hw_frame.reset(FFmpeg::av_frame_alloc());
        if (FFmpeg::av_hwframe_get_buffer(codec_context->hw_frames_ctx, hw_frame.get(), 0) < 0) {
            LOG_ERROR(Render, "Could not allocate buffer for HW frame");
            return false;
        }
    }

    return InitFilters();
}

void FFmpegVideoStream::Free() {
    FFmpegStream::Free();

    current_frame.reset();
    filtered_frame.reset();
    hw_frame.reset();
    filter_graph.reset();
    source_context = nullptr;
    sink_context = nullptr;
}
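
// Wraps the incoming frame data without copying, pushes it through the
// configured filter graph, and forwards every filtered frame to the encoder
// (uploading to a hardware frame first when the encoder requires one).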
void FFmpegVideoStream::ProcessFrame(VideoFrame& frame) {
    if (frame.width != layout.width || frame.height != layout.height) {
        LOG_ERROR(Render, "Frame dropped: resolution does not match");
        return;
    }
    // Prepare frame
    current_frame->data[0] = frame.data.data();
    current_frame->linesize[0] = frame.stride;
    current_frame->format = pixel_format;
    current_frame->width = layout.width;
    current_frame->height = layout.height;
    current_frame->pts = frame_count++;

    // Filter the frame
    if (FFmpeg::av_buffersrc_add_frame(source_context, current_frame.get()) < 0) {
        LOG_ERROR(Render, "Video frame dropped: Could not add frame to filter graph");
        return;
    }
    while (true) {
        const int error = FFmpeg::av_buffersink_get_frame(sink_context, filtered_frame.get());
        if (error == AVERROR(EAGAIN) || error == AVERROR_EOF) {
            return;
        }
        if (error < 0) {
            LOG_ERROR(Render, "Video frame dropped: Could not receive frame from filter graph");
            return;
        } else {
            if (requires_hw_frames) {
                if (FFmpeg::av_hwframe_transfer_data(hw_frame.get(), filtered_frame.get(), 0) < 0) {
                    LOG_ERROR(Render, "Video frame dropped: Could not upload to HW frame");
                    return;
                }
                SendFrame(hw_frame.get());
            } else {
                SendFrame(filtered_frame.get());
            }

            FFmpeg::av_frame_unref(filtered_frame.get());
        }
    }
}

bool FFmpegVideoStream::InitHWContext(const AVCodec* codec) {
    for (std::size_t i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i) {
        const AVCodecHWConfig* config;
        for (int j = 0;; ++j) {
            config = FFmpeg::avcodec_get_hw_config(codec, j);
            if (!config || config->pix_fmt == codec->pix_fmts[i]) {
                break;
            }
        }
        // If we are at this point, there should not be any possible HW format that does not
        // need configuration.
        ASSERT_MSG(config, "HW pixel format that does not need config should have been selected");

        if (!(config->methods & (AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX |
                                 AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX))) {
            // Maybe this format requires ad-hoc configuration, unsupported.
            continue;
        }

        codec_context->pix_fmt = codec->pix_fmts[i];

        // Create HW device context (initialized to null so the SCOPE_EXIT unref
        // stays safe if creation fails)
        AVBufferRef* hw_device_context = nullptr;
        SCOPE_EXIT({ FFmpeg::av_buffer_unref(&hw_device_context); });

        // TODO: Provide the argument here somehow.
        // This is necessary for some devices like CUDA where you must supply the GPU name.
        // This is not necessary for VAAPI, etc.
        if (FFmpeg::av_hwdevice_ctx_create(&hw_device_context, config->device_type, nullptr,
                                           nullptr, 0) < 0) {
            LOG_ERROR(Render, "Failed to create HW device context");
            continue;
        }
        codec_context->hw_device_ctx = FFmpeg::av_buffer_ref(hw_device_context);

        // Get the SW format
        AVHWFramesConstraints* constraints =
            FFmpeg::av_hwdevice_get_hwframe_constraints(hw_device_context, nullptr);
        SCOPE_EXIT({ FFmpeg::av_hwframe_constraints_free(&constraints); });

        if (constraints) {
            sw_pixel_format = constraints->valid_sw_formats ? constraints->valid_sw_formats[0]
                                                            : AV_PIX_FMT_YUV420P;
        } else {
            LOG_WARNING(Render, "Could not query HW device constraints");
            sw_pixel_format = AV_PIX_FMT_YUV420P;
        }

        // For encoders that only need the HW device
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) {
            requires_hw_frames = false;
            return true;
        }

        requires_hw_frames = true;

        // Create HW frames context
        AVBufferRef* hw_frames_context_ref = nullptr;
        SCOPE_EXIT({ FFmpeg::av_buffer_unref(&hw_frames_context_ref); });

        if (!(hw_frames_context_ref = FFmpeg::av_hwframe_ctx_alloc(hw_device_context))) {
            LOG_ERROR(Render, "Failed to create HW frames context");
            continue;
        }

        AVHWFramesContext* hw_frames_context =
            reinterpret_cast<AVHWFramesContext*>(hw_frames_context_ref->data);
        hw_frames_context->format = codec->pix_fmts[i];
        hw_frames_context->sw_format = sw_pixel_format;
        hw_frames_context->width = codec_context->width;
        hw_frames_context->height = codec_context->height;
        hw_frames_context->initial_pool_size = 20; // value from FFmpeg's example

        if (FFmpeg::av_hwframe_ctx_init(hw_frames_context_ref) < 0) {
            LOG_ERROR(Render, "Failed to initialize HW frames context");
            continue;
        }

        codec_context->hw_frames_ctx = FFmpeg::av_buffer_ref(hw_frames_context_ref);
        return true;
    }

    LOG_ERROR(Render, "Failed to find a usable HW pixel format");
    return false;
}
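
// Builds the filter graph that converts frames as produced by the renderer
// into frames the encoder accepts. "in" is the buffer source that
// ProcessFrame feeds; "out" is the buffer sink it drains.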
bool FFmpegVideoStream::InitFilters() {
    filter_graph.reset(FFmpeg::avfilter_graph_alloc());

    const AVFilter* source = FFmpeg::avfilter_get_by_name("buffer");
    const AVFilter* sink = FFmpeg::avfilter_get_by_name("buffersink");
    if (!source || !sink) {
        LOG_ERROR(Render, "Could not find buffer source or sink");
        return false;
    }

    // Configure buffer source
    static constexpr AVRational src_time_base{static_cast<int>(GPU::frame_ticks),
                                              static_cast<int>(BASE_CLOCK_RATE_ARM11)};
    const std::string in_args =
        fmt::format("video_size={}x{}:pix_fmt={}:time_base={}/{}:pixel_aspect=1", layout.width,
                    layout.height, pixel_format, src_time_base.num, src_time_base.den);
    if (FFmpeg::avfilter_graph_create_filter(&source_context, source, "in", in_args.c_str(),
                                             nullptr, filter_graph.get()) < 0) {
        LOG_ERROR(Render, "Could not create buffer source");
        return false;
    }

    // Configure buffer sink
    if (FFmpeg::avfilter_graph_create_filter(&sink_context, sink, "out", nullptr, nullptr,
                                             filter_graph.get()) < 0) {
        LOG_ERROR(Render, "Could not create buffer sink");
        return false;
    }

// Point av_opt_set_int_list to correct functions.
#define av_int_list_length_for_size FFmpeg::av_int_list_length_for_size
#define av_opt_set_bin FFmpeg::av_opt_set_bin

    const AVPixelFormat pix_fmts[] = {sw_pixel_format, AV_PIX_FMT_NONE};
    if (av_opt_set_int_list(sink_context, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE,
                            AV_OPT_SEARCH_CHILDREN) < 0) {
        LOG_ERROR(Render, "Could not set output pixel format");
        return false;
    }

    // Initialize filter graph
    // `outputs` as in outputs of the 'previous' graphs
    AVFilterInOut* outputs = FFmpeg::avfilter_inout_alloc();
    outputs->name = FFmpeg::av_strdup("in");
    outputs->filter_ctx = source_context;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    // `inputs` as in inputs to the 'next' graphs
    AVFilterInOut* inputs = FFmpeg::avfilter_inout_alloc();
    inputs->name = FFmpeg::av_strdup("out");
    inputs->filter_ctx = sink_context;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    SCOPE_EXIT({
        FFmpeg::avfilter_inout_free(&outputs);
        FFmpeg::avfilter_inout_free(&inputs);
    });

    if (FFmpeg::avfilter_graph_parse_ptr(filter_graph.get(), filter_graph_desc.data(), &inputs,
                                         &outputs, nullptr) < 0) {
        LOG_ERROR(Render, "Could not parse or create filter graph");
        return false;
    }
    if (FFmpeg::avfilter_graph_config(filter_graph.get(), nullptr) < 0) {
        LOG_ERROR(Render, "Could not configure filter graph");
        return false;
    }

    return true;
}

FFmpegAudioStream::~FFmpegAudioStream() {
    Free();
}
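
// Audio is encoded at a sample rate the encoder actually supports, preferring
// the native rate when possible (some encoders, such as libmp3lame, do not
// accept it); swresample converts between the two rates.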
bool FFmpegAudioStream::Init(FFmpegMuxer& muxer) {
    InitializeFFmpegLibraries();

    if (!FFmpegStream::Init(muxer)) {
        return false;
    }

    frame_count = 0;

    // Initialize audio codec
    const AVCodec* codec =
        FFmpeg::avcodec_find_encoder_by_name(Settings::values.audio_encoder.c_str());
    codec_context.reset(FFmpeg::avcodec_alloc_context3(codec));
    if (!codec || !codec_context) {
        LOG_ERROR(Render, "Could not find audio encoder or allocate audio codec context");
        return false;
    }

    // Configure audio codec context
    codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
    codec_context->bit_rate = Settings::values.audio_bitrate;
    if (codec->sample_fmts) {
        codec_context->sample_fmt = codec->sample_fmts[0];
    } else {
        codec_context->sample_fmt = AV_SAMPLE_FMT_S16P;
    }

    if (codec->supported_samplerates) {
        codec_context->sample_rate = codec->supported_samplerates[0];
        // Prefer native sample rate if supported
        const int* ptr = codec->supported_samplerates;
        while ((*ptr)) {
            if ((*ptr) == AudioCore::native_sample_rate) {
                codec_context->sample_rate = AudioCore::native_sample_rate;
                break;
            }
            ptr++;
        }
    } else {
        codec_context->sample_rate = AudioCore::native_sample_rate;
    }
    codec_context->time_base.num = 1;
    codec_context->time_base.den = codec_context->sample_rate;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100) // lavc 59.24.100
    codec_context->ch_layout = AV_CHANNEL_LAYOUT_STEREO;
#else
    codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
    codec_context->channels = 2;
#endif
    if (format_context->oformat->flags & AVFMT_GLOBALHEADER) {
        codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    AVDictionary* options = ToAVDictionary(Settings::values.audio_encoder_options);
    if (FFmpeg::avcodec_open2(codec_context.get(), codec, &options) < 0) {
        LOG_ERROR(Render, "Could not open audio codec");
        return false;
    }

    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
        char* buf = nullptr;
        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
        LOG_WARNING(Render, "Audio encoder options not found: {}", buf);
    }

    if (codec_context->frame_size) {
        frame_size = static_cast<u64>(codec_context->frame_size);
    } else { // Variable frame size encoders report 0; use one HLE audio frame instead
        frame_size = std::tuple_size<AudioCore::StereoFrame16>::value;
    }

    // Create audio stream
    stream = FFmpeg::avformat_new_stream(format_context, codec);
    if (!stream ||
        FFmpeg::avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
        LOG_ERROR(Render, "Could not create audio stream");
        return false;
    }

    // Allocate frame
    audio_frame.reset(FFmpeg::av_frame_alloc());
    audio_frame->format = codec_context->sample_fmt;
    audio_frame->sample_rate = codec_context->sample_rate;

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100) // lavc 59.24.100
    auto num_channels = codec_context->ch_layout.nb_channels;
    audio_frame->ch_layout = codec_context->ch_layout;
    SwrContext* context = nullptr;
    FFmpeg::swr_alloc_set_opts2(&context, &codec_context->ch_layout, codec_context->sample_fmt,
                                codec_context->sample_rate, &codec_context->ch_layout,
                                AV_SAMPLE_FMT_S16P, AudioCore::native_sample_rate, 0, nullptr);
#else
    auto num_channels = codec_context->channels;
    audio_frame->channel_layout = codec_context->channel_layout;
    audio_frame->channels = num_channels;
    auto* context = FFmpeg::swr_alloc_set_opts(
        nullptr, codec_context->channel_layout, codec_context->sample_fmt,
        codec_context->sample_rate, codec_context->channel_layout, AV_SAMPLE_FMT_S16P,
        AudioCore::native_sample_rate, 0, nullptr);
#endif

    if (!context) {
        LOG_ERROR(Render, "Could not create SWR context");
        return false;
    }
    swr_context.reset(context);
    if (FFmpeg::swr_init(swr_context.get()) < 0) {
        LOG_ERROR(Render, "Could not init SWR context");
        return false;
    }

    // Allocate resampled data
    int error = FFmpeg::av_samples_alloc_array_and_samples(
        &resampled_data, nullptr, num_channels, frame_size, codec_context->sample_fmt, 0);
    if (error < 0) {
        LOG_ERROR(Render, "Could not allocate samples storage");
        return false;
    }

    return true;
}

void FFmpegAudioStream::Free() {
    FFmpegStream::Free();

    audio_frame.reset();
    swr_context.reset();
    // Free resampled data
    if (resampled_data) {
        FFmpeg::av_freep(&resampled_data[0]);
    }
    FFmpeg::av_freep(&resampled_data);
}
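
// swresample buffers input internally, so all incoming samples are forwarded
// to it and resampled data is drained at most one frame_size at a time.
// `offset` records how much of the current output frame has been filled when
// there is not yet enough resampled data to complete it.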
void FFmpegAudioStream::ProcessFrame(const VariableAudioFrame& channel0,
                                     const VariableAudioFrame& channel1) {
    ASSERT_MSG(channel0.size() == channel1.size(),
               "Frames of the two channels must have the same number of samples");

    const auto sample_size = FFmpeg::av_get_bytes_per_sample(codec_context->sample_fmt);
    std::array<const u8*, 2> src_data = {reinterpret_cast<const u8*>(channel0.data()),
                                         reinterpret_cast<const u8*>(channel1.data())};

    std::array<u8*, 2> dst_data;
    if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
        dst_data = {resampled_data[0] + sample_size * offset,
                    resampled_data[1] + sample_size * offset};
    } else {
        dst_data = {resampled_data[0] + sample_size * offset * 2}; // 2 channels
    }

    auto resampled_count =
        FFmpeg::swr_convert(swr_context.get(), dst_data.data(), frame_size - offset,
                            src_data.data(), static_cast<int>(channel0.size()));
    if (resampled_count < 0) {
        LOG_ERROR(Render, "Audio frame dropped: Could not resample data");
        return;
    }

    offset += resampled_count;
    if (offset < frame_size) { // Still not enough to form a frame
        return;
    }

    while (true) {
        // Prepare frame
        audio_frame->nb_samples = frame_size;
        audio_frame->data[0] = resampled_data[0];
        if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
            audio_frame->data[1] = resampled_data[1];
        }
        audio_frame->pts = frame_count * frame_size;
        frame_count++;

        SendFrame(audio_frame.get());

        // swr_convert buffers input internally. Try to get more resampled data
        resampled_count =
            FFmpeg::swr_convert(swr_context.get(), resampled_data, frame_size, nullptr, 0);
        if (resampled_count < 0) {
            LOG_ERROR(Render, "Audio frame dropped: Could not resample data");
            return;
        }
        if (resampled_count < frame_size) {
            offset = resampled_count;
            break;
        }
    }
}

void FFmpegAudioStream::Flush() {
    // Send the last samples
    audio_frame->nb_samples = offset;
    audio_frame->data[0] = resampled_data[0];
    if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
        audio_frame->data[1] = resampled_data[1];
    }
    audio_frame->pts = frame_count * frame_size;

    SendFrame(audio_frame.get());

    FFmpegStream::Flush();
}

FFmpegMuxer::~FFmpegMuxer() {
    Free();
}

bool FFmpegMuxer::Init(const std::string& path, const Layout::FramebufferLayout& layout) {
    InitializeFFmpegLibraries();

    if (!FileUtil::CreateFullPath(path)) {
        return false;
    }

    // Get output format
    const auto format = Settings::values.output_format;
    auto* output_format = FFmpeg::av_guess_format(format.c_str(), path.c_str(), nullptr);
    if (!output_format) {
        LOG_ERROR(Render, "Could not get format {}", format);
        return false;
    }

    // Initialize format context
    auto* format_context_raw = format_context.get();
    if (FFmpeg::avformat_alloc_output_context2(&format_context_raw, output_format, nullptr,
                                               path.c_str()) < 0) {
        LOG_ERROR(Render, "Could not allocate output context");
        return false;
    }
    format_context.reset(format_context_raw);

    if (!video_stream.Init(*this, layout))
        return false;
    if (!audio_stream.Init(*this))
        return false;

    AVDictionary* options = ToAVDictionary(Settings::values.format_options);
    // Open video file
    if (FFmpeg::avio_open(&format_context->pb, path.c_str(), AVIO_FLAG_WRITE) < 0 ||
        FFmpeg::avformat_write_header(format_context.get(), &options)) {
        LOG_ERROR(Render, "Could not open {}", path);
        return false;
    }
    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
        char* buf = nullptr;
        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
        LOG_WARNING(Render, "Format options not found: {}", buf);
    }

    LOG_INFO(Render, "Dumping frames to {} ({}x{})", path, layout.width, layout.height);
    return true;
}

void FFmpegMuxer::Free() {
    video_stream.Free();
    audio_stream.Free();
    format_context.reset();
}

void FFmpegMuxer::ProcessVideoFrame(VideoFrame& frame) {
    video_stream.ProcessFrame(frame);
}

void FFmpegMuxer::ProcessAudioFrame(const VariableAudioFrame& channel0,
                                    const VariableAudioFrame& channel1) {
    audio_stream.ProcessFrame(channel0, channel1);
}

void FFmpegMuxer::FlushVideo() {
    video_stream.Flush();
}

void FFmpegMuxer::FlushAudio() {
    audio_stream.Flush();
}
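
// Writing a null packet through av_interleaved_write_frame() flushes the
// muxer's interleaving queue, so every buffered packet hits the file before
// the trailer is written.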
void FFmpegMuxer::WriteTrailer() {
    std::lock_guard lock{format_context_mutex};
    FFmpeg::av_interleaved_write_frame(format_context.get(), nullptr);
    FFmpeg::av_write_trailer(format_context.get());
}

FFmpegBackend::FFmpegBackend() = default;

FFmpegBackend::~FFmpegBackend() {
    ASSERT_MSG(!IsDumping(), "Dumping must be stopped first");

    if (video_processing_thread.joinable())
        video_processing_thread.join();
    if (audio_processing_thread.joinable())
        audio_processing_thread.join();
    ffmpeg.Free();
}
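
// Video frames are exchanged through two buffers: the emulator fills
// video_frame_buffers[next_buffer] while the worker thread encodes
// video_frame_buffers[current_buffer], with event1/event2 handing the buffers
// back and forth. Audio samples instead go through one queue per channel.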
bool FFmpegBackend::StartDumping(const std::string& path, const Layout::FramebufferLayout& layout) {
    InitializeFFmpegLibraries();

    if (!ffmpeg.Init(path, layout)) {
        ffmpeg.Free();
        return false;
    }

    video_layout = layout;

    if (video_processing_thread.joinable()) {
        video_processing_thread.join();
    }
    video_processing_thread = std::thread([&] {
        event1.Set();
        while (true) {
            event2.Wait();
            current_buffer = (current_buffer + 1) % 2;
            next_buffer = (current_buffer + 1) % 2;
            event1.Set();
            // Process this frame
            auto& frame = video_frame_buffers[current_buffer];
            if (frame.width == 0 && frame.height == 0) {
                // An empty frame marks the end of frame data
                ffmpeg.FlushVideo();
                break;
            }
            ffmpeg.ProcessVideoFrame(frame);
        }
        // Finish audio execution first if not done yet
        if (audio_processing_thread.joinable())
            audio_processing_thread.join();
        EndDumping();
    });

    if (audio_processing_thread.joinable()) {
        audio_processing_thread.join();
    }
    audio_processing_thread = std::thread([&] {
        VariableAudioFrame channel0, channel1;
        while (true) {
            channel0 = audio_frame_queues[0].PopWait();
            channel1 = audio_frame_queues[1].PopWait();
            if (channel0.empty()) {
                // An empty frame marks the end of frame data
                ffmpeg.FlushAudio();
                break;
            }
            ffmpeg.ProcessAudioFrame(channel0, channel1);
        }
    });

    VideoCore::g_renderer->PrepareVideoDumping();
    is_dumping = true;

    return true;
}

void FFmpegBackend::AddVideoFrame(VideoFrame frame) {
    event1.Wait();
    video_frame_buffers[next_buffer] = std::move(frame);
    event2.Set();
}

void FFmpegBackend::AddAudioFrame(AudioCore::StereoFrame16 frame) {
    // Deinterleave the stereo frame into one buffer per channel
    std::array<VariableAudioFrame, 2> refactored_frame;
    for (auto& channel : refactored_frame) {
        channel.resize(frame.size());
    }
    for (std::size_t i = 0; i < frame.size(); i++) {
        refactored_frame[0][i] = frame[i][0];
        refactored_frame[1][i] = frame[i][1];
    }

    audio_frame_queues[0].Push(std::move(refactored_frame[0]));
    audio_frame_queues[1].Push(std::move(refactored_frame[1]));
}

void FFmpegBackend::AddAudioSample(const std::array<s16, 2>& sample) {
    audio_frame_queues[0].Push(VariableAudioFrame{sample[0]});
    audio_frame_queues[1].Push(VariableAudioFrame{sample[1]});
}

void FFmpegBackend::StopDumping() {
    is_dumping = false;
    VideoCore::g_renderer->CleanupVideoDumping();

    // Flush the video processing queue
    AddVideoFrame(VideoFrame());
    for (auto i : {0, 1}) {
        // Flush the audio processing queue
        audio_frame_queues[i].Push(VariableAudioFrame());
    }
    // Wait until processing ends
    processing_ended.Wait();
}

bool FFmpegBackend::IsDumping() const {
    return is_dumping.load(std::memory_order_relaxed);
}

Layout::FramebufferLayout FFmpegBackend::GetLayout() const {
    return video_layout;
}

void FFmpegBackend::EndDumping() {
    LOG_INFO(Render, "Ending frame dumping");

    ffmpeg.WriteTrailer();
    ffmpeg.Free();
    processing_ended.Set();
}

// To std::string, but handles nullptr
std::string ToStdString(const char* str, const std::string& fallback = "") {
    return str ? std::string{str} : fallback;
}

std::string FormatDuration(s64 duration) {
    // The following is implemented according to libavutil code (opt.c)
    std::string out;
    if (duration < 0 && duration != std::numeric_limits<s64>::min()) {
        out.append("-");
        duration = -duration;
    }
    if (duration == std::numeric_limits<s64>::max()) {
        return "INT64_MAX";
    } else if (duration == std::numeric_limits<s64>::min()) {
        return "INT64_MIN";
    } else if (duration > 3600ll * 1000000ll) {
        out.append(fmt::format("{}:{:02d}:{:02d}.{:06d}", duration / 3600000000ll,
                               ((duration / 60000000ll) % 60), ((duration / 1000000ll) % 60),
                               duration % 1000000));
    } else if (duration > 60ll * 1000000ll) {
        out.append(fmt::format("{}:{:02d}.{:06d}", duration / 60000000ll,
                               ((duration / 1000000ll) % 60), duration % 1000000));
    } else {
        out.append(fmt::format("{}.{:06d}", duration / 1000000ll, duration % 1000000));
    }
    while (out.back() == '0') {
        out.erase(out.size() - 1, 1);
    }
    if (out.back() == '.') {
        out.erase(out.size() - 1, 1);
    }
    return out;
}

std::string FormatDefaultValue(const AVOption* option,
                               const std::vector<OptionInfo::NamedConstant>& named_constants) {
    // The following is taken and modified from libavutil code (opt.c)
    switch (option->type) {
    case AV_OPT_TYPE_BOOL: {
        const auto value = option->default_val.i64;
        if (value < 0) {
            return "auto";
        }
        return value ? "true" : "false";
    }
    case AV_OPT_TYPE_FLAGS: {
        const auto value = option->default_val.i64;
        std::string out;
        for (const auto& constant : named_constants) {
            if (!(value & constant.value)) {
                continue;
            }
            if (!out.empty()) {
                out.append("+");
            }
            out.append(constant.name);
        }
        return out.empty() ? fmt::format("{}", value) : out;
    }
    case AV_OPT_TYPE_DURATION: {
        return FormatDuration(option->default_val.i64);
    }
    case AV_OPT_TYPE_INT:
    case AV_OPT_TYPE_UINT64:
    case AV_OPT_TYPE_INT64: {
        const auto value = option->default_val.i64;
        for (const auto& constant : named_constants) {
            if (constant.value == value) {
                return constant.name;
            }
        }
        return fmt::format("{}", value);
    }
    case AV_OPT_TYPE_DOUBLE:
    case AV_OPT_TYPE_FLOAT: {
        return fmt::format("{}", option->default_val.dbl);
    }
    case AV_OPT_TYPE_RATIONAL: {
        const auto q = FFmpeg::av_d2q(option->default_val.dbl, std::numeric_limits<int>::max());
        return fmt::format("{}/{}", q.num, q.den);
    }
    case AV_OPT_TYPE_PIXEL_FMT: {
        const char* name =
            FFmpeg::av_get_pix_fmt_name(static_cast<AVPixelFormat>(option->default_val.i64));
        return ToStdString(name, "none");
    }
    case AV_OPT_TYPE_SAMPLE_FMT: {
        const char* name =
            FFmpeg::av_get_sample_fmt_name(static_cast<AVSampleFormat>(option->default_val.i64));
        return ToStdString(name, "none");
    }
    case AV_OPT_TYPE_COLOR:
    case AV_OPT_TYPE_IMAGE_SIZE:
    case AV_OPT_TYPE_STRING:
    case AV_OPT_TYPE_DICT:
    case AV_OPT_TYPE_VIDEO_RATE: {
        return ToStdString(option->default_val.str);
    }
    case AV_OPT_TYPE_CHANNEL_LAYOUT: {
        return fmt::format("{:#x}", option->default_val.i64);
    }
    default:
        return "";
    }
}
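
// Illustrative example: for an AV_OPT_TYPE_FLAGS option with default value 5 and
// named constants {"a" = 1, "b" = 2, "c" = 4}, bits 1 and 4 are set, so the
// formatted default is "a+c"; with no matching constants, the raw value "5" is used.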

void GetOptionListSingle(std::vector<OptionInfo>& out, const AVClass* av_class) {
    if (av_class == nullptr) {
        return;
    }

    const AVOption* current = nullptr;
    std::unordered_map<std::string, std::vector<OptionInfo::NamedConstant>> named_constants_map;
    // First iteration: find and place all named constants
    while ((current = FFmpeg::av_opt_next(&av_class, current))) {
        if (current->type != AV_OPT_TYPE_CONST || !current->unit) {
            continue;
        }
        named_constants_map[current->unit].push_back(
            {current->name, ToStdString(current->help), current->default_val.i64});
    }
    // Second iteration: find all options
    current = nullptr;
    while ((current = FFmpeg::av_opt_next(&av_class, current))) {
        // Currently we cannot handle binary options
        if (current->type == AV_OPT_TYPE_CONST || current->type == AV_OPT_TYPE_BINARY) {
            continue;
        }
        std::vector<OptionInfo::NamedConstant> named_constants;
        if (current->unit && named_constants_map.count(current->unit)) {
            named_constants = named_constants_map.at(current->unit);
        }
        const auto default_value = FormatDefaultValue(current, named_constants);
        out.push_back({current->name, ToStdString(current->help), current->type, default_value,
                       std::move(named_constants), current->min, current->max});
    }
}
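
// How the two passes relate (illustrative): an option and its named constants are
// linked by a shared "unit" string in the AVOption table. The first pass collects
// AV_OPT_TYPE_CONST entries (e.g. hypothetical constants "fast" and "slow" under
// unit "speed"), and the second pass attaches that list to the selectable option
// carrying the same unit, so its default can be shown by name.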

void GetOptionList(std::vector<OptionInfo>& out, const AVClass* av_class, bool search_children) {
    if (av_class == nullptr) {
        return;
    }

    GetOptionListSingle(out, av_class);

    if (!search_children) {
        return;
    }

    const AVClass* child_class = nullptr;
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(56, 53, 100) // lavu 56.53.100
    void* iter = nullptr;
    while ((child_class = FFmpeg::av_opt_child_class_iterate(av_class, &iter))) {
#else
    while ((child_class = FFmpeg::av_opt_child_class_next(av_class, child_class))) {
#endif
        GetOptionListSingle(out, child_class);
    }
}

std::vector<OptionInfo> GetOptionList(const AVClass* av_class, bool search_children) {
    std::vector<OptionInfo> out;
    GetOptionList(out, av_class, search_children);
    return out;
}
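
// Usage sketch: the generic (non-codec-specific) encoder options can be listed with
//   const auto options = GetOptionList(FFmpeg::avcodec_get_class(), false);
// which is exactly what GetEncoderGenericOptions() below does; pass true to also
// walk child classes, as done for the per-encoder private options in ListEncoders().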

std::vector<EncoderInfo> ListEncoders(AVMediaType type) {
    InitializeFFmpegLibraries();

    std::vector<EncoderInfo> out;

    const AVCodec* current = nullptr;
    void* data = nullptr; // For libavcodec to save the iteration state
    while ((current = FFmpeg::av_codec_iterate(&data))) {
        if (!FFmpeg::av_codec_is_encoder(current) || current->type != type) {
            continue;
        }
        out.push_back({current->name, ToStdString(current->long_name), current->id,
                       GetOptionList(current->priv_class, true)});
    }
    return out;
}
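
// Usage sketch (EncoderInfo field names assumed from the aggregate initialization
// above): enumerate the video encoders, e.g. to populate a codec selection list:
//   for (const auto& encoder : ListEncoders(AVMEDIA_TYPE_VIDEO)) {
//       // encoder.name, encoder.long_name, and its option list are available here
//   }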

std::vector<OptionInfo> GetEncoderGenericOptions() {
    return GetOptionList(FFmpeg::avcodec_get_class(), false);
}

std::vector<FormatInfo> ListFormats() {
    InitializeFFmpegLibraries();

    std::vector<FormatInfo> out;

    const AVOutputFormat* current = nullptr;
    void* data = nullptr; // For libavformat to save the iteration state
    while ((current = FFmpeg::av_muxer_iterate(&data))) {
        // Not const: moved into the FormatInfo below (moving a const would copy)
        auto extensions = Common::SplitString(ToStdString(current->extensions), ',');

        std::set<AVCodecID> supported_video_codecs;
        std::set<AVCodecID> supported_audio_codecs;
        // Go through all codecs
        const AVCodecDescriptor* codec = nullptr;
        while ((codec = FFmpeg::avcodec_descriptor_next(codec))) {
            if (FFmpeg::avformat_query_codec(current, codec->id, FF_COMPLIANCE_NORMAL) == 1) {
                if (codec->type == AVMEDIA_TYPE_VIDEO) {
                    supported_video_codecs.emplace(codec->id);
                } else if (codec->type == AVMEDIA_TYPE_AUDIO) {
                    supported_audio_codecs.emplace(codec->id);
                }
            }
        }

        if (supported_video_codecs.empty() || supported_audio_codecs.empty()) {
            continue;
        }

        out.push_back({current->name, ToStdString(current->long_name), std::move(extensions),
                       std::move(supported_video_codecs), std::move(supported_audio_codecs),
                       GetOptionList(current->priv_class, true)});
    }
    return out;
}
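
// Note: FFmpeg::avformat_query_codec() returns 1 when the container can store the
// given codec at the requested compliance level; formats unable to hold both a
// video and an audio stream are skipped. Usage sketch (FormatInfo field names
// assumed from the aggregate initialization above):
//   for (const auto& format : ListFormats()) {
//       // e.g. offer format.name and its extension list in a selection UI
//   }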

std::vector<OptionInfo> GetFormatGenericOptions() {
    return GetOptionList(FFmpeg::avformat_get_class(), false);
}

std::vector<std::string> GetPixelFormats() {
    std::vector<std::string> out;
    const AVPixFmtDescriptor* current = nullptr;
    while ((current = FFmpeg::av_pix_fmt_desc_next(current))) {
        out.emplace_back(current->name);
    }
    return out;
}

std::vector<std::string> GetSampleFormats() {
    std::vector<std::string> out;
    for (int current = AV_SAMPLE_FMT_U8; current < AV_SAMPLE_FMT_NB; current++) {
        out.emplace_back(FFmpeg::av_get_sample_fmt_name(static_cast<AVSampleFormat>(current)));
    }
    return out;
}

} // namespace VideoDumper