Compatible with ffmpeg-7.0.1 - ChatGPT Assistance.#1281
Open
jiapei100 wants to merge 1 commit into openscenegraph:master from
Open
Compatible with ffmpeg-7.0.1 - ChatGPT Assistance. #1281 — jiapei100 wants to merge 1 commit into openscenegraph:master from
jiapei100 wants to merge 1 commit into openscenegraph:master from
Conversation
|
Literal lifesaver, thank you. |
|
I hope this PR will be compatible with ffmpeg-5 also. BR, |
brad0
reviewed
May 12, 2024
| // populate vertex list | ||
| // Ref: http://www.opencascade.org/org/forum/thread_16694/?forum=3 | ||
| gp_Pnt pt = (triangulation->Nodes())(j).Transformed(transformation * location.Transformation()); | ||
| gp_Pnt pt = (triangulation->Node(j)).Transformed(transformation * location.Transformation()); |
There was a problem hiding this comment.
This looks to be unrelated to the FFmpeg bits.
There was a problem hiding this comment.
It is. The OpenCASCADE change is covered by #1334. I think the commit can be reduced to:
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoder.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoder.cpp
index 4de143d22..48e4375e0 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoder.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoder.cpp
@@ -38,6 +38,8 @@ static std::string AvStrError(int errnum)
FFmpegDecoder::FFmpegDecoder() :
m_audio_stream(0),
m_video_stream(0),
+ m_audio_index(-1),
+ m_video_index(-1),
m_audio_queue(100),
m_video_queue(100),
m_audio_decoder(m_audio_queue, m_clocks),
@@ -78,7 +80,7 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
}
std::string format = "video4linux2";
- iformat = av_find_input_format(format.c_str());
+ iformat = const_cast<AVInputFormat*>(av_find_input_format(format.c_str()));
if (iformat)
{
@@ -93,8 +95,8 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
}
else
{
- iformat = parameters ? parameters->getFormat() : 0;
- AVIOContext* context = parameters ? parameters->getContext() : 0;
+ iformat = parameters ? const_cast<AVInputFormat*>(parameters->getFormat()) : nullptr;
+ AVIOContext* context = parameters ? parameters->getContext() : nullptr;
if (context != NULL)
{
p_format_context = avformat_alloc_context();
@@ -105,22 +107,7 @@ bool FFmpegDecoder::open(const std::string & filename, FFmpegParameters* paramet
int error = avformat_open_input(&p_format_context, filename.c_str(), iformat, parameters->getOptions());
if (error != 0)
{
- std::string error_str;
- switch (error)
- {
- //case AVERROR_UNKNOWN: error_str = "AVERROR_UNKNOWN"; break; // same value as AVERROR_INVALIDDATA
- case AVERROR_IO: error_str = "AVERROR_IO"; break;
- case AVERROR_NUMEXPECTED: error_str = "AVERROR_NUMEXPECTED"; break;
- case AVERROR_INVALIDDATA: error_str = "AVERROR_INVALIDDATA"; break;
- case AVERROR_NOMEM: error_str = "AVERROR_NOMEM"; break;
- case AVERROR_NOFMT: error_str = "AVERROR_NOFMT"; break;
- case AVERROR_NOTSUPP: error_str = "AVERROR_NOTSUPP"; break;
- case AVERROR_NOENT: error_str = "AVERROR_NOENT"; break;
- case AVERROR_PATCHWELCOME: error_str = "AVERROR_PATCHWELCOME"; break;
- default: error_str = "Unknown error"; break;
- }
-
- throw std::runtime_error("av_open_input_file() failed : " + error_str);
+ throw std::runtime_error("avformat_open_input() failed: " + AvStrError(error));
}
m_format_context.reset(p_format_context);
@@ -303,12 +290,6 @@ bool FFmpegDecoder::readNextPacketNormal()
}
else
{
- // Make the packet data available beyond av_read_frame() logical scope.
- if ((error = av_dup_packet(&packet)) < 0) {
- OSG_FATAL << "av_dup_packet() returned " << AvStrError(error) << std::endl;
- throw std::runtime_error("av_dup_packet() failed");
- }
-
m_pending_packet = FFmpegPacket(packet);
}
}
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
index 636bddd25..f5c08d6a6 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderAudio.cpp
@@ -32,8 +32,6 @@ static int decode_audio(AVCodecContext *avctx, int16_t *samples,
int out_nb_channels,
AVSampleFormat out_sample_format)
{
-#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=32)
-
AVPacket avpkt;
av_init_packet(&avpkt);
avpkt.data = const_cast<uint8_t *>(buf);
@@ -45,21 +43,32 @@ static int decode_audio(AVCodecContext *avctx, int16_t *samples,
if (!frame)
return AVERROR(ENOMEM);
- ret = avcodec_decode_audio4(avctx, frame, &got_frame, &avpkt);
+ // Send the packet to the decoder
+ ret = avcodec_send_packet(avctx, &avpkt);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ return ret;
+ }
-#ifdef USE_AVRESAMPLE // libav's AVFrame structure does not contain a 'channels' field
- if (ret >= 0 && got_frame) {
-#else
- if (ret >= 0 && got_frame && av_frame_get_channels(frame)>0) {
-#endif
+ // Receive the frame from the decoder
+ ret = avcodec_receive_frame(avctx, frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ av_frame_free(&frame);
+ return 0;
+ } else if (ret < 0) {
+ av_frame_free(&frame);
+ return ret;
+ } else {
+ got_frame = 1;
+ }
+
+ if (ret >= 0 && got_frame && frame->ch_layout.nb_channels > 0) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int out_samples;
// if sample rate changes, number of samples is different
if ( out_sample_rate != avctx->sample_rate ) {
-// out_samples = av_rescale_rnd(swr_get_delay(swr_context, avctx->sample_rate) +
-// frame->nb_samples, out_sample_rate, avctx->sample_rate, AV_ROUND_UP);
out_samples = av_rescale_rnd(frame->nb_samples, out_sample_rate, avctx->sample_rate, AV_ROUND_UP);
}
else {
@@ -92,9 +101,9 @@ static int decode_audio(AVCodecContext *avctx, int16_t *samples,
memcpy(samples, frame->extended_data[0], plane_size);
- if (planar && avctx->channels > 1) {
+ if (planar && frame->ch_layout.nb_channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
- for (ch = 1; ch < avctx->channels; ch++) {
+ for (ch = 1; ch < frame->ch_layout.nb_channels; ch++) {
memcpy(out, frame->extended_data[ch], plane_size);
out += plane_size;
}
@@ -108,11 +117,6 @@ static int decode_audio(AVCodecContext *avctx, int16_t *samples,
}
av_frame_free(&frame);
return ret;
-
-#else
- // fallback for older versions of ffmpeg that don't have avcodec_decode_audio3.
- return avcodec_decode_audio2(avctx, samples, frame_size_ptr, buf, buf_size);
-#endif
}
@@ -151,10 +155,11 @@ void FFmpegDecoderAudio::open(AVStream * const stream, FFmpegParameters* paramet
return;
m_stream = stream;
- m_context = stream->codec;
+ m_context = avcodec_alloc_context3(nullptr);
+ avcodec_parameters_to_context(m_context, stream->codecpar);
m_in_sample_rate = m_context->sample_rate;
- m_in_nb_channels = m_context->channels;
+ m_in_nb_channels = m_context->ch_layout.nb_channels;
m_in_sample_format = m_context->sample_fmt;
AVDictionaryEntry *opt_out_sample_rate = av_dict_get( *parameters->getOptions(), "out_sample_rate", NULL, 0 );
@@ -180,49 +185,46 @@ void FFmpegDecoderAudio::open(AVStream * const stream, FFmpegParameters* paramet
|| m_in_nb_channels != m_out_nb_channels
|| m_in_sample_format != m_out_sample_format )
{
-#if 0
-printf("### CONVERTING from sample format %s TO %s\n\t\tFROM %d TO %d channels\n\t\tFROM %d Hz to %d Hz\n",
- av_get_sample_fmt_name(m_in_sample_format),
- av_get_sample_fmt_name(m_out_sample_format),
- m_in_nb_channels,
- m_out_nb_channels,
- m_in_sample_rate,
- m_out_sample_rate);
-#endif
- m_swr_context = swr_alloc_set_opts(NULL,
- av_get_default_channel_layout(m_out_nb_channels),
- m_out_sample_format,
- m_out_sample_rate,
- av_get_default_channel_layout(m_in_nb_channels),
- m_in_sample_format,
- m_in_sample_rate,
- 0, NULL );
+ AVChannelLayout in_ch_layout;
+ AVChannelLayout out_ch_layout;
+ av_channel_layout_default(&in_ch_layout, m_in_nb_channels);
+ av_channel_layout_default(&out_ch_layout, m_out_nb_channels);
+
+ m_swr_context = swr_alloc();
+ if (!m_swr_context) {
+ throw std::runtime_error("Could not allocate resampler context");
+ }
- int err = swr_init(m_swr_context);
+ av_opt_set_int(m_swr_context, "in_channel_count", in_ch_layout.nb_channels, 0);
+ av_opt_set_int(m_swr_context, "in_sample_rate", m_in_sample_rate, 0);
+ av_opt_set_sample_fmt(m_swr_context, "in_sample_fmt", m_in_sample_format, 0);
+ av_opt_set_chlayout(m_swr_context, "in_chlayout", &in_ch_layout, 0);
- if ( err ) {
+ av_opt_set_int(m_swr_context, "out_channel_count", out_ch_layout.nb_channels, 0);
+ av_opt_set_int(m_swr_context, "out_sample_rate", m_out_sample_rate, 0);
+ av_opt_set_sample_fmt(m_swr_context, "out_sample_fmt", m_out_sample_format, 0);
+ av_opt_set_chlayout(m_swr_context, "out_chlayout", &out_ch_layout, 0);
+
+ int err = swr_init(m_swr_context);
+ if (err < 0) {
char error_string[512];
- av_strerror(err, error_string, 512);
+ av_strerror(err, error_string, sizeof(error_string));
OSG_WARN << "FFmpegDecoderAudio - WARNING: Error initializing resampling context : " << error_string << std::endl;
swr_free(&m_swr_context);
- throw std::runtime_error("swr_init() failed");;
+ throw std::runtime_error("swr_init() failed");
}
}
// Check stream sanity
if (m_context->codec_id == AV_CODEC_ID_NONE)
- throw std::runtime_error("invalid audio codec");;
+ throw std::runtime_error("invalid audio codec");
// Find the decoder for the audio stream
- AVCodec * const p_codec = avcodec_find_decoder(m_context->codec_id);
+ const AVCodec *p_codec = avcodec_find_decoder(m_context->codec_id);
if (p_codec == 0)
throw std::runtime_error("avcodec_find_decoder() failed");
- // Inform the codec that we can handle truncated bitstreams
- //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
- // m_context->flags |= CODEC_FLAG_TRUNCATED;
-
// Open codec
if (avcodec_open2(m_context, p_codec, NULL) < 0)
throw std::runtime_error("avcodec_open() failed");
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
index 25df979a0..5fb712476 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
@@ -45,13 +45,13 @@ FFmpegDecoderVideo::~FFmpegDecoderVideo()
if (m_swscale_ctx)
{
sws_freeContext(m_swscale_ctx);
- m_swscale_ctx = 0;
+ m_swscale_ctx = nullptr;
}
#endif
if (m_context)
{
- avcodec_close(m_context);
+ avcodec_free_context(&m_context);
}
OSG_INFO<<"Destructed FFmpegDecoderVideo"<<std::endl;
@@ -62,7 +62,8 @@ FFmpegDecoderVideo::~FFmpegDecoderVideo()
void FFmpegDecoderVideo::open(AVStream * const stream)
{
m_stream = stream;
- m_context = stream->codec;
+ m_context = avcodec_alloc_context3(nullptr);
+ avcodec_parameters_to_context(m_context, stream->codecpar);
// Trust the video size given at this point
// (avcodec_open seems to sometimes return a 0x0 size)
@@ -74,11 +75,7 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
m_alpha_channel = (m_context->pix_fmt == AV_PIX_FMT_YUVA420P);
// Find out the framerate
- #if LIBAVCODEC_VERSION_MAJOR >= 56
m_frame_rate = av_q2d(stream->avg_frame_rate);
- #else
- m_frame_rate = av_q2d(stream->r_frame_rate);
- #endif
// Find the decoder for the video stream
m_codec = avcodec_find_decoder(m_context->codec_id);
@@ -86,10 +83,6 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
if (m_codec == 0)
throw std::runtime_error("avcodec_find_decoder() failed");
- // Inform the codec that we can handle truncated bitstreams
- //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
- // m_context->flags |= CODEC_FLAG_TRUNCATED;
-
// Open codec
if (avcodec_open2(m_context, m_codec, NULL) < 0)
throw std::runtime_error("avcodec_open() failed");
@@ -99,13 +92,13 @@ void FFmpegDecoderVideo::open(AVStream * const stream)
// Allocate converted RGB frame
m_frame_rgba.reset(av_frame_alloc());
- m_buffer_rgba[0].resize(avpicture_get_size(AV_PIX_FMT_RGB24, width(), height()));
+ m_buffer_rgba[0].resize(av_image_get_buffer_size(AV_PIX_FMT_RGB24, width(), height(), 1));
m_buffer_rgba[1].resize(m_buffer_rgba[0].size());
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height());
+ av_image_fill_arrays(m_frame_rgba->data, m_frame_rgba->linesize, &(m_buffer_rgba[0])[0], AV_PIX_FMT_RGB24, width(), height(), 1);
- // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
+ // Override get_buffer2() from codec context in order to retrieve the PTS of each frame.
m_context->opaque = this;
m_context->get_buffer2 = getBuffer;
}
@@ -123,10 +116,7 @@ void FFmpegDecoderVideo::close(bool waitForThreadToExit)
void FFmpegDecoderVideo::pause(bool pause)
{
- if(pause)
- m_paused = true;
- else
- m_paused = false;
+ m_paused = pause;
}
void FFmpegDecoderVideo::run()
@@ -167,70 +157,33 @@ void FFmpegDecoderVideo::decodeLoop()
// Decode video frame
int frame_finished = 0;
+ const int ret = avcodec_receive_frame(m_context, m_frame.get());
- // We want to use the entire packet since some codecs will require extra information for decoding
- const int bytes_decoded = avcodec_decode_video2(m_context, m_frame.get(), &frame_finished, &(packet.packet));
-
- if (bytes_decoded < 0)
- throw std::runtime_error("avcodec_decode_video failed()");
-
- m_bytes_remaining -= bytes_decoded;
- m_packet_data += bytes_decoded;
+ if (ret == 0)
+ {
+ frame_finished = 1;
+ }
+ else if (ret == AVERROR(EAGAIN))
+ {
+ break;
+ }
+ else if (ret < 0)
+ {
+ throw std::runtime_error("avcodec_receive_frame() failed");
+ }
// Publish the frame if we have decoded a complete frame
if (frame_finished)
{
-#if LIBAVCODEC_VERSION_INT <= AV_VERSION_INT(57,24,102)
- //ffmpeg-3.0 and below
- AVRational timebase;
- // Find out the frame pts
- if (m_frame->pts != int64_t(AV_NOPTS_VALUE))
- {
- pts = m_frame->pts;
- timebase = m_context->time_base;
- }
- else if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
- m_frame->opaque != 0 &&
- *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
- {
- pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
- timebase = m_stream->time_base;
- }
- else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
- {
- pts = packet.packet.dts;
- timebase = m_stream->time_base;
- }
- else
- {
- pts = 0;
- timebase = m_context->time_base;
- }
-
- pts *= av_q2d(timebase);
-
-#else
- //above ffmpeg-3.0
- // Find out the frame pts
if (m_frame->pts != int64_t(AV_NOPTS_VALUE))
{
pts = av_q2d(m_stream->time_base) * m_frame->pts;
}
- else if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
- m_frame->opaque != 0 &&
- *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
- {
- pts = av_q2d(m_stream->time_base) * *reinterpret_cast<const int64_t*>(m_frame->opaque);
- }
- else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
- {
- pts = av_q2d(m_stream->time_base) * packet.packet.dts;
- }
else
{
pts = 0;
}
-#endif
+
const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(av_inv_q(m_context->framerate)), pts);
const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);
@@ -259,6 +212,7 @@ void FFmpegDecoderVideo::decodeLoop()
{
m_bytes_remaining = packet.packet.size;
m_packet_data = packet.packet.data;
+ avcodec_send_packet(m_context, &(packet.packet));
}
else if (packet.type == FFmpegPacket::PACKET_FLUSH)
{
@@ -283,7 +237,7 @@ void FFmpegDecoderVideo::findAspectRatio()
m_pixel_aspect_ratio = ratio;
}
-int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
+int FFmpegDecoderVideo::convert(AVFrame *dst, int dst_pix_fmt, AVFrame *src,
int src_pix_fmt, int src_width, int src_height)
{
osg::Timer_t startTick = osg::Timer::instance()->tick();
@@ -305,7 +259,7 @@ int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
OSG_DEBUG<<"Using img_convert ";
- int result = img_convert(dst, dst_pix_fmt, src,
+ int result = img_convert((AVPicture *)dst, dst_pix_fmt, (AVPicture *)src,
src_pix_fmt, src_width, src_height);
#endif
@@ -334,11 +288,11 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
return;
#endif
- AVPicture * const src = (AVPicture *) m_frame.get();
- AVPicture * const dst = (AVPicture *) m_frame_rgba.get();
+ AVFrame * const src = m_frame.get();
+ AVFrame * const dst = m_frame_rgba.get();
// Assign appropriate parts of the buffer to image planes in m_frame_rgba
- avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height());
+ av_image_fill_arrays(dst->data, dst->linesize, &(m_buffer_rgba[m_writeBuffer])[0], AV_PIX_FMT_RGB24, width(), height(), 1);
// Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine
@@ -370,7 +324,7 @@ void FFmpegDecoderVideo::publishFrame(const double delay, bool audio_disabled)
-void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
+void FFmpegDecoderVideo::yuva420pToRgba(AVFrame * const dst, AVFrame * const src, int width, int height)
{
convert(dst, AV_PIX_FMT_RGB24, src, m_context->pix_fmt, width, height);
diff --git a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
index 778c1a9b8..77a1d9cdc 100644
--- a/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
+++ b/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.hpp
@@ -88,9 +88,9 @@ private:
void findAspectRatio();
void publishFrame(double delay, bool audio_disabled);
double synchronizeVideo(double pts);
- void yuva420pToRgba(AVPicture *dst, AVPicture *src, int width, int height);
+ void yuva420pToRgba(AVFrame *dst, AVFrame *src, int width, int height);
- int convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
+ int convert(AVFrame *dst, int dst_pix_fmt, AVFrame *src,
int src_pix_fmt, int src_width, int src_height);
@@ -101,7 +101,7 @@ private:
FFmpegClocks & m_clocks;
AVStream * m_stream;
AVCodecContext * m_context;
- AVCodec * m_codec;
+ const AVCodec * m_codec;
const uint8_t * m_packet_data;
int m_bytes_remaining;
int64_t m_packet_pts;
diff --git a/src/osgPlugins/ffmpeg/FFmpegPacket.hpp b/src/osgPlugins/ffmpeg/FFmpegPacket.hpp
index e06bd8bb6..94cc13b1b 100644
--- a/src/osgPlugins/ffmpeg/FFmpegPacket.hpp
+++ b/src/osgPlugins/ffmpeg/FFmpegPacket.hpp
@@ -42,7 +42,7 @@ namespace osgFFmpeg
void clear()
{
if (packet.data != 0)
- av_free_packet(&packet);
+ av_packet_unref(&packet);
release();
}
diff --git a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
index 5915ab850..f56021dbd 100644
--- a/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
+++ b/src/osgPlugins/ffmpeg/FFmpegParameters.cpp
@@ -52,7 +52,7 @@ void FFmpegParameters::parse(const std::string& name, const std::string& value)
#ifndef ANDROID
avdevice_register_all();
#endif
- m_format = av_find_input_format(value.c_str());
+ m_format = const_cast<AVInputFormat*>(av_find_input_format(value.c_str()));
if (!m_format)
OSG_NOTICE<<"Failed to apply input video format: "<<value.c_str()<<std::endl;
}
diff --git a/src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp b/src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp
index f468ee81f..17edfa96d 100644
--- a/src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp
+++ b/src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp
@@ -113,13 +113,6 @@ public:
av_log_set_callback(log_to_osg);
-#ifdef USE_AV_LOCK_MANAGER
- // enable thread locking
- av_lockmgr_register(&lockMgr);
-#endif
- // Register all FFmpeg formats/codecs
- av_register_all();
-
avformat_network_init();
}
@@ -218,41 +211,7 @@ private:
}
}
}
-
-#ifdef USE_AV_LOCK_MANAGER
- static int lockMgr(void **mutex, enum AVLockOp op)
- {
- // returns are 0 success
- OpenThreads::Mutex **m=(OpenThreads::Mutex**)mutex;
- if (op==AV_LOCK_CREATE)
- {
- *m=new OpenThreads::Mutex;
- return !*m;
- }
- else if (op==AV_LOCK_DESTROY)
- {
- delete *m;
- return 0;
- }
- else if (op==AV_LOCK_OBTAIN)
- {
- (*m)->lock();
- return 0;
- }
- else if (op==AV_LOCK_RELEASE)
- {
- (*m)->unlock();
- return 0;
- }
- else
- {
- return -1;
- }
- }
-#endif
-
};
-
REGISTER_OSGPLUGIN(ffmpeg, ReaderWriterFFmpeg)

I think the pull might benefit from being broken up further:
- Update ffmpeg minimum version to 4.0 and remove all code supporting older versions.
- Const-Correctness
- New Decode API (avcodec_send_packet)
- Context Allocation Changes (avcodec_alloc_context3)
- Channel Layout API
- Image Functions (avpicture to av_image)
- Packet Handling (av_free_packet to av_packet_unref)
- Remove Use of deprecated API
- Code Simplifications (Simplified error handling with AvStrError and Simplified pause() function)
@bchoineubility does it not work for you with ffmpeg-5? |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit.This suggestion is invalid because no changes were made to the code.Suggestions cannot be applied while the pull request is closed.Suggestions cannot be applied while viewing a subset of changes.Only one suggestion per line can be applied in a batch.Add this suggestion to a batch that can be applied as a single commit.Applying suggestions on deleted lines is not supported.You must change the existing code in this line in order to create a valid suggestion.Outdated suggestions cannot be applied.This suggestion has been applied or marked resolved.Suggestions cannot be applied from pending reviews.Suggestions cannot be applied on multi-line comments.Suggestions cannot be applied while the pull request is queued to merge.Suggestion cannot be applied right now. Please check back later.
No description provided.