OpenShot Library | libopenshot 0.4.0
FFmpegReader.cpp
/**
 * @file
 * @brief Source file for FFmpegReader class
 */

// Copyright (c) 2008-2024 OpenShot Studios, LLC, Fabrice Bellard
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include <thread>    // for std::this_thread::sleep_for
#include <chrono>    // for std::chrono::milliseconds
#include <unistd.h>

#include "FFmpegUtilities.h"

#include "FFmpegReader.h"
#include "Exceptions.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#define ENABLE_VAAPI 0

#if USE_HW_ACCEL
#define MAX_SUPPORTED_WIDTH 1950
#define MAX_SUPPORTED_HEIGHT 1100

#if ENABLE_VAAPI
#include "libavutil/hwcontext_vaapi.h"

typedef struct VAAPIDecodeContext {
    VAProfile va_profile;
    VAEntrypoint va_entrypoint;
    VAConfigID va_config;
    VAContextID va_context;

#if FF_API_STRUCT_VAAPI_CONTEXT
    // FF_DISABLE_DEPRECATION_WARNINGS
    int have_old_context;
    struct vaapi_context *old_context;
    AVBufferRef *device_ref;
    // FF_ENABLE_DEPRECATION_WARNINGS
#endif

    AVHWDeviceContext *device;
    AVVAAPIDeviceContext *hwctx;

    AVHWFramesContext *frames;
    AVVAAPIFramesContext *hwfc;

    enum AVPixelFormat surface_format;
    int surface_count;
} VAAPIDecodeContext;
#endif // ENABLE_VAAPI
#endif // USE_HW_ACCEL

using namespace openshot;

int hw_de_on = 0;
#if USE_HW_ACCEL
    AVPixelFormat hw_de_av_pix_fmt_global = AV_PIX_FMT_NONE;
    AVHWDeviceType hw_de_av_device_type_global = AV_HWDEVICE_TYPE_NONE;
#endif

FFmpegReader::FFmpegReader(const std::string &path, bool inspect_reader)
    : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0), NO_PTS_OFFSET(-99999),
      path(path), is_video_seek(true), check_interlace(false), check_fps(false), enable_seek(true), is_open(false),
      seek_audio_frame_found(0), seek_video_frame_found(0), is_duration_known(false), largest_frame_processed(0),
      current_video_frame(0), packet(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS), audio_pts(0),
      video_pts(0), pFormatCtx(NULL), videoStream(-1), audioStream(-1), pCodecCtx(NULL), aCodecCtx(NULL),
      pStream(NULL), aStream(NULL), pFrame(NULL), previous_packet_location{-1, 0},
      hold_packet(false) {

    // Initialize FFmpeg, and register all formats and codecs
    AV_REGISTER_ALL
    AVCODEC_REGISTER_ALL

    // Init timestamp offsets
    pts_offset_seconds = NO_PTS_OFFSET;
    video_pts_seconds = NO_PTS_OFFSET;
    audio_pts_seconds = NO_PTS_OFFSET;

    // Init cache
    working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
    final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2 * info.fps.ToDouble(), info.width, info.height, info.sample_rate, info.channels);

    // Open and Close the reader, to populate its attributes (such as height, width, etc...)
    if (inspect_reader) {
        Open();
        Close();
    }
}
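
// Typical usage (a minimal sketch; "video.mp4" is a placeholder path):
//
//     openshot::FFmpegReader r("video.mp4", true);
//     r.Open();
//     std::shared_ptr<openshot::Frame> f = r.GetFrame(1);
//     r.Close();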

FFmpegReader::~FFmpegReader() {
    if (is_open)
        // Auto close reader if not already done
        Close();
}

// This struct holds the associated video frame and starting sample # for an audio packet.
bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, int64_t amount) {
    // Is frame even close to this one?
    if (abs(location.frame - frame) >= 2)
        // This is too far away to be considered
        return false;

    // Note that samples_per_frame can vary slightly frame to frame when the
    // audio sampling rate is not an integer multiple of the video fps.
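    // Worked example (assuming 30 fps video and 44100 Hz audio): samples_per_frame
    // is 44100 / 30 = 1470, so a location one frame ahead with sample_start == 0
    // lies (1470 - sample_start) samples away; the two locations are "near" only
    // when that distance is within 'amount'.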
    int64_t diff = samples_per_frame * (location.frame - frame) + location.sample_start - sample_start;
    if (abs(diff) <= amount)
        // close
        return true;

    // not close
    return false;
}

#if USE_HW_ACCEL

// Get hardware pix format
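// FFmpeg calls this callback (assigned to AVCodecContext::get_format below) with an
// AV_PIX_FMT_NONE-terminated list of pixel formats the codec can output; returning
// one of the hardware formats opts the decoder into hardware surfaces.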
static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        switch (*p) {
#if defined(__linux__)
            // Linux pix formats
            case AV_PIX_FMT_VAAPI:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VAAPI;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VAAPI;
                return *p;
                break;
            case AV_PIX_FMT_VDPAU:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VDPAU;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VDPAU;
                return *p;
                break;
#endif
#if defined(_WIN32)
            // Windows pix formats
            case AV_PIX_FMT_DXVA2_VLD:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_DXVA2_VLD;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_DXVA2;
                return *p;
                break;
            case AV_PIX_FMT_D3D11:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_D3D11;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_D3D11VA;
                return *p;
                break;
#endif
#if defined(__APPLE__)
            // Apple pix formats
            case AV_PIX_FMT_VIDEOTOOLBOX:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_VIDEOTOOLBOX;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                return *p;
                break;
#endif
            // Cross-platform pix formats
            case AV_PIX_FMT_CUDA:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_CUDA;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_CUDA;
                return *p;
                break;
            case AV_PIX_FMT_QSV:
                hw_de_av_pix_fmt_global = AV_PIX_FMT_QSV;
                hw_de_av_device_type_global = AV_HWDEVICE_TYPE_QSV;
                return *p;
                break;
            default:
                // This is only here to silence unused-enum warnings
                break;
        }
    }
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::get_hw_dec_format (Unable to decode this file using hardware decode)");
    return AV_PIX_FMT_NONE;
}

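// Whitelist of codec IDs known to work with this reader's hardware decode path;
// any other codec falls back to software decoding.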
int FFmpegReader::IsHardwareDecodeSupported(int codecid)
{
    int ret;
    switch (codecid) {
        case AV_CODEC_ID_H264:
        case AV_CODEC_ID_MPEG2VIDEO:
        case AV_CODEC_ID_VC1:
        case AV_CODEC_ID_WMV1:
        case AV_CODEC_ID_WMV2:
        case AV_CODEC_ID_WMV3:
            ret = 1;
            break;
        default:
            ret = 0;
            break;
    }
    return ret;
}
#endif // USE_HW_ACCEL

void FFmpegReader::Open() {
    // Open reader if not already open
    if (!is_open) {
        // Prevent async calls to the following code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Initialize format context
        pFormatCtx = NULL;
        {
            hw_de_on = (openshot::Settings::Instance()->HARDWARE_DECODER == 0 ? 0 : 1);
            ZmqLogger::Instance()->AppendDebugMethod("Decode hardware acceleration settings", "hw_de_on", hw_de_on, "HARDWARE_DECODER", openshot::Settings::Instance()->HARDWARE_DECODER);
        }

        // Open video file
        if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
            throw InvalidFile("File could not be opened.", path);

        // Retrieve stream information
        if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
            throw NoStreamsFound("No streams found in file.", path);

        videoStream = -1;
        audioStream = -1;

        // Init end-of-file detection variables
        packet_status.reset(true);

        // Loop through each stream, and identify the video and audio stream index
        for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
            // Is this a video stream?
            if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
                videoStream = i;
                packet_status.video_eof = false;
                packet_status.packets_eof = false;
                packet_status.end_of_file = false;
            }
            // Is this an audio stream?
            if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
                audioStream = i;
                packet_status.audio_eof = false;
                packet_status.packets_eof = false;
                packet_status.end_of_file = false;
            }
        }
        if (videoStream == -1 && audioStream == -1)
            throw NoStreamsFound("No video or audio streams found in this file.", path);

        // Is there a video stream?
        if (videoStream != -1) {
            // Set the stream index
            info.video_stream_index = videoStream;

            // Set the codec and codec context pointers
            pStream = pFormatCtx->streams[videoStream];

            // Find the codec ID from stream
            const AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(pStream);

            // Get codec and codec context from stream
            const AVCodec *pCodec = avcodec_find_decoder(codecId);
            AVDictionary *opts = NULL;
            int retry_decode_open = 2;
            // If hardware acceleration is selected but the device cannot handle
            // this stream, repeat the open with software decoding.
            do {
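                // Loop semantics: retry_decode_open == 2 marks the first pass, where
                // hardware support is probed. If the dimension checks below fail,
                // hw_de_supported is cleared and retry_decode_open is set to 1, which
                // repeats the loop once using software decoding.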
                pCodecCtx = AV_GET_CODEC_CONTEXT(pStream, pCodec);
#if USE_HW_ACCEL
                if (hw_de_on && (retry_decode_open == 2)) {
                    // No decision between hardware and software decoding has been made yet
                    hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
                }
#endif
                retry_decode_open = 0;

                // Set number of threads equal to number of processors (not to exceed 16)
                pCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

                if (pCodec == NULL) {
                    throw InvalidCodec("A valid video codec could not be found for this file.", path);
                }

                // Init options
                av_dict_set(&opts, "strict", "experimental", 0);
#if USE_HW_ACCEL
                if (hw_de_on && hw_de_supported) {
                    // Open Hardware Acceleration
                    int i_decoder_hw = 0;
                    char adapter[256];
                    char *adapter_ptr = NULL;
                    int adapter_num;
                    adapter_num = openshot::Settings::Instance()->HW_DE_DEVICE_SET;
                    fprintf(stderr, "Hardware decoding device number: %d\n", adapter_num);

                    // Set hardware pix format (callback)
                    pCodecCtx->get_format = get_hw_dec_format;

                    if (adapter_num < 3 && adapter_num >= 0) {
#if defined(__linux__)
                        snprintf(adapter, sizeof(adapter), "/dev/dri/renderD%d", adapter_num + 128);
                        adapter_ptr = adapter;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 1:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
                                break;
                            case 2:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
                                break;
                            case 6:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
                                break;
                        }

#elif defined(_WIN32)
                        adapter_ptr = NULL;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 2:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
                                break;
                            case 3:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
                                break;
                            case 4:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
                                break;
                        }
#elif defined(__APPLE__)
                        adapter_ptr = NULL;
                        i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
                        switch (i_decoder_hw) {
                            case 5:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                                break;
                            case 7:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
                                break;
                            default:
                                hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
                                break;
                        }
#endif

                    } else {
                        adapter_ptr = NULL;  // Just to be sure
                    }

                    // Check that the decode device exists and is writable
#if defined(__linux__)
                    if (adapter_ptr != NULL && access(adapter_ptr, W_OK) == 0) {
#elif defined(_WIN32)
                    if (adapter_ptr != NULL) {
#elif defined(__APPLE__)
                    if (adapter_ptr != NULL) {
#endif
                        ZmqLogger::Instance()->AppendDebugMethod("Decode Device present using device");
                    }
                    else {
                        adapter_ptr = NULL;  // use default
                        ZmqLogger::Instance()->AppendDebugMethod("Decode Device not present using default");
                    }

                    hw_device_ctx = NULL;
                    // Here the first hardware initialisations are made
                    if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
                        if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
                            throw InvalidCodec("Hardware device reference create failed.", path);
                        }

                        /*
                        av_buffer_unref(&ist->hw_frames_ctx);
                        ist->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
                        if (!ist->hw_frames_ctx) {
                            av_log(avctx, AV_LOG_ERROR, "Error creating a CUDA frames context\n");
                            return AVERROR(ENOMEM);
                        }

                        frames_ctx = (AVHWFramesContext*)ist->hw_frames_ctx->data;

                        frames_ctx->format = AV_PIX_FMT_CUDA;
                        frames_ctx->sw_format = avctx->sw_pix_fmt;
                        frames_ctx->width = avctx->width;
                        frames_ctx->height = avctx->height;

                        av_log(avctx, AV_LOG_DEBUG, "Initializing CUDA frames context: sw_format = %s, width = %d, height = %d\n",
                               av_get_pix_fmt_name(frames_ctx->sw_format), frames_ctx->width, frames_ctx->height);

                        ret = av_hwframe_ctx_init(pCodecCtx->hw_device_ctx);
                        ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
                        if (ret < 0) {
                            av_log(avctx, AV_LOG_ERROR, "Error initializing a CUDA frame pool\n");
                            return ret;
                        }
                        */
                    }
                    else {
                        throw InvalidCodec("Hardware device create failed.", path);
                    }
                }
#endif // USE_HW_ACCEL

                // Disable per-frame threading for album arts
                // Using FF_THREAD_FRAME adds one frame decoding delay per thread,
                // but there's only one frame in this case.
                if (HasAlbumArt())
                {
                    pCodecCtx->thread_type &= ~FF_THREAD_FRAME;
                }

                // Open video codec
                int avcodec_return = avcodec_open2(pCodecCtx, pCodec, &opts);
                if (avcodec_return < 0) {
                    std::stringstream avcodec_error_msg;
                    avcodec_error_msg << "A video codec was found, but could not be opened. Error: " << av_err2string(avcodec_return);
                    throw InvalidCodec(avcodec_error_msg.str(), path);
                }

#if USE_HW_ACCEL
                if (hw_de_on && hw_de_supported) {
                    AVHWFramesConstraints *constraints = NULL;
                    void *hwconfig = NULL;
                    hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);

                    // TODO: needs va_config!
#if ENABLE_VAAPI
                    ((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
                    constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwconfig);
#endif // ENABLE_VAAPI
                    if (constraints) {
                        if (pCodecCtx->coded_width < constraints->min_width ||
                            pCodecCtx->coded_height < constraints->min_height ||
                            pCodecCtx->coded_width > constraints->max_width ||
                            pCodecCtx->coded_height > constraints->max_height) {
                            ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n");
                            hw_de_supported = 0;
                            retry_decode_open = 1;
                            AV_FREE_CONTEXT(pCodecCtx);
                            if (hw_device_ctx) {
                                av_buffer_unref(&hw_device_ctx);
                                hw_device_ctx = NULL;
                            }
                        }
                        else {
                            // All is just peachy
                            ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Min width :", constraints->min_width, "Min Height :", constraints->min_height, "MaxWidth :", constraints->max_width, "MaxHeight :", constraints->max_height, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            retry_decode_open = 0;
                        }
                        av_hwframe_constraints_free(&constraints);
                        if (hwconfig) {
                            av_freep(&hwconfig);
                        }
                    }
                    else {
                        int max_h, max_w;
                        //max_h = ((getenv( "LIMIT_HEIGHT_MAX" )==NULL) ? MAX_SUPPORTED_HEIGHT : atoi(getenv( "LIMIT_HEIGHT_MAX" )));
                        max_h = MAX_SUPPORTED_HEIGHT;
                        //max_w = ((getenv( "LIMIT_WIDTH_MAX" )==NULL) ? MAX_SUPPORTED_WIDTH : atoi(getenv( "LIMIT_WIDTH_MAX" )));
                        max_w = MAX_SUPPORTED_WIDTH;
                        ZmqLogger::Instance()->AppendDebugMethod("Constraints could not be found using default limit\n");
                        //cerr << "Constraints could not be found using default limit\n";
                        if (pCodecCtx->coded_width < 0 ||
                            pCodecCtx->coded_height < 0 ||
                            pCodecCtx->coded_width > max_w ||
                            pCodecCtx->coded_height > max_h) {
                            ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            hw_de_supported = 0;
                            retry_decode_open = 1;
                            AV_FREE_CONTEXT(pCodecCtx);
                            if (hw_device_ctx) {
                                av_buffer_unref(&hw_device_ctx);
                                hw_device_ctx = NULL;
                            }
                        }
                        else {
                            ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
                            retry_decode_open = 0;
                        }
                    }
                } // if hw_de_on && hw_de_supported
                else {
                    ZmqLogger::Instance()->AppendDebugMethod("\nDecode in software is used\n");
                }
#else
                retry_decode_open = 0;
#endif // USE_HW_ACCEL
            } while (retry_decode_open); // retry_decode_open
            // Free options
            av_dict_free(&opts);

            // Update the File Info struct with video details (if a video stream is found)
            UpdateVideoInfo();
        }

        // Is there an audio stream?
        if (audioStream != -1) {
            // Set the stream index
            info.audio_stream_index = audioStream;

            // Get a pointer to the codec context for the audio stream
            aStream = pFormatCtx->streams[audioStream];

            // Find the codec ID from stream
            AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(aStream);

            // Get codec and codec context from stream
            const AVCodec *aCodec = avcodec_find_decoder(codecId);
            aCodecCtx = AV_GET_CODEC_CONTEXT(aStream, aCodec);

            // Set number of threads equal to number of processors (not to exceed 16)
            aCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

            if (aCodec == NULL) {
                throw InvalidCodec("A valid audio codec could not be found for this file.", path);
            }

            // Init options
            AVDictionary *opts = NULL;
            av_dict_set(&opts, "strict", "experimental", 0);

            // Open audio codec
            if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
                throw InvalidCodec("An audio codec was found, but could not be opened.", path);

            // Free options
            av_dict_free(&opts);

            // Update the File Info struct with audio details (if an audio stream is found)
            UpdateAudioInfo();
        }

        // Add format metadata (if any)
        AVDictionaryEntry *tag = NULL;
        while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            QString str_key = tag->key;
            QString str_value = tag->value;
            info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
        }

        // Init previous audio location to zero
        previous_packet_location.frame = -1;
        previous_packet_location.sample_start = 0;

        // Adjust cache size based on size of frame and audio
        working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
        final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2 * info.fps.ToDouble(), info.width, info.height, info.sample_rate, info.channels);

        // Scan PTS for any offsets (i.e. non-zero starting streams). At least 1 stream must start at zero timestamp.
        // This method allows us to shift timestamps to ensure at least 1 stream is starting at zero.
        UpdatePTSOffset();

        // Override an invalid framerate
        if (info.fps.ToFloat() > 240.0f || (info.fps.num <= 0 || info.fps.den <= 0) || info.video_length <= 0) {
            // Calculate FPS, duration, video bit rate, and video length manually
            // by scanning through all the video stream packets
            CheckFPS();
        }

        // Mark as "open"
        is_open = true;

        // Seek back to beginning of file (if not already seeking)
        if (!is_seeking) {
            Seek(1);
        }
    }
}

void FFmpegReader::Close() {
    // Close all objects, if reader is 'open'
    if (is_open) {
        // Prevent async calls to the following code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Mark as "closed"
        is_open = false;

        // Keep track of most recent packet
        AVPacket *recent_packet = packet;

        // Drain any packets from the decoder
        packet = NULL;
        int attempts = 0;
        int max_attempts = 128;
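        // Flush the decoder delay: keep invoking the Process* methods with no new
        // input until every packet handed to the decoders has produced output, or
        // give up after max_attempts iterations.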
        while (packet_status.packets_decoded() < packet_status.packets_read() && attempts < max_attempts) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Close (Drain decoder loop)",
                                                     "packets_read", packet_status.packets_read(),
                                                     "packets_decoded", packet_status.packets_decoded(),
                                                     "attempts", attempts);
            if (packet_status.video_decoded < packet_status.video_read) {
                ProcessVideoPacket(info.video_length);
            }
            if (packet_status.audio_decoded < packet_status.audio_read) {
                ProcessAudioPacket(info.video_length);
            }
            attempts++;
        }

        // Remove packet
        if (recent_packet) {
            RemoveAVPacket(recent_packet);
        }

        // Close the video codec
        if (info.has_video) {
            if (avcodec_is_open(pCodecCtx)) {
                avcodec_flush_buffers(pCodecCtx);
            }
            AV_FREE_CONTEXT(pCodecCtx);
#if USE_HW_ACCEL
            if (hw_de_on) {
                if (hw_device_ctx) {
                    av_buffer_unref(&hw_device_ctx);
                    hw_device_ctx = NULL;
                }
            }
#endif // USE_HW_ACCEL
        }

        // Close the audio codec
        if (info.has_audio) {
            if (avcodec_is_open(aCodecCtx)) {
                avcodec_flush_buffers(aCodecCtx);
            }
            AV_FREE_CONTEXT(aCodecCtx);
        }

        // Clear final cache
        final_cache.Clear();
        working_cache.Clear();

        // Close the video file
        avformat_close_input(&pFormatCtx);
        av_freep(&pFormatCtx);

        // Reset some variables
        last_frame = 0;
        hold_packet = false;
        largest_frame_processed = 0;
        seek_audio_frame_found = 0;
        seek_video_frame_found = 0;
        current_video_frame = 0;
        last_video_frame.reset();
    }
}

bool FFmpegReader::HasAlbumArt() {
    // Check if the video stream we use is an attached picture
    // This won't return true if the file has a cover image as a secondary stream
    // like an MKV file with an attached image file
    return pFormatCtx && videoStream >= 0 && pFormatCtx->streams[videoStream]
        && (pFormatCtx->streams[videoStream]->disposition & AV_DISPOSITION_ATTACHED_PIC);
}

void FFmpegReader::UpdateAudioInfo() {
    // Set default audio channel layout (if needed)
#if HAVE_CH_LAYOUT
    if (!av_channel_layout_check(&(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout)))
        AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout = (AVChannelLayout) AV_CHANNEL_LAYOUT_STEREO;
#else
    if (AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout == 0)
        AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout = av_get_default_channel_layout(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels);
#endif

    if (info.sample_rate > 0) {
        // Skip init - if info struct already populated
        return;
    }

    // Set values of FileInfo struct
    info.has_audio = true;
    info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
    info.acodec = aCodecCtx->codec->name;
#if HAVE_CH_LAYOUT
    info.channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout.nb_channels;
    info.channel_layout = (ChannelLayout) AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout.u.mask;
#else
    info.channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
    info.channel_layout = (ChannelLayout) AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout;
#endif

    // If channel layout is not set, guess based on the number of channels
    if (info.channel_layout == 0) {
        if (info.channels == 1) {
            info.channel_layout = LAYOUT_MONO;
        } else if (info.channels == 2) {
            info.channel_layout = LAYOUT_STEREO;
        }
    }

    info.sample_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->sample_rate;
    info.audio_bit_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->bit_rate;
    if (info.audio_bit_rate <= 0) {
        // Get bitrate from format
        info.audio_bit_rate = pFormatCtx->bit_rate;
    }

    // Set audio timebase
    info.audio_timebase.num = aStream->time_base.num;
    info.audio_timebase.den = aStream->time_base.den;

    // Use the audio stream duration if it is valid and greater than the current duration
    if (aStream->duration > 0 && aStream->duration > info.duration) {
        // Get duration from audio stream
        info.duration = aStream->duration * info.audio_timebase.ToDouble();
    } else if (pFormatCtx->duration > 0 && info.duration <= 0.0f) {
        // Use the format's duration
        info.duration = float(pFormatCtx->duration) / AV_TIME_BASE;
    }

    // Calculate duration from filesize and bitrate (if any)
    if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0) {
        // Estimate from bitrate, total bytes, and framerate
        info.duration = float(info.file_size) / info.video_bit_rate;
    }

    // Check for an invalid video length
    if (info.has_video && info.video_length <= 0) {
        // Calculate the video length from the audio duration
        info.video_length = info.duration * info.fps.ToDouble();
    }

    // Set video timebase (if no video stream was found)
    if (!info.has_video) {
        // Set a few important default video settings (so audio can be divided into frames)
        info.fps.num = 24;
        info.fps.den = 1;
        info.video_timebase.num = 1;
        info.video_timebase.den = 24;
        info.video_length = info.duration * info.fps.ToDouble();
        info.width = 720;
        info.height = 480;

        // Use timeline to set correct width & height (if any)
        Clip *parent = static_cast<Clip *>(ParentClip());
        if (parent) {
            if (parent->ParentTimeline()) {
                // Set max width/height based on parent clip's timeline (if attached to a timeline)
                info.width = parent->ParentTimeline()->preview_width;
                info.height = parent->ParentTimeline()->preview_height;
            }
        }
    }

    // Fix invalid video lengths for certain types of files (MP3 for example)
    if (info.has_video && ((info.duration * info.fps.ToDouble()) - info.video_length > 60)) {
        info.video_length = info.duration * info.fps.ToDouble();
    }

    // Add audio metadata (if any found)
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(aStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        QString str_key = tag->key;
        QString str_value = tag->value;
        info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
    }
}

void FFmpegReader::UpdateVideoInfo() {
    if (info.vcodec.length() > 0) {
        // Skip init - if info struct already populated
        return;
    }

    // Set values of FileInfo struct
    info.has_video = true;
    info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
    info.height = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->height;
    info.width = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->width;
    info.vcodec = pCodecCtx->codec->name;
    info.video_bit_rate = (pFormatCtx->bit_rate / 8);

    // Frame rate from the container and codec
    AVRational framerate = av_guess_frame_rate(pFormatCtx, pStream, NULL);
    if (!check_fps) {
        info.fps.num = framerate.num;
        info.fps.den = framerate.den;
    }

    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdateVideoInfo", "info.fps.num", info.fps.num, "info.fps.den", info.fps.den);

    // TODO: remove excessive debug info in the next releases
    // The debug info below is just for comparison and troubleshooting on users side during the transition period
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdateVideoInfo (pStream->avg_frame_rate)", "num", pStream->avg_frame_rate.num, "den", pStream->avg_frame_rate.den);

    if (pStream->sample_aspect_ratio.num != 0) {
        info.pixel_ratio.num = pStream->sample_aspect_ratio.num;
        info.pixel_ratio.den = pStream->sample_aspect_ratio.den;
    } else if (AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.num != 0) {
        info.pixel_ratio.num = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.num;
        info.pixel_ratio.den = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->sample_aspect_ratio.den;
    } else {
        info.pixel_ratio.num = 1;
        info.pixel_ratio.den = 1;
    }
    info.pixel_format = AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx);

    // Calculate the DAR (display aspect ratio)
    Fraction size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den);

    // Reduce size fraction
    size.Reduce();

    // Set the ratio based on the reduced fraction
    info.display_ratio.num = size.num;
    info.display_ratio.den = size.den;
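    // Worked example (assuming a 720x480 stream with a 32:27 pixel ratio):
    // size = (720*32):(480*27) = 23040:12960, which reduces to a 16:9 display ratio.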

    // Get scan type and order from codec context/params
    if (!check_interlace) {
        check_interlace = true;
        AVFieldOrder field_order = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->field_order;
        switch (field_order) {
            case AV_FIELD_PROGRESSIVE:
                info.interlaced_frame = false;
                break;
            case AV_FIELD_TT:
            case AV_FIELD_TB:
                info.interlaced_frame = true;
                info.top_field_first = true;
                break;
            case AV_FIELD_BT:
            case AV_FIELD_BB:
                info.interlaced_frame = true;
                info.top_field_first = false;
                break;
            case AV_FIELD_UNKNOWN:
                // Check again later?
                check_interlace = false;
                break;
        }
        // check_interlace will prevent these checks being repeated,
        // unless it was cleared because we got an AV_FIELD_UNKNOWN response.
    }

    // Set the video timebase
    info.video_timebase.num = pStream->time_base.num;
    info.video_timebase.den = pStream->time_base.den;

    // Set the duration in seconds, and video length (# of frames)
    info.duration = pStream->duration * info.video_timebase.ToDouble();

    // Check for valid duration (if found)
    if (info.duration <= 0.0f && pFormatCtx->duration >= 0) {
        // Use the format's duration
        info.duration = float(pFormatCtx->duration) / AV_TIME_BASE;
    }

    // Calculate duration from filesize and bitrate (if any)
    if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0) {
        // Estimate from bitrate, total bytes, and framerate
        info.duration = float(info.file_size) / info.video_bit_rate;
    }

    // Certain "image" formats do not have a valid duration
    if (info.duration <= 0.0f && pStream->duration == AV_NOPTS_VALUE && pFormatCtx->duration == AV_NOPTS_VALUE) {
        // Force an "image" duration
        info.duration = 60 * 60 * 1;  // 1 hour duration
        info.video_length = 1;
        info.has_single_image = true;
    }

    // Get the # of video frames (if found in stream)
    // Only set this 1 time (this method can be called multiple times)
    if (pStream->nb_frames > 0 && info.video_length <= 0) {
        info.video_length = pStream->nb_frames;
    }

    // No duration found in stream of file
    if (info.duration <= 0.0f) {
        // No duration is found in the video stream
        info.duration = -1;
        info.video_length = -1;
        is_duration_known = false;
    } else {
        // Yes, a duration was found
        is_duration_known = true;

        // Calculate number of frames (if not already found in metadata)
        // Only set this 1 time (this method can be called multiple times)
        if (info.video_length <= 0) {
            info.video_length = round(info.duration * info.fps.ToDouble());
        }
    }

    // Add video metadata (if any)
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(pStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        QString str_key = tag->key;
        QString str_value = tag->value;
        info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
    }
}

bool FFmpegReader::GetIsDurationKnown() {
    return this->is_duration_known;
}

std::shared_ptr<Frame> FFmpegReader::GetFrame(int64_t requested_frame) {
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The FFmpegReader is closed. Call Open() before calling this method.", path);

    // Adjust for a requested frame that is too small or too large
    if (requested_frame < 1)
        requested_frame = 1;
    if (requested_frame > info.video_length && is_duration_known)
        requested_frame = info.video_length;
    if (info.has_video && info.video_length == 0)
        // Invalid duration of video file
        throw InvalidFile("Could not detect the duration of the video or audio stream.", path);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "requested_frame", requested_frame, "last_frame", last_frame);

    // Check the cache for this frame
    std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame", requested_frame);

        // Return the cached frame
        return frame;
    } else {

        // Prevent async calls to the remainder of this code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Check the cache a 2nd time (due to the potential previous lock)
        frame = final_cache.GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);

        } else {
            // Frame is not in cache
            // Reset seek count
            seek_count = 0;

            // Are we within X frames of the requested frame?
            int64_t diff = requested_frame - last_frame;
            if (diff >= 1 && diff <= 20) {
                // Continue walking the stream
                frame = ReadStream(requested_frame);
            } else {
                // More than 20 frames away (or behind us): seek to the nearest keyframe
                if (enable_seek) {
                    // Only seek if enabled
                    Seek(requested_frame);

                } else if (!enable_seek && diff < 0) {
                    // Start over, since we can't seek, and the requested frame is smaller than our position
                    // Since we are seeking to frame 1, this actually just closes/re-opens the reader
                    Seek(1);
                }

                // Then continue walking the stream
                frame = ReadStream(requested_frame);
            }
        }
        return frame;
    }
}

// Read the stream until we find the requested Frame
std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
    // Allocate video frame
    bool check_seek = false;
    int packet_error = -1;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "max_concurrent_frames", max_concurrent_frames);

    // Loop through the stream until the correct frame is found
    while (true) {
        // Check if working frames are 'finished'
        if (!is_seeking) {
            // Check for final frames
            CheckWorkingFrames(requested_frame);
        }

        // Check if requested 'final' frame is available (and break out of loop if found)
        bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
        if (is_cache_found) {
            break;
        }

        if (!hold_packet || !packet) {
            // Get the next packet
            packet_error = GetNextPacket();
            if (packet_error < 0 && !packet) {
                // No more packets to be found
                packet_status.packets_eof = true;
            }
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "packets_read", packet_status.packets_read(), "packets_decoded", packet_status.packets_decoded(), "is_seeking", is_seeking);

        // Check the status of a seek (if any)
        if (is_seeking) {
            check_seek = CheckSeek(false);
        } else {
            check_seek = false;
        }

        if (check_seek) {
            // Packet may become NULL on Close inside Seek if CheckSeek returns false
            // Jump to the next iteration of this loop
            continue;
        }

        // Video packet
        if ((info.has_video && packet && packet->stream_index == videoStream) ||
            (info.has_video && packet_status.video_decoded < packet_status.video_read) ||
            (info.has_video && !packet && !packet_status.video_eof)) {
            // Process Video Packet
            ProcessVideoPacket(requested_frame);
        }
        // Audio packet
        if ((info.has_audio && packet && packet->stream_index == audioStream) ||
            (info.has_audio && !packet && packet_status.audio_decoded < packet_status.audio_read) ||
            (info.has_audio && !packet && !packet_status.audio_eof)) {
            // Process Audio Packet
            ProcessAudioPacket(requested_frame);
        }

        // Remove unused packets (sometimes we purposely ignore video or audio packets,
        // if the has_video or has_audio properties are manually overridden)
        if ((!info.has_video && packet && packet->stream_index == videoStream) ||
            (!info.has_audio && packet && packet->stream_index == audioStream)) {
            // Keep track of deleted packet counts
            if (packet->stream_index == videoStream) {
                packet_status.video_decoded++;
            } else if (packet->stream_index == audioStream) {
                packet_status.audio_decoded++;
            }

            // Remove the unused packet
            RemoveAVPacket(packet);
            packet = NULL;
        }

        // Determine end-of-stream (waiting until final decoder threads finish)
        // Force end-of-stream in some situations
        packet_status.end_of_file = packet_status.packets_eof && packet_status.video_eof && packet_status.audio_eof;
        if ((packet_status.packets_eof && packet_status.packets_read() == packet_status.packets_decoded()) || packet_status.end_of_file) {
            // Force EOF (end of file) variables to true, if decoder does not support EOF detection.
            // If we have no more packets, and all known packets have been decoded
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (force EOF)", "packets_read", packet_status.packets_read(), "packets_decoded", packet_status.packets_decoded(), "packets_eof", packet_status.packets_eof, "video_eof", packet_status.video_eof, "audio_eof", packet_status.audio_eof, "end_of_file", packet_status.end_of_file);
            if (!packet_status.video_eof) {
                packet_status.video_eof = true;
            }
            if (!packet_status.audio_eof) {
                packet_status.audio_eof = true;
            }
            packet_status.end_of_file = true;
            break;
        }
    } // end while

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (Completed)",
                                             "packets_read", packet_status.packets_read(),
                                             "packets_decoded", packet_status.packets_decoded(),
                                             "end_of_file", packet_status.end_of_file,
                                             "largest_frame_processed", largest_frame_processed,
                                             "Working Cache Count", working_cache.Count());

    // Have we reached end-of-stream (or the final frame)?
    if (!packet_status.end_of_file && requested_frame >= info.video_length) {
        // Force end-of-stream
        packet_status.end_of_file = true;
    }
    if (packet_status.end_of_file) {
        // Mark any other working frames as 'finished'
        CheckWorkingFrames(requested_frame);
    }

    // Return requested frame (if found)
    std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
    if (frame)
        // Return prepared frame
        return frame;
    else {

        // Check if largest frame is still cached
        frame = final_cache.GetFrame(largest_frame_processed);
        int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps,
                                                         info.sample_rate, info.channels);
        if (frame) {
            // Copy and return the largest processed frame (assuming it was the last in the video file)
            std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);

            // Use solid color (if no image data found)
            if (!frame->has_image_data) {
                // Use solid black frame if no image data available
                f->AddColor(info.width, info.height, "#000");
            }
            // Silence audio data (if any), since we are repeating the last frame
            frame->AddAudioSilence(samples_in_frame);

            return frame;
        } else {
            // The largest processed frame is no longer in cache, return a blank frame
            std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
            f->AddColor(info.width, info.height, "#000");
            f->AddAudioSilence(samples_in_frame);
            return f;
        }
    }
}

// Get the next packet (if any)
int FFmpegReader::GetNextPacket() {
    int found_packet = 0;
    AVPacket *next_packet;
    next_packet = new AVPacket();
    found_packet = av_read_frame(pFormatCtx, next_packet);

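    // av_read_frame() returns 0 on success and fills next_packet with reference-counted
    // data; a negative return means end-of-file or a read error, in which case the
    // packet holds nothing and can simply be deleted below.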
    if (packet) {
        // Remove previous packet before getting next one
        RemoveAVPacket(packet);
        packet = NULL;
    }
    if (found_packet >= 0) {
        // Update current packet pointer
        packet = next_packet;

        // Keep track of packet stats
        if (packet->stream_index == videoStream) {
            packet_status.video_read++;
        } else if (packet->stream_index == audioStream) {
            packet_status.audio_read++;
        }
    } else {
        // No more packets found
        delete next_packet;
        packet = NULL;
    }
    // Return if packet was found (or error number)
    return found_packet;
}

// Get an AVFrame (if any)
bool FFmpegReader::GetAVFrame() {
    int frameFinished = 0;

    // Decode video frame
    AVFrame *next_frame = AV_ALLOCATE_FRAME();

#if IS_FFMPEG_3_2
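    // FFmpeg 3.2+ decode API: packets go in via avcodec_send_packet() and frames come
    // out via avcodec_receive_frame(). The two sides are decoupled, so a packet may
    // produce zero frames (decoder delay), and sending a NULL packet drains the decoder.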
    int send_packet_err = 0;
    int64_t send_packet_pts = 0;
    if ((packet && packet->stream_index == videoStream) || !packet) {
        send_packet_err = avcodec_send_packet(pCodecCtx, packet);

        if (packet && send_packet_err >= 0) {
            send_packet_pts = GetPacketPTS();
            hold_packet = false;
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet succeeded)", "send_packet_err", send_packet_err, "send_packet_pts", send_packet_pts);
        }
    }

#if USE_HW_ACCEL
    // Get the format from the variables set in get_hw_dec_format
    hw_de_av_pix_fmt = hw_de_av_pix_fmt_global;
    hw_de_av_device_type = hw_de_av_device_type_global;
#endif // USE_HW_ACCEL
    if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: Not sent [" + av_err2string(send_packet_err) + "])", "send_packet_err", send_packet_err, "send_packet_pts", send_packet_pts);
        if (send_packet_err == AVERROR(EAGAIN)) {
            hold_packet = true;
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(EAGAIN): user must read output with avcodec_receive_frame()", "send_packet_pts", send_packet_pts);
        }
        if (send_packet_err == AVERROR(EINVAL)) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush", "send_packet_pts", send_packet_pts);
        }
        if (send_packet_err == AVERROR(ENOMEM)) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (send packet: AVERROR(ENOMEM): failed to add packet to internal queue, or legitimate decoding errors", "send_packet_pts", send_packet_pts);
        }
    }

    // Always try to receive a frame, if not EOF.
    // Even if the above avcodec_send_packet failed to send,
    // we might still need to receive a frame.
    int receive_frame_err = 0;
    AVFrame *next_frame2;
#if USE_HW_ACCEL
    if (hw_de_on && hw_de_supported) {
        next_frame2 = AV_ALLOCATE_FRAME();
    }
    else
#endif // USE_HW_ACCEL
    {
        next_frame2 = next_frame;
    }
    pFrame = AV_ALLOCATE_FRAME();
    while (receive_frame_err >= 0) {
        receive_frame_err = avcodec_receive_frame(pCodecCtx, next_frame2);

        if (receive_frame_err != 0) {
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (receive frame: frame not ready yet from decoder [" + av_err2string(receive_frame_err) + "])", "receive_frame_err", receive_frame_err, "send_packet_pts", send_packet_pts);

            if (receive_frame_err == AVERROR_EOF) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR_EOF: EOF detected from decoder, flushing buffers)", "send_packet_pts", send_packet_pts);
                avcodec_flush_buffers(pCodecCtx);
                packet_status.video_eof = true;
            }
            if (receive_frame_err == AVERROR(EINVAL)) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR(EINVAL): invalid frame received, flushing buffers)", "send_packet_pts", send_packet_pts);
                avcodec_flush_buffers(pCodecCtx);
            }
            if (receive_frame_err == AVERROR(EAGAIN)) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR(EAGAIN): output is not available in this state - user must try to send new input)", "send_packet_pts", send_packet_pts);
            }
            if (receive_frame_err == AVERROR_INPUT_CHANGED) {
                ZmqLogger::Instance()->AppendDebugMethod(
                        "FFmpegReader::GetAVFrame (receive frame: AVERROR_INPUT_CHANGED: current decoded frame has changed parameters with respect to first decoded frame)", "send_packet_pts", send_packet_pts);
            }

            // Break out of decoding loop
            // Nothing ready for decoding yet
            break;
        }

#if USE_HW_ACCEL
        if (hw_de_on && hw_de_supported) {
            int err;
            if (next_frame2->format == hw_de_av_pix_fmt) {
                next_frame->format = AV_PIX_FMT_YUV420P;
                if ((err = av_hwframe_transfer_data(next_frame, next_frame2, 0)) < 0) {
                    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to transfer data to output frame)", "hw_de_on", hw_de_on);
                }
                if ((err = av_frame_copy_props(next_frame, next_frame2)) < 0) {
                    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to copy props to output frame)", "hw_de_on", hw_de_on);
                }
            }
        }
        else
#endif // USE_HW_ACCEL
        {  // No hardware acceleration used -> no copy from GPU memory needed
            next_frame = next_frame2;
        }

        // TODO: also handle possible further frames
        // Use only the first frame like avcodec_decode_video2
        frameFinished = 1;
        packet_status.video_decoded++;

        av_image_alloc(pFrame->data, pFrame->linesize, info.width, info.height, (AVPixelFormat)(pStream->codecpar->format), 1);
        av_image_copy(pFrame->data, pFrame->linesize, (const uint8_t**)next_frame->data, next_frame->linesize,
                      (AVPixelFormat)(pStream->codecpar->format), info.width, info.height);

        // Get display PTS from video frame, often different than packet->pts.
        // Sending packets to the decoder (i.e. packet->pts) is async,
        // and retrieving packets from the decoder (frame->pts) is async. In most decoders
        // sending and retrieving are separated by multiple calls to this method.
        if (next_frame->pts != AV_NOPTS_VALUE) {
            // This is the current decoded frame (and should be the pts used) for
            // processing this data
            video_pts = next_frame->pts;
        } else if (next_frame->pkt_dts != AV_NOPTS_VALUE) {
            // Some videos only set this timestamp (fallback)
            video_pts = next_frame->pkt_dts;
        }

        ZmqLogger::Instance()->AppendDebugMethod(
                "FFmpegReader::GetAVFrame (Successful frame received)", "video_pts", video_pts, "send_packet_pts", send_packet_pts);

        // break out of loop after each successful image returned
        break;
    }
#if USE_HW_ACCEL
    if (hw_de_on && hw_de_supported) {
        AV_FREE_FRAME(&next_frame2);
    }
#endif // USE_HW_ACCEL
#else
    avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);

    // always allocate pFrame (because we do that in the ffmpeg >= 3.2 as well); it will always be freed later
    pFrame = AV_ALLOCATE_FRAME();

    // is frame finished
    if (frameFinished) {
        // AVFrames are clobbered on each call to avcodec_decode_video, so we
        // must make a copy of the image data before this method is called again.
        avpicture_alloc((AVPicture *) pFrame, pCodecCtx->pix_fmt, info.width, info.height);
        av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, info.width,
                        info.height);
    }
#endif // IS_FFMPEG_3_2

    // deallocate the frame
    AV_FREE_FRAME(&next_frame);

    // Did we get a video frame?
    return frameFinished;
}

// Check the current seek position and determine if we need to seek again
bool FFmpegReader::CheckSeek(bool is_video) {
    // Are we seeking for a specific frame?
    if (is_seeking) {
        // Determine if both an audio and video packet have been decoded since the seek happened.
        // If not, allow the ReadStream method to keep looping
        if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
            return false;

        // Check for both streams
        if ((info.has_video && !seek_video_frame_found) || (info.has_audio && !seek_audio_frame_found))
            return false;

        // Determine max seeked frame
        int64_t max_seeked_frame = std::max(seek_audio_frame_found, seek_video_frame_found);

        // Determine if we are "before" the requested frame
        if (max_seeked_frame >= seeking_frame) {
            // SEEKED TOO FAR
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Too far, seek again)",
                                                     "is_video_seek", is_video_seek,
                                                     "max_seeked_frame", max_seeked_frame,
                                                     "seeking_frame", seeking_frame,
                                                     "seeking_pts", seeking_pts,
                                                     "seek_video_frame_found", seek_video_frame_found,
                                                     "seek_audio_frame_found", seek_audio_frame_found);

            // Seek again... to the nearest Keyframe
            Seek(seeking_frame - (10 * seek_count * seek_count));
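            // The rewind distance grows quadratically with each failed attempt
            // (10, 40, 90, ... frames before the requested frame), so repeated
            // overshoots back up progressively further before the next keyframe seek.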
        } else {
            // SEEK WORKED
            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Successful)",
                                                     "is_video_seek", is_video_seek,
                                                     "packet->pts", GetPacketPTS(),
                                                     "seeking_pts", seeking_pts,
                                                     "seeking_frame", seeking_frame,
                                                     "seek_video_frame_found", seek_video_frame_found,
                                                     "seek_audio_frame_found", seek_audio_frame_found);

            // Seek worked, and we are "before" the requested frame
            is_seeking = false;
            seeking_frame = 0;
            seeking_pts = -1;
        }
    }

    // return the pts to seek to (if any)
    return is_seeking;
}

// Process a video packet
void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
    // Get the AVFrame from the current packet
    // This sets the video_pts to the correct timestamp
    int frame_finished = GetAVFrame();

    // Check if the AVFrame is finished and set it
    if (!frame_finished) {
        // No AVFrame decoded yet, bail out
        if (pFrame) {
            RemoveAVFrame(pFrame);
        }
        return;
    }

    // Calculate current frame #
    int64_t current_frame = ConvertVideoPTStoFrame(video_pts);

    // Track 1st video packet after a successful seek
    if (!seek_video_frame_found && is_seeking)
        seek_video_frame_found = current_frame;

    // Create or get the existing frame object. Requested frame needs to be created
    // in working_cache at least once. Seek can clear the working_cache, so we must
    // add the requested frame back to the working_cache here. If it already exists,
    // it will be moved to the top of the working_cache.
    working_cache.Add(CreateFrame(requested_frame));

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (Before)", "requested_frame", requested_frame, "current_frame", current_frame);

    // Init some things local (for OpenMP)
    PixelFormat pix_fmt = AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx);
    int height = info.height;
    int width = info.width;
    int64_t video_length = info.video_length;

    // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
    AVFrame *pFrameRGB = nullptr;
    uint8_t *buffer = nullptr;

    // Allocate an AVFrame structure
    pFrameRGB = AV_ALLOCATE_FRAME();
    if (pFrameRGB == nullptr)
        throw OutOfMemory("Failed to allocate frame buffer", path);

    // Determine the max size of this source image (based on the timeline's size, the scaling mode,
    // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
    // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
    // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
    // the future.
    int max_width = info.width;
    int max_height = info.height;

    Clip *parent = static_cast<Clip *>(ParentClip());
    if (parent) {
        if (parent->ParentTimeline()) {
            // Set max width/height based on parent clip's timeline (if attached to a timeline)
            max_width = parent->ParentTimeline()->preview_width;
            max_height = parent->ParentTimeline()->preview_height;
        }
        if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
            // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            max_width = std::max(float(max_width), max_width * max_scale_x);
            max_height = std::max(float(max_height), max_height * max_scale_y);

        } else if (parent->scale == SCALE_CROP) {
            // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            QSize width_size(max_width * max_scale_x,
                             round(max_width / (float(info.width) / float(info.height))));
            QSize height_size(round(max_height / (float(info.height) / float(info.width))),
                              max_height * max_scale_y);
            // respect aspect ratio
            if (width_size.width() >= max_width && width_size.height() >= max_height) {
                max_width = std::max(max_width, width_size.width());
                max_height = std::max(max_height, width_size.height());
            } else {
                max_width = std::max(max_width, height_size.width());
                max_height = std::max(max_height, height_size.height());
            }

        } else {
            // Scale video to equivalent unscaled size
            // Since the preview window can change sizes, we want to always
            // scale against the ratio of original video size to timeline size
            float preview_ratio = 1.0;
            if (parent->ParentTimeline()) {
                Timeline *t = (Timeline *) parent->ParentTimeline();
                preview_ratio = t->preview_width / float(t->info.width);
            }
            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
            max_width = info.width * max_scale_x * preview_ratio;
            max_height = info.height * max_scale_y * preview_ratio;
        }
    }

    // Determine if image needs to be scaled (for performance reasons)
    int original_height = height;
    if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
        // Override width and height (but maintain aspect ratio)
        float ratio = float(width) / float(height);
        int possible_width = round(max_height * ratio);
        int possible_height = round(max_width / ratio);

        if (possible_width <= max_width) {
            // use calculated width, and max_height
            width = possible_width;
            height = max_height;
        } else {
            // use max_width, and calculated height
            width = max_width;
            height = possible_height;
        }
    }
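    // Worked example (assuming a 1920x1080 source and a 1280x720 preview): ratio is
    // 1920/1080 ~= 1.778, possible_width = round(720 * 1.778) = 1280, which fits
    // max_width, so the frame is decoded into a 1280x720 buffer instead of full size.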

    // Determine required buffer size and allocate buffer
    const int bytes_per_pixel = 4;
    int buffer_size = (width * height * bytes_per_pixel) + 128;
    buffer = new unsigned char[buffer_size]();

    // Copy picture data from one AVFrame (or AVPicture) to another one.
    AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);

    int scale_mode = SWS_FAST_BILINEAR;
    if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
        scale_mode = SWS_BICUBIC;
    }
    SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
                                                 height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);

    // Resize / Convert to RGB
    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
              original_height, pFrameRGB->data, pFrameRGB->linesize);

    // Create or get the existing frame object
    std::shared_ptr<Frame> f = CreateFrame(current_frame);

    // Add Image data to frame
    if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
        // Add image with no alpha channel, Speed optimization
        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
    } else {
        // Add image with alpha channel (this will be converted to premultiplied when needed, but is slower)
        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
    }

    // Update working cache
    working_cache.Add(f);

    // Keep track of the last video frame
    last_video_frame = f;

    // Free the RGB image
    AV_FREE_FRAME(&pFrameRGB);

    // Remove frame and packet
    RemoveAVFrame(pFrame);
    sws_freeContext(img_convert_ctx);

    // Get video PTS in seconds
    video_pts_seconds = (double(video_pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number, "video_pts_seconds", video_pts_seconds);
}
1543
1544// Process an audio packet
1545void FFmpegReader::ProcessAudioPacket(int64_t requested_frame) {
1546 AudioLocation location;
1547 // Calculate location of current audio packet
1548 if (packet && packet->pts != AV_NOPTS_VALUE) {
1549 // Determine related video frame and starting sample # from audio PTS
1550 location = GetAudioPTSLocation(packet->pts);
1551
1552 // Track 1st audio packet after a successful seek
1553 if (!seek_audio_frame_found && is_seeking)
1554 seek_audio_frame_found = location.frame;
1555 }
1556
1557 // Create or get the existing frame object. Requested frame needs to be created
1558 // in working_cache at least once. Seek can clear the working_cache, so we must
1559 // add the requested frame back to the working_cache here. If it already exists,
1560 // it will be moved to the top of the working_cache.
1561 working_cache.Add(CreateFrame(requested_frame));
1562
1563 // Debug output
1564 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Before)",
1565 "requested_frame", requested_frame,
1566 "target_frame", location.frame,
1567 "starting_sample", location.sample_start);
1568
1569 // Init an AVFrame to hold the decoded audio samples
1570 int frame_finished = 0;
1571 AVFrame *audio_frame = AV_ALLOCATE_FRAME();
1572 AV_RESET_FRAME(audio_frame);
1573
1574 int packet_samples = 0;
1575 int data_size = 0;
1576
1577#if IS_FFMPEG_3_2
1578 int send_packet_err = avcodec_send_packet(aCodecCtx, packet);
1579 if (send_packet_err < 0 && send_packet_err != AVERROR_EOF) {
1580 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Packet not sent)");
1581 }
1582 else {
1583 int receive_frame_err = avcodec_receive_frame(aCodecCtx, audio_frame);
1584 if (receive_frame_err >= 0) {
1585 frame_finished = 1;
1586 }
1587 if (receive_frame_err == AVERROR_EOF) {
1588 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (EOF detected from decoder)");
1589 packet_status.audio_eof = true;
1590 }
1591 if (receive_frame_err == AVERROR(EINVAL) || receive_frame_err == AVERROR_EOF) {
1592 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (invalid frame received or EOF from decoder)");
1593 avcodec_flush_buffers(aCodecCtx);
1594 }
1595 if (receive_frame_err != 0) {
1596 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (frame not ready yet from decoder)");
1597 }
1598 }
1599#else
1600 int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
1601#endif
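 // Editorial note: avcodec_send_packet()/avcodec_receive_frame() is FFmpeg's
 // decoupled decode API (one packet in, zero or more frames out); the
 // avcodec_decode_audio4() branch is the pre-3.2 API kept for older builds.
 // A generic sketch of the decoupled pattern (orientation only, not how this
 // reader is structured):
 //   avcodec_send_packet(ctx, pkt);
 //   while (avcodec_receive_frame(ctx, frm) == 0) { /* consume frm */ }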
1602
1603 if (frame_finished) {
1604 packet_status.audio_decoded++;
1605
1606 // The decoded frame's PTS can differ from the current packet's PTS, so we
1607 // read the timestamp from the AVFrame returned by the audio decoder. This
1608 // timestamp is used for the remainder of this function.
1609 audio_pts = audio_frame->pts;
1610
1611 // Determine related video frame and starting sample # from audio PTS
1612 location = GetAudioPTSLocation(audio_pts);
1613
1614 // determine how many samples were decoded
1615 int plane_size = -1;
1616#if HAVE_CH_LAYOUT
1617 int nb_channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout.nb_channels;
1618#else
1619 int nb_channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
1620#endif
1621 data_size = av_samples_get_buffer_size(&plane_size, nb_channels,
1622 audio_frame->nb_samples, (AVSampleFormat) (AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx)), 1);
1623
1624 // Calculate total number of samples
1625 packet_samples = audio_frame->nb_samples * nb_channels;
1626 } else {
1627 if (audio_frame) {
1628 // Free audio frame
1629 AV_FREE_FRAME(&audio_frame);
1630 }
1631 }
1632
1633 // Estimate this packet's sample count and ending location (to prevent gaps before the next timestamp)
1634 int pts_remaining_samples = packet_samples / info.channels; // Total samples / channels = per-channel samples
1635
1636 // Bail if no samples found
1637 if (pts_remaining_samples == 0) {
1638 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (No samples, bailing)",
1639 "packet_samples", packet_samples,
1640 "info.channels", info.channels,
1641 "pts_remaining_samples", pts_remaining_samples);
1642 return;
1643 }
1644
1645 while (pts_remaining_samples) {
1646 // Get Samples per frame (for this frame number)
1647 int samples_per_frame = Frame::GetSamplesPerFrame(previous_packet_location.frame, info.fps, info.sample_rate, info.channels);
1648
1649 // Calculate # of samples to add to this frame
1650 int samples = samples_per_frame - previous_packet_location.sample_start;
1651 if (samples > pts_remaining_samples)
1652 samples = pts_remaining_samples;
1653
1654 // Decrement remaining samples
1655 pts_remaining_samples -= samples;
1656
1657 if (pts_remaining_samples > 0) {
1658 // next frame
1659 previous_packet_location.frame++;
1660 previous_packet_location.sample_start = 0;
1661 } else {
1662 // Increment sample start
1663 previous_packet_location.sample_start += samples;
1664 }
1665 }
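 // Worked example (editorial, illustrative numbers): with samples_per_frame =
 // 1764 and 2000 per-channel samples starting at sample_start = 1000, the
 // first pass adds 764 samples (finishing that frame), the location advances
 // to the next frame, and the remaining 1236 samples leave sample_start = 1236.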
1666
1667 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)",
1668 "packet_samples", packet_samples,
1669 "info.channels", info.channels,
1670 "info.sample_rate", info.sample_rate,
1671 "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx));
1672
1673 // Create output frame
1674 AVFrame *audio_converted = AV_ALLOCATE_FRAME();
1675 AV_RESET_FRAME(audio_converted);
1676 audio_converted->nb_samples = audio_frame->nb_samples;
1677 av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
1678
1679 SWRCONTEXT *avr = NULL;
1680
1681 // setup resample context
1682 avr = SWR_ALLOC();
1683#if HAVE_CH_LAYOUT
1684 av_opt_set_chlayout(avr, "in_chlayout", &AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout, 0);
1685 av_opt_set_chlayout(avr, "out_chlayout", &AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout, 0);
1686#else
1687 av_opt_set_int(avr, "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
1688 av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
1689 av_opt_set_int(avr, "in_channels", info.channels, 0);
1690 av_opt_set_int(avr, "out_channels", info.channels, 0);
1691#endif
1692 av_opt_set_int(avr, "in_sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
1693 av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
1694 av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
1695 av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
1696 SWR_INIT(avr);
1697
1698 // Convert audio samples
1699 int nb_samples = SWR_CONVERT(avr, // audio resample context
1700 audio_converted->data, // output data pointers
1701 audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
1702 audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
1703 audio_frame->data, // input data pointers
1704 audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
1705 audio_frame->nb_samples); // number of input samples to convert
1706
1707 // Deallocate resample buffer
1708 SWR_CLOSE(avr);
1709 SWR_FREE(&avr);
1710 avr = NULL;
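 // Editorial note: the input and output sample rates and channel layouts are
 // set identically above, so this resampler only converts the sample format
 // to planar float (AV_SAMPLE_FMT_FLTP) for the per-channel loop below.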
1711
1712 int64_t starting_frame_number = -1;
1713 for (int channel_filter = 0; channel_filter < info.channels; channel_filter++) {
1714 // Array of floats (to hold samples for each channel)
1715 starting_frame_number = location.frame;
1716 int channel_buffer_size = nb_samples;
1717 auto *channel_buffer = (float *) (audio_converted->data[channel_filter]);
1718
1719 // Loop through samples, and add them to the correct frames
1720 int start = location.sample_start;
1721 int remaining_samples = channel_buffer_size;
1722 while (remaining_samples > 0) {
1723 // Get Samples per frame (for this frame number)
1724 int samples_per_frame = Frame::GetSamplesPerFrame(starting_frame_number, info.fps, info.sample_rate, info.channels);
1725
1726 // Calculate # of samples to add to this frame
1727 int samples = std::fmin(samples_per_frame - start, remaining_samples);
1728
1729 // Create or get the existing frame object
1730 std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);
1731
1732 // Add samples for current channel to the frame.
1733 f->AddAudio(true, channel_filter, start, channel_buffer, samples, 1.0f);
1734
1735 // Debug output
1736 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)",
1737 "frame", starting_frame_number,
1738 "start", start,
1739 "samples", samples,
1740 "channel", channel_filter,
1741 "samples_per_frame", samples_per_frame);
1742
1743 // Add or update cache
1744 working_cache.Add(f);
1745
1746 // Decrement remaining samples
1747 remaining_samples -= samples;
1748
1749 // Increment buffer (to next set of samples)
1750 if (remaining_samples > 0)
1751 channel_buffer += samples;
1752
1753 // Increment frame number
1754 starting_frame_number++;
1755
1756 // Reset starting sample #
1757 start = 0;
1758 }
1759 }
1760
1761 // Free AVFrames
1762 av_free(audio_converted->data[0]);
1763 AV_FREE_FRAME(&audio_converted);
1764 AV_FREE_FRAME(&audio_frame);
1765
1766 // Get audio PTS in seconds
1767 audio_pts_seconds = (double(audio_pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
1768
1769 // Debug output
1770 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (After)",
1771 "requested_frame", requested_frame,
1772 "starting_frame", location.frame,
1773 "end_frame", starting_frame_number - 1,
1774 "audio_pts_seconds", audio_pts_seconds);
1775
1776}
1777
1778
1779// Seek to a specific frame. This is not always frame accurate; on many codecs it is closer to an estimate.
1780void FFmpegReader::Seek(int64_t requested_frame) {
1781 // Adjust for a requested frame that is too small or too large
1782 if (requested_frame < 1)
1783 requested_frame = 1;
1784 if (requested_frame > info.video_length)
1785 requested_frame = info.video_length;
1786 if (requested_frame > largest_frame_processed && packet_status.end_of_file) {
1787 // Not possible to search past largest_frame once EOF is reached (no more packets)
1788 return;
1789 }
1790
1791 // Debug output
1792 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek",
1793 "requested_frame", requested_frame,
1794 "seek_count", seek_count,
1795 "last_frame", last_frame);
1796
1797 // Clear working cache (since we are seeking to another location in the file)
1798 working_cache.Clear();
1799
1800 // Reset timestamps and frame-tracking state
1801 video_pts = 0;
1802 video_pts_seconds = NO_PTS_OFFSET;
1803 audio_pts = 0;
1804 audio_pts_seconds = NO_PTS_OFFSET;
1805 hold_packet = false;
1806 last_frame = 0;
1807 current_video_frame = 0;
1808 largest_frame_processed = 0;
1809 bool has_audio_override = info.has_audio;
1810 bool has_video_override = info.has_video;
1811
1812 // Init end-of-file detection variables
1813 packet_status.reset(false);
1814
1815 // Increment seek count
1816 seek_count++;
1817
1818 // If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
1819 int buffer_amount = std::max(max_concurrent_frames, 8);
1820 if (requested_frame - buffer_amount < 20) {
1821 // prevent Open() from seeking again
1822 is_seeking = true;
1823
1824 // Close and re-open file (basically seeking to frame 1)
1825 Close();
1826 Open();
1827
1828 // Update overrides (since closing and re-opening might update these)
1829 info.has_audio = has_audio_override;
1830 info.has_video = has_video_override;
1831
1832 // Not actually seeking, so clear these flags
1833 is_seeking = false;
1834 if (seek_count == 1) {
1835 // Don't redefine this on multiple seek attempts for a specific frame
1836 seeking_frame = 1;
1837 seeking_pts = ConvertFrameToVideoPTS(1);
1838 }
1839 seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
1840 seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
1841
1842 } else {
1843 // Seek to nearest key-frame (aka, i-frame)
1844 bool seek_worked = false;
1845 int64_t seek_target = 0;
1846
1847 // Seek video stream (if any), except album arts
1848 if (!seek_worked && info.has_video && !HasAlbumArt()) {
1849 seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
1850 if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
1851 fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
1852 } else {
1853 // VIDEO SEEK
1854 is_video_seek = true;
1855 seek_worked = true;
1856 }
1857 }
1858
1859 // Seek audio stream (if the video seek did not work, and an audio stream is found)
1860 if (!seek_worked && info.has_audio) {
1861 seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
1862 if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
1863 fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
1864 } else {
1865 // AUDIO SEEK
1866 is_video_seek = false;
1867 seek_worked = true;
1868 }
1869 }
1870
1871 // Was the seek successful?
1872 if (seek_worked) {
1873 // Flush audio buffer
1874 if (info.has_audio)
1875 avcodec_flush_buffers(aCodecCtx);
1876
1877 // Flush video buffer
1878 if (info.has_video)
1879 avcodec_flush_buffers(pCodecCtx);
1880
1881 // Reset previous audio location
1882 previous_packet_location.frame = -1;
1883 previous_packet_location.sample_start = 0;
1884
1885 // init seek flags
1886 is_seeking = true;
1887 if (seek_count == 1) {
1888 // Don't redefine this on multiple seek attempts for a specific frame
1889 seeking_pts = seek_target;
1890 seeking_frame = requested_frame;
1891 }
1892 seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
1893 seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
1894
1895 } else {
1896 // seek failed
1897 seeking_pts = 0;
1898 seeking_frame = 0;
1899
1900 // prevent Open() from seeking again
1901 is_seeking = true;
1902
1903 // Close and re-open file (basically seeking to frame 1)
1904 Close();
1905 Open();
1906
1907 // Not actually seeking, so clear these flags
1908 is_seeking = false;
1909
1910 // disable seeking for this reader (since it failed)
1911 enable_seek = false;
1912
1913 // Update overrides (since closing and re-opening might update these)
1914 info.has_audio = has_audio_override;
1915 info.has_video = has_video_override;
1916 }
1917 }
1918}
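// Worked example (editorial, illustrative numbers): seeking to frame 100 with
// max_concurrent_frames = 8 gives buffer_amount = 8; since 100 - 8 is not
// below 20, av_seek_frame() targets the PTS of frame 92 with
// AVSEEK_FLAG_BACKWARD, landing on the nearest preceding key-frame. Frames
// decoded up to the first frame found after the seek are discarded as partial
// (see IsPartialFrame below).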
1919
1920// Get the PTS for the current packet (video or audio)
1921int64_t FFmpegReader::GetPacketPTS() {
1922 if (packet) {
1923 int64_t current_pts = packet->pts;
1924 if (current_pts == AV_NOPTS_VALUE && packet->dts != AV_NOPTS_VALUE)
1925 current_pts = packet->dts;
1926
1927 // Return PTS (falling back to DTS when PTS is missing)
1928 return current_pts;
1929 } else {
1930 // No packet, return NO PTS
1931 return AV_NOPTS_VALUE;
1932 }
1933}
1934
1935// Update PTS Offset (if any)
1936void FFmpegReader::UpdatePTSOffset() {
1937 if (pts_offset_seconds != NO_PTS_OFFSET) {
1938 // Skip this method if we have already set PTS offset
1939 return;
1940 }
1941 pts_offset_seconds = 0.0;
1942 double video_pts_offset_seconds = 0.0;
1943 double audio_pts_offset_seconds = 0.0;
1944
1945 bool has_video_pts = false;
1946 if (!info.has_video) {
1947 // Mark as checked
1948 has_video_pts = true;
1949 }
1950 bool has_audio_pts = false;
1951 if (!info.has_audio) {
1952 // Mark as checked
1953 has_audio_pts = true;
1954 }
1955
1956 // Loop through the stream (until a packet from all streams is found)
1957 while (!has_video_pts || !has_audio_pts) {
1958 // Get the next packet (if any)
1959 if (GetNextPacket() < 0)
1960 // Break loop when no more packets found
1961 break;
1962
1963 // Get PTS of this packet
1964 int64_t pts = GetPacketPTS();
1965
1966 // Video packet
1967 if (!has_video_pts && packet->stream_index == videoStream) {
1968 // Get the video packet start time (in seconds)
1969 video_pts_offset_seconds = 0.0 - (pts * info.video_timebase.ToDouble());
1970
1971 // Is timestamp close to zero (within 10 seconds)?
1972 // Ignore wildly invalid timestamps (i.e. -234923423423)
1973 if (std::abs(video_pts_offset_seconds) <= 10.0) {
1974 has_video_pts = true;
1975 }
1976 }
1977 else if (!has_audio_pts && packet->stream_index == audioStream) {
1978 // Get the audio packet start time (in seconds)
1979 audio_pts_offset_seconds = 0.0 - (pts * info.audio_timebase.ToDouble());
1980
1981 // Is timestamp close to zero (within 10 seconds)?
1982 // Ignore wildly invalid timestamps (i.e. -234923423423)
1983 if (std::abs(audio_pts_offset_seconds) <= 10.0) {
1984 has_audio_pts = true;
1985 }
1986 }
1987 }
1988
1989 // Do we have all valid timestamps to determine PTS offset?
1990 if (has_video_pts && has_audio_pts) {
1991 // Set PTS Offset to the smallest offset
1992 // [ video timestamp ]
1993 // [ audio timestamp ]
1994 //
1995 // ** SHIFT TIMESTAMPS TO ZERO **
1996 //
1997 //[ video timestamp ]
1998 // [ audio timestamp ]
1999 //
2000 // Since all offsets are negative at this point, we want the max value, which
2001 // represents the closest to zero
2002 pts_offset_seconds = std::max(video_pts_offset_seconds, audio_pts_offset_seconds);
2003 }
2004}
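// Worked example (editorial, illustrative numbers): if the first video packet
// starts at 1.400s and the first audio packet at 1.433s, the candidate offsets
// are -1.400 and -1.433; std::max() picks -1.400, shifting both streams 1.4
// seconds toward zero while audio keeps its real 33 ms offset from video.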
2005
2006// Convert PTS into Frame Number
2007int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
2008 // Apply PTS offset
2009 int64_t previous_video_frame = current_video_frame;
2010
2011 // Get the video packet start time (in seconds)
2012 double video_seconds = (double(pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
2013
2014 // Multiply by the FPS to get the frame number (rounded to the nearest frame, 1-based)
2015 int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;
2016
2017 // Keep track of the expected video frame #
2018 if (current_video_frame == 0)
2019 current_video_frame = frame;
2020 else {
2021
2022 // Sometimes frames are duplicated due to identical (or similar) timestamps
2023 if (frame == previous_video_frame) {
2024 // return -1 frame number
2025 frame = -1;
2026 } else {
2027 // Increment expected frame
2028 current_video_frame++;
2029 }
2030 }
2031
2032 // Return frame #
2033 return frame;
2034}
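// Worked example (editorial, illustrative numbers): with a 1/25 video
// timebase, 25 fps, and a zero PTS offset, pts = 50 gives video_seconds =
// 50 * 0.04 = 2.0 and frame = round(2.0 * 25) + 1 = 51 (frame numbers are 1-based).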
2035
2036// Convert Frame Number into Video PTS
2037int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
2038 // Get timestamp of this frame (in seconds)
2039 double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2040
2041 // Convert the timestamp into a video PTS using the video timebase
2042 int64_t video_pts = round(seconds / info.video_timebase.ToDouble());
2043
2044 // Return the video PTS (the offset was already applied above)
2045 return video_pts;
2046}
2047
2048// Convert Frame Number into Audio PTS
2049int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
2050 // Get timestamp of this frame (in seconds)
2051 double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2052
2053 // Convert the timestamp into an audio PTS using the audio timebase
2054 int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());
2055
2056 // Return the audio PTS (the offset was already applied above)
2057 return audio_pts;
2058}
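// Round-trip check (editorial, same illustrative 25 fps / 1/25 timebase
// assumptions as above): ConvertFrameToVideoPTS(51) yields seconds =
// 50 / 25 = 2.0 and video_pts = round(2.0 / 0.04) = 50, inverting the
// ConvertVideoPTStoFrame example.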
2059
2060// Calculate Starting video frame and sample # for an audio PTS
2061AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
2062 // Get the audio packet start time (in seconds)
2063 double audio_seconds = (double(pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
2064
2065 // Multiply by the FPS to get the video frame number (frame # is decimal at this point)
2066 double frame = (audio_seconds * info.fps.ToDouble()) + 1;
2067
2068 // Frame # as a whole number (no more decimals)
2069 int64_t whole_frame = int64_t(frame);
2070
2071 // Remove the whole number, and only get the decimal of the frame
2072 double sample_start_percentage = frame - double(whole_frame);
2073
2074 // Get Samples per frame
2075 int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate, info.channels);
2076
2077 // Calculate the sample # to start on
2078 int sample_start = round(double(samples_per_frame) * sample_start_percentage);
2079
2080 // Protect against broken (i.e. negative) timestamps
2081 if (whole_frame < 1)
2082 whole_frame = 1;
2083 if (sample_start < 0)
2084 sample_start = 0;
2085
2086 // Prepare final audio packet location
2087 AudioLocation location = {whole_frame, sample_start};
2088
2089 // Compare to previous audio packet (and fix small gaps due to varying PTS timestamps)
2090 if (previous_packet_location.frame != -1) {
2091 if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
2092 int64_t orig_frame = location.frame;
2093 int orig_start = location.sample_start;
2094
2095 // Update sample start, to prevent gaps in audio
2096 location.sample_start = previous_packet_location.sample_start;
2097 location.frame = previous_packet_location.frame;
2098
2099 // Debug output
2100 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);
2101
2102 } else {
2103 // Debug output
2104 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Ignored - too big)", "Previous location frame", previous_packet_location.frame, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);
2105 }
2106 }
2107
2108 // Set previous location
2109 previous_packet_location = location;
2110
2111 // Return the associated video frame and starting sample #
2112 return location;
2113}
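// Worked example (editorial, illustrative numbers): at 25 fps and 44100 Hz
// (1764 samples per frame), an audio packet at 0.5s maps to frame =
// 0.5 * 25 + 1 = 13.5, so whole_frame = 13 and sample_start =
// round(1764 * 0.5) = 882.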
2114
2115// Create a new Frame (or return an existing one) and add it to the working queue.
2116std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {
2117 // Check working cache
2118 std::shared_ptr<Frame> output = working_cache.GetFrame(requested_frame);
2119
2120 if (!output) {
2121 // (re-)Check working cache
2122 output = working_cache.GetFrame(requested_frame);
2123 if (output) return output;
2124
2125 // Create a new frame on the working cache
2126 output = std::make_shared<Frame>(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels);
2127 output->SetPixelRatio(info.pixel_ratio.num, info.pixel_ratio.den); // update pixel ratio
2128 output->ChannelsLayout(info.channel_layout); // update audio channel layout from the parent reader
2129 output->SampleRate(info.sample_rate); // update the frame's sample rate of the parent reader
2130
2131 working_cache.Add(output);
2132
2133 // Set the largest processed frame (if this is larger)
2134 if (requested_frame > largest_frame_processed)
2135 largest_frame_processed = requested_frame;
2136 }
2137 // Return frame
2138 return output;
2139}
2140
2141// Determine if frame is partial due to seek
2142bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
2143
2144 // Sometimes a seek gets partial frames, and we need to remove them
2145 bool seek_trash = false;
2146 int64_t max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
2147 if (seek_video_frame_found > max_seeked_frame) {
2148 max_seeked_frame = seek_video_frame_found;
2149 }
2150 if ((info.has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
2151 (info.has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
2152 seek_trash = true;
2153 }
2154
2155 return seek_trash;
2156}
2157
2158// Check the working queue, and move finished frames to the finished queue
2159void FFmpegReader::CheckWorkingFrames(int64_t requested_frame) {
2160
2161 // Prevent async calls to the following code
2162 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
2163
2164 // Get a list of current working queue frames in the cache (in-progress frames)
2165 std::vector<std::shared_ptr<openshot::Frame>> working_frames = working_cache.GetFrames();
2166 std::vector<std::shared_ptr<openshot::Frame>>::iterator working_itr;
2167
2168 // Loop through all working queue frames (sorted by frame #)
2169 for(working_itr = working_frames.begin(); working_itr != working_frames.end(); ++working_itr)
2170 {
2171 // Get working frame
2172 std::shared_ptr<Frame> f = *working_itr;
2173
2174 // Was a frame found? Is frame requested yet?
2175 if (!f || f->number > requested_frame) {
2176 // If not, skip to next one
2177 continue;
2178 }
2179
2180 // Calculate PTS in seconds (of working frame), and the most recent processed pts value
2181 double frame_pts_seconds = (double(f->number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
2182 double recent_pts_seconds = std::max(video_pts_seconds, audio_pts_seconds);
2183
2184 // Determine if video and audio are ready (based on timestamps)
2185 bool is_video_ready = false;
2186 bool is_audio_ready = false;
2187 double recent_pts_diff = recent_pts_seconds - frame_pts_seconds;
2188 if ((frame_pts_seconds <= video_pts_seconds)
2189 || (recent_pts_diff > 1.5)
2190 || packet_status.video_eof || packet_status.end_of_file) {
2191 // Video stream is past this frame (so it must be done)
2192 // OR video stream is too far behind, missing, or end-of-file
2193 is_video_ready = true;
2194 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (video ready)",
2195 "frame_number", f->number,
2196 "frame_pts_seconds", frame_pts_seconds,
2197 "video_pts_seconds", video_pts_seconds,
2198 "recent_pts_diff", recent_pts_diff);
2199 if (info.has_video && !f->has_image_data) {
2200 // Frame has no image data (copy from previous frame)
2201 // Loop backwards through final frames (looking for the nearest, previous frame image)
2202 for (int64_t previous_frame = requested_frame - 1; previous_frame > 0; previous_frame--) {
2203 std::shared_ptr<Frame> previous_frame_instance = final_cache.GetFrame(previous_frame);
2204 if (previous_frame_instance && previous_frame_instance->has_image_data) {
2205 // Copy image from last decoded frame
2206 f->AddImage(std::make_shared<QImage>(previous_frame_instance->GetImage()->copy()));
2207 break;
2208 }
2209 }
2210
2211 if (last_video_frame && !f->has_image_data) {
2212 // Copy image from last decoded frame
2213 f->AddImage(std::make_shared<QImage>(last_video_frame->GetImage()->copy()));
2214 } else if (!f->has_image_data) {
2215 f->AddColor("#000000");
2216 }
2217 }
2218 }
2219
2220 double audio_pts_diff = audio_pts_seconds - frame_pts_seconds;
2221 if ((frame_pts_seconds < audio_pts_seconds && audio_pts_diff > 1.0)
2222 || (recent_pts_diff > 1.5)
2223 || packet_status.audio_eof || packet_status.end_of_file) {
2224 // Audio stream is past this frame (so it must be done)
2225 // OR audio stream is too far behind, missing, or end-of-file
2226 // Adding a bit of margin here, to allow for partial audio packets
2227 is_audio_ready = true;
2228 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (audio ready)",
2229 "frame_number", f->number,
2230 "frame_pts_seconds", frame_pts_seconds,
2231 "audio_pts_seconds", audio_pts_seconds,
2232 "audio_pts_diff", audio_pts_diff,
2233 "recent_pts_diff", recent_pts_diff);
2234 }
2235 bool is_seek_trash = IsPartialFrame(f->number);
2236
2237 // Adjust for available streams
2238 if (!info.has_video) is_video_ready = true;
2239 if (!info.has_audio) is_audio_ready = true;
2240
2241 // Debug output
2242 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames",
2243 "frame_number", f->number,
2244 "is_video_ready", is_video_ready,
2245 "is_audio_ready", is_audio_ready,
2246 "video_eof", packet_status.video_eof,
2247 "audio_eof", packet_status.audio_eof,
2248 "end_of_file", packet_status.end_of_file);
2249
2250 // Check if working frame is final
2251 if ((!packet_status.end_of_file && is_video_ready && is_audio_ready) || packet_status.end_of_file || is_seek_trash) {
2252 // Debug output
2253 ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)",
2254 "requested_frame", requested_frame,
2255 "f->number", f->number,
2256 "is_seek_trash", is_seek_trash,
2257 "Working Cache Count", working_cache.Count(),
2258 "Final Cache Count", final_cache.Count(),
2259 "end_of_file", packet_status.end_of_file);
2260
2261 if (!is_seek_trash) {
2262 // Move frame to final cache
2263 final_cache.Add(f);
2264
2265 // Remove frame from working cache
2266 working_cache.Remove(f->number);
2267
2268 // Update last frame processed
2269 last_frame = f->number;
2270 } else {
2271 // Seek trash, so delete the frame from the working cache, and never add it to the final cache.
2272 working_cache.Remove(f->number);
2273 }
2274
2275 }
2276 }
2277
2278 // Clear vector of frames
2279 working_frames.clear();
2280 working_frames.shrink_to_fit();
2281}
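// Editorial note on the readiness heuristic above: a frame is video-ready once
// the video clock has passed the frame's own timestamp (or the newest decoded
// timestamp is more than 1.5s past it, or at end-of-file); the audio check
// adds a 1.0s margin because audio packets often straddle frame boundaries.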
2282
2283// Check for the correct frames per second (FPS) value by scanning the first few seconds of video packets.
2284void FFmpegReader::CheckFPS() {
2285 if (check_fps) {
2286 // Do not check FPS more than once
2287 return;
2288 } else {
2289 check_fps = true;
2290 }
2291
2292 int frames_per_second[3] = {0,0,0};
2293 int max_fps_index = sizeof(frames_per_second) / sizeof(frames_per_second[0]);
2294 int fps_index = 0;
2295
2296 int all_frames_detected = 0;
2297 int starting_frames_detected = 0;
2298
2299 // Loop through the stream
2300 while (true) {
2301 // Get the next packet (if any)
2302 if (GetNextPacket() < 0)
2303 // Break loop when no more packets found
2304 break;
2305
2306 // Video packet
2307 if (packet->stream_index == videoStream) {
2308 // Get the video packet start time (in seconds)
2309 double video_seconds = (double(GetPacketPTS()) * info.video_timebase.ToDouble()) + pts_offset_seconds;
2310 fps_index = int(video_seconds); // truncate float timestamp to int (second 0, second 1, second 2)
2311
2312 // Is this video packet from the first few seconds?
2313 if (fps_index >= 0 && fps_index < max_fps_index) {
2314 // Yes, keep track of how many frames per second (over the first few seconds)
2315 starting_frames_detected++;
2316 frames_per_second[fps_index]++;
2317 }
2318
2319 // Track all video packets detected
2320 all_frames_detected++;
2321 }
2322 }
2323
2324 // Calculate FPS (based on the first few seconds of video packets)
2325 float avg_fps = 30.0;
2326 if (starting_frames_detected > 0 && fps_index > 0) {
2327 avg_fps = float(starting_frames_detected) / std::min(fps_index, max_fps_index);
2328 }
2329
2330 // Verify average FPS is a reasonable value
2331 if (avg_fps < 8.0) {
2332 // Invalid FPS assumed, so switching to a sane default FPS instead
2333 avg_fps = 30.0;
2334 }
2335
2336 // Update FPS (truncate average FPS to Integer)
2337 info.fps = Fraction(int(avg_fps), 1);
2338
2339 // Update Duration and Length
2340 if (all_frames_detected > 0) {
2341 // Use all video frames detected to calculate # of frames
2342 info.video_length = all_frames_detected;
2343 info.duration = all_frames_detected / avg_fps;
2344 } else {
2345 // Use previous duration to calculate # of frames
2346 info.video_length = info.duration * avg_fps;
2347 }
2348
2349 // Update video bit rate
2350 info.video_bit_rate = info.file_size / info.duration;
2351}
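// Worked example (editorial, illustrative numbers): if the first three seconds
// contain 90 video packets and the stream continues past them (so
// std::min(fps_index, max_fps_index) clamps to 3), avg_fps = 90 / 3 = 30.0 and
// info.fps becomes 30/1.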
2352
2353// Remove AVFrame from cache (and deallocate its memory)
2354void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
2355 // Remove pFrame (if exists)
2356 if (remove_frame) {
2357 // Free memory
2358 av_freep(&remove_frame->data[0]);
2359#ifndef WIN32
2360 AV_FREE_FRAME(&remove_frame);
2361#endif
2362 }
2363}
2364
2365// Remove AVPacket from cache (and deallocate its memory)
2366void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
2367 // deallocate memory for packet
2368 AV_FREE_PACKET(remove_packet);
2369
2370 // Delete the object
2371 delete remove_packet;
2372}
2373
2374// Generate JSON string of this object
2375std::string FFmpegReader::Json() const {
2376
2377 // Return formatted string
2378 return JsonValue().toStyledString();
2379}
2380
2381// Generate Json::Value for this object
2382Json::Value FFmpegReader::JsonValue() const {
2383
2384 // Create root json object
2385 Json::Value root = ReaderBase::JsonValue(); // get parent properties
2386 root["type"] = "FFmpegReader";
2387 root["path"] = path;
2388
2389 // return JsonValue
2390 return root;
2391}
2392
2393// Load JSON string into this object
2394void FFmpegReader::SetJson(const std::string value) {
2395
2396 // Parse JSON string into JSON objects
2397 try {
2398 const Json::Value root = openshot::stringToJson(value);
2399 // Set all values that match
2400 SetJsonValue(root);
2401 }
2402 catch (const std::exception& e) {
2403 // Error parsing JSON (or missing keys)
2404 throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
2405 }
2406}
2407
2408// Load Json::Value into this object
2409void FFmpegReader::SetJsonValue(const Json::Value root) {
2410
2411 // Set parent data
2413
2414 // Set data from Json (if key is found)
2415 if (!root["path"].isNull())
2416 path = root["path"].asString();
2417
2418 // Re-Open path, and re-init everything (if needed)
2419 if (is_open) {
2420 Close();
2421 Open();
2422 }
2423}