Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
c95fd83
Small improvement to caching sws scale context and reusing AVFrame ob…
jonoomph Jun 4, 2025
6e73a23
Changing example program to time decoding of test video, forwards and…
jonoomph Jun 4, 2025
c487aa4
Fixing the alignment of our FFmpegReader video buffers to be 32 / AVX…
jonoomph Jun 4, 2025
b2333ab
Refactoring aligned malloc to work on Mac, Windows, and Linux (inside…
jonoomph Jun 5, 2025
38afcea
Use aligned memory free for QImage / frame buffer (fix crash on Win32…
jonoomph Jun 5, 2025
60fbb7f
Fix incorrect _MSC_VER and replace with _WIN32
jonoomph Jun 5, 2025
81a04a1
Add libopenshot unit tests back into Mac builder GitLab CI script
jonoomph Jun 5, 2025
32f291c
Fixing unit test building on Mac runner
jonoomph Jun 5, 2025
15936ba
Experiment for Mac unit tests
jonoomph Jun 5, 2025
2003ae1
Experiment for Mac unit tests
jonoomph Jun 5, 2025
54b33ed
Reverting experimental libopenshot Mac build changes related to "imag…
jonoomph Jun 5, 2025
5e4bc36
Remove per-thread scalers; use single persistent frames and SwsContex…
jonoomph Jun 6, 2025
a4b2af4
Adding FFmpegWriter into openshot-example executable to test with val…
jonoomph Jun 6, 2025
a6ca7d9
Adding back Setting::VIDEO_CACHE_MAX_FRAMES, to limit the video cache…
jonoomph Jun 6, 2025
57a0bae
Refactor of Settings to no longer duplicate them in the *.h and Insta…
jonoomph Jun 6, 2025
7ee4643
Splitting FF_NUM_PROCESSORS into a VIDEO and AUDIO constant. Also lim…
jonoomph Jun 6, 2025
2d6db64
Incorporating VideoCacheThread into openshot-example executable, to e…
jonoomph Jun 6, 2025
68d3850
Exposing VideoCacheThread to SWIG bindings for Python, Ruby, and Java…
jonoomph Jun 6, 2025
0fcdcdb
Removing debug code
jonoomph Jun 6, 2025
099fe59
Including missing sstream include on Profile tests
jonoomph Jun 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ mac-builder:
- cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.12" -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../
- make -j 9
- make install
- make test
- PROJECT_VERSION=$(grep -E '^set\(PROJECT_VERSION_FULL "(.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d '")')
- PROJECT_SO=$(grep -E '^set\(PROJECT_SO_VERSION (.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d ')')
- echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID\nCI_PIPELINE_ID:$CI_PIPELINE_ID\nVERSION:$PROJECT_VERSION\nSO:$PROJECT_SO" > "install-x64/share/$CI_PROJECT_NAME.env"
Expand Down
5 changes: 5 additions & 0 deletions bindings/java/openshot.i
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,13 @@
#include "Settings.h"
#include "TimelineBase.h"
#include "Timeline.h"
#include "Qt/VideoCacheThread.h"
#include "ZmqLogger.h"
%}

// Prevent SWIG from ever generating a wrapper for juce::Thread’s constructor (or run())
%ignore juce::Thread::Thread;

#ifdef USE_IMAGEMAGICK
%{
#include "ImageReader.h"
Expand Down Expand Up @@ -151,6 +155,7 @@
%include "RendererBase.h"
%include "Settings.h"
%include "TimelineBase.h"
%include "Qt/VideoCacheThread.h"
%include "Timeline.h"
%include "ZmqLogger.h"

Expand Down
5 changes: 5 additions & 0 deletions bindings/python/openshot.i
Original file line number Diff line number Diff line change
Expand Up @@ -96,10 +96,14 @@
#include "Settings.h"
#include "TimelineBase.h"
#include "Timeline.h"
#include "Qt/VideoCacheThread.h"
#include "ZmqLogger.h"

%}

// Prevent SWIG from ever generating a wrapper for juce::Thread’s constructor (or run())
%ignore juce::Thread::Thread;

#ifdef USE_IMAGEMAGICK
%{
#include "ImageReader.h"
Expand Down Expand Up @@ -317,6 +321,7 @@
%include "RendererBase.h"
%include "Settings.h"
%include "TimelineBase.h"
%include "Qt/VideoCacheThread.h"
%include "Timeline.h"
%include "ZmqLogger.h"

Expand Down
5 changes: 5 additions & 0 deletions bindings/ruby/openshot.i
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@
#include "Settings.h"
#include "TimelineBase.h"
#include "Timeline.h"
#include "Qt/VideoCacheThread.h"
#include "ZmqLogger.h"

/* Move FFmpeg's RSHIFT to FF_RSHIFT, if present */
Expand All @@ -112,6 +113,9 @@
#endif
%}

// Prevent SWIG from ever generating a wrapper for juce::Thread’s constructor (or run())
%ignore juce::Thread::Thread;

#ifdef USE_IMAGEMAGICK
%{
#include "ImageReader.h"
Expand Down Expand Up @@ -190,6 +194,7 @@
%include "RendererBase.h"
%include "Settings.h"
%include "TimelineBase.h"
%include "Qt/VideoCacheThread.h"
%include "Timeline.h"
%include "ZmqLogger.h"

Expand Down
115 changes: 71 additions & 44 deletions examples/Example.cpp
Original file line number Diff line number Diff line change
@@ -1,68 +1,95 @@
/**
* @file
* @brief Source file for Example Executable (example app for libopenshot)
* @brief Example application showing how to attach VideoCacheThread to an FFmpegReader
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/

// Copyright (c) 2008-2019 OpenShot Studios, LLC
// Copyright (c) 2008-2025 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include <fstream>
#include <chrono>
#include <iostream>
#include <memory>
#include <QFileDialog>
#include "Clip.h"
#include "Frame.h"
#include "FFmpegReader.h"
#include "FFmpegWriter.h"
#include "Timeline.h"
#include "Profiles.h"
#include "Qt/VideoCacheThread.h" // background frame pre-caching thread

using namespace openshot;


int main(int argc, char* argv[]) {
QString filename = "/home/jonathan/test-crash.osp";
//QString filename = "/home/jonathan/Downloads/drive-download-20221123T185423Z-001/project-3363/project-3363.osp";
//QString filename = "/home/jonathan/Downloads/drive-download-20221123T185423Z-001/project-3372/project-3372.osp";
//QString filename = "/home/jonathan/Downloads/drive-download-20221123T185423Z-001/project-3512/project-3512.osp";
QString project_json = "";
QFile file(filename);
if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
std::cout << "File error!" << std::endl;
exit(1);
} else {
while (!file.atEnd()) {
QByteArray line = file.readLine();
project_json += line;
}


// 1) Open the FFmpegReader as usual
const char* input_path = "/home/jonathan/Downloads/openshot-testing/sintel_trailer-720p.mp4";
FFmpegReader reader(input_path);
reader.Open();

const int64_t total_frames = reader.info.video_length;
std::cout << "Total frames: " << total_frames << "\n";



Timeline timeline(reader.info.width, reader.info.height, reader.info.fps, reader.info.sample_rate, reader.info.channels, reader.info.channel_layout);
Clip c1(&reader);
timeline.AddClip(&c1);
timeline.Open();
timeline.DisplayInfo();


// 2) Construct a VideoCacheThread around 'reader' and start its background loop
// (VideoCacheThread inherits juce::Thread)
std::shared_ptr<VideoCacheThread> cache = std::make_shared<VideoCacheThread>();
cache->Reader(&timeline); // attaches the FFmpegReader and internally calls Play()
cache->StartThread(); // juce::Thread method, begins run()

// 3) Set up the writer exactly as before
FFmpegWriter writer("/home/jonathan/Downloads/performance-cachetest.mp4");
writer.SetAudioOptions("aac", 48000, 192000);
writer.SetVideoOptions("libx264", 1280, 720, Fraction(30, 1), 5000000);
writer.Open();

// 4) Forward pass: for each frame 1…N, tell the cache thread to seek to that frame,
// then immediately call cache->GetFrame(frame), which will block only if that frame
// hasn’t been decoded into the cache yet.
auto t0 = std::chrono::high_resolution_clock::now();
cache->setSpeed(1);
for (int64_t f = 1; f <= total_frames; ++f) {
float pct = (float(f) / total_frames) * 100.0f;
std::cout << "Forward: requesting frame " << f << " (" << pct << "%)\n";

cache->Seek(f); // signal “I need frame f now (and please prefetch f+1, f+2, …)”
std::shared_ptr<Frame> framePtr = timeline.GetFrame(f);
writer.WriteFrame(framePtr);
}
auto t1 = std::chrono::high_resolution_clock::now();
auto forward_ms = std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count();

// 5) Backward pass: same idea in reverse
auto t2 = std::chrono::high_resolution_clock::now();
cache->setSpeed(-1);
for (int64_t f = total_frames; f >= 1; --f) {
float pct = (float(total_frames - f + 1) / total_frames) * 100.0f;
std::cout << "Backward: requesting frame " << f << " (" << pct << "%)\n";

// Open timeline reader
std::cout << "Project JSON length: " << project_json.length() << std::endl;
Timeline r(1280, 720, openshot::Fraction(30, 1), 44100, 2, openshot::LAYOUT_STEREO);
r.SetJson(project_json.toStdString());
r.DisplayInfo();
r.Open();

// Get max frame
int64_t max_frame = r.GetMaxFrame();
std::cout << "max_frame: " << max_frame << ", r.info.video_length: " << r.info.video_length << std::endl;

for (long int frame = 1; frame <= max_frame; frame++)
{
float percent = (float(frame) / max_frame) * 100.0;
std::cout << "Requesting Frame #: " << frame << " (" << percent << "%)" << std::endl;
std::shared_ptr<Frame> f = r.GetFrame(frame);

// Preview frame image
if (frame % 1 == 0) {
f->Save("preview.jpg", 1.0, "jpg", 100);
}
cache->Seek(f);
std::shared_ptr<Frame> framePtr = timeline.GetFrame(f);
writer.WriteFrame(framePtr);
}
r.Close();
auto t3 = std::chrono::high_resolution_clock::now();
auto backward_ms = std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count();

std::cout << "\nForward pass elapsed: " << forward_ms << " ms\n";
std::cout << "Backward pass elapsed: " << backward_ms << " ms\n";

exit(0);
// 6) Shut down the cache thread, close everything
cache->StopThread(10000); // politely tells run() to exit, waits up to 10s
reader.Close();
writer.Close();
timeline.Close();
return 0;
}
72 changes: 44 additions & 28 deletions src/FFmpegReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,8 @@
seek_audio_frame_found(0), seek_video_frame_found(0),is_duration_known(false), largest_frame_processed(0),
current_video_frame(0), packet(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS), audio_pts(0),
video_pts(0), pFormatCtx(NULL), videoStream(-1), audioStream(-1), pCodecCtx(NULL), aCodecCtx(NULL),
pStream(NULL), aStream(NULL), pFrame(NULL), previous_packet_location{-1,0},
hold_packet(false) {
pStream(NULL), aStream(NULL), pFrame(NULL), previous_packet_location{-1,0},
hold_packet(false) {

// Initialize FFMpeg, and register all formats and codecs
AV_REGISTER_ALL
Expand Down Expand Up @@ -278,7 +278,7 @@
retry_decode_open = 0;

// Set number of threads equal to number of processors (not to exceed 16)
pCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);
pCodecCtx->thread_count = std::min(FF_VIDEO_NUM_PROCESSORS, 16);

if (pCodec == NULL) {
throw InvalidCodec("A valid video codec could not be found for this file.", path);
Expand Down Expand Up @@ -524,8 +524,8 @@
const AVCodec *aCodec = avcodec_find_decoder(codecId);
aCodecCtx = AV_GET_CODEC_CONTEXT(aStream, aCodec);

// Set number of threads equal to number of processors (not to exceed 16)
aCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);
// Audio encoding does not typically use more than 2 threads (most codecs use 1 thread)
aCodecCtx->thread_count = std::min(FF_AUDIO_NUM_PROCESSORS, 2);

if (aCodec == NULL) {
throw InvalidCodec("A valid audio codec could not be found for this file.", path);
Expand Down Expand Up @@ -678,6 +678,13 @@
}
}
#endif // USE_HW_ACCEL
if (img_convert_ctx) {
sws_freeContext(img_convert_ctx);
img_convert_ctx = nullptr;
}
if (pFrameRGB_cached) {
AV_FREE_FRAME(&pFrameRGB_cached);
}
}

// Close the audio codec
Expand All @@ -686,6 +693,11 @@
avcodec_flush_buffers(aCodecCtx);
}
AV_FREE_CONTEXT(aCodecCtx);
if (avr_ctx) {
SWR_CLOSE(avr_ctx);
SWR_FREE(&avr_ctx);
avr_ctx = nullptr;
}
}

// Clear final cache
Expand Down Expand Up @@ -1469,15 +1481,17 @@
int width = info.width;
int64_t video_length = info.video_length;

// Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
AVFrame *pFrameRGB = nullptr;
// Create or reuse a RGB Frame (since most videos are not in RGB, we must convert it)
AVFrame *pFrameRGB = pFrameRGB_cached;
if (!pFrameRGB) {
pFrameRGB = AV_ALLOCATE_FRAME();
if (pFrameRGB == nullptr)
throw OutOfMemory("Failed to allocate frame buffer", path);

Check warning on line 1489 in src/FFmpegReader.cpp

View check run for this annotation

Codecov / codecov/patch

src/FFmpegReader.cpp#L1489

Added line #L1489 was not covered by tests
pFrameRGB_cached = pFrameRGB;
}
AV_RESET_FRAME(pFrameRGB);
uint8_t *buffer = nullptr;

// Allocate an AVFrame structure
pFrameRGB = AV_ALLOCATE_FRAME();
if (pFrameRGB == nullptr)
throw OutOfMemory("Failed to allocate frame buffer", path);

// Determine the max size of this source image (based on the timeline's size, the scaling mode,
// and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
// without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
Expand Down Expand Up @@ -1554,8 +1568,12 @@

// Determine required buffer size and allocate buffer
const int bytes_per_pixel = 4;
int buffer_size = (width * height * bytes_per_pixel) + 128;
buffer = new unsigned char[buffer_size]();
int raw_buffer_size = (width * height * bytes_per_pixel) + 128;

// Aligned memory allocation (for speed)
constexpr size_t ALIGNMENT = 32; // AVX2
int buffer_size = ((raw_buffer_size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
buffer = (unsigned char*) aligned_malloc(buffer_size, ALIGNMENT);

// Copy picture data from one AVFrame (or AVPicture) to another one.
AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);
Expand All @@ -1564,8 +1582,9 @@
if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
scale_mode = SWS_BICUBIC;
}
SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
img_convert_ctx = sws_getCachedContext(img_convert_ctx, info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width, height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
if (!img_convert_ctx)
throw OutOfMemory("Failed to initialize sws context", path);

Check warning on line 1587 in src/FFmpegReader.cpp

View check run for this annotation

Codecov / codecov/patch

src/FFmpegReader.cpp#L1587

Added line #L1587 was not covered by tests

// Resize / Convert to RGB
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
Expand All @@ -1590,11 +1609,10 @@
last_video_frame = f;

// Free the RGB image
AV_FREE_FRAME(&pFrameRGB);
AV_RESET_FRAME(pFrameRGB);

// Remove frame and packet
RemoveAVFrame(pFrame);
sws_freeContext(img_convert_ctx);
// Remove frame and packet
RemoveAVFrame(pFrame);

// Get video PTS in seconds
video_pts_seconds = (double(video_pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
Expand Down Expand Up @@ -1738,10 +1756,10 @@
audio_converted->nb_samples = audio_frame->nb_samples;
av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);

SWRCONTEXT *avr = NULL;

// setup resample context
avr = SWR_ALLOC();
SWRCONTEXT *avr = avr_ctx;
// setup resample context if needed
if (!avr) {
avr = SWR_ALLOC();
#if HAVE_CH_LAYOUT
av_opt_set_chlayout(avr, "in_chlayout", &AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout, 0);
av_opt_set_chlayout(avr, "out_chlayout", &AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout, 0);
Expand All @@ -1756,6 +1774,8 @@
av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
SWR_INIT(avr);
avr_ctx = avr;
}

// Convert audio samples
int nb_samples = SWR_CONVERT(avr, // audio resample context
Expand All @@ -1766,10 +1786,6 @@
audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
audio_frame->nb_samples); // number of input samples to convert

// Deallocate resample buffer
SWR_CLOSE(avr);
SWR_FREE(&avr);
avr = NULL;

int64_t starting_frame_number = -1;
for (int channel_filter = 0; channel_filter < info.channels; channel_filter++) {
Expand Down
6 changes: 6 additions & 0 deletions src/FFmpegReader.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#include "Clip.h"
#include "OpenMPUtilities.h"
#include "Settings.h"
#include <cstdlib>


namespace openshot {
Expand Down Expand Up @@ -148,6 +149,11 @@ namespace openshot {
int64_t NO_PTS_OFFSET;
PacketStatus packet_status;

// Cached conversion contexts and frames for performance
SwsContext *img_convert_ctx = nullptr; ///< Cached video scaler context
SWRCONTEXT *avr_ctx = nullptr; ///< Cached audio resample context
AVFrame *pFrameRGB_cached = nullptr; ///< Temporary frame used for video conversion

int hw_de_supported = 0; // Is set by FFmpegReader
#if USE_HW_ACCEL
AVPixelFormat hw_de_av_pix_fmt = AV_PIX_FMT_NONE;
Expand Down
Loading