diff --git a/examples/local_video/src/publisher.rs b/examples/local_video/src/publisher.rs
index dc56f5969..10067ae0e 100644
--- a/examples/local_video/src/publisher.rs
+++ b/examples/local_video/src/publisher.rs
@@ -5,6 +5,7 @@ use livekit::prelude::*;
 use livekit::webrtc::video_frame::{I420Buffer, VideoFrame, VideoRotation};
 use livekit::webrtc::video_source::native::NativeVideoSource;
 use livekit::webrtc::video_source::{RtcVideoSource, VideoResolution};
+use libwebrtc::stats::RtcStats;
 use livekit_api::access_token;
 use log::{debug, info};
 use nokhwa::pixel_format::RgbFormat;
@@ -21,6 +22,12 @@ use std::sync::{
 use std::time::{Duration, Instant};
 use yuv_sys;
 
+#[path = "test_pattern.rs"]
+mod test_pattern;
+use test_pattern::{
+    fill_color_bars, parse_video_source, render_timecode_overlay, VideoSourceMode,
+};
+
 #[derive(Parser, Debug)]
 #[command(author, version, about, long_about = None)]
 struct Args {
@@ -28,9 +35,9 @@ struct Args {
     #[arg(long)]
     list_cameras: bool,
 
-    /// Camera index to use (numeric)
-    #[arg(long, default_value_t = 0)]
-    camera_index: usize,
+    /// Camera index (number), or 'static'/'timecode' for test patterns
+    #[arg(long, default_value = "0")]
+    camera_index: String,
 
     /// Desired width
     #[arg(long, default_value_t = 1280)]
@@ -109,6 +116,8 @@ async fn run(args: Args, ctrl_c_received: Arc<AtomicBool>) -> Result<()> {
         return list_cameras();
     }
 
+    let source_mode = parse_video_source(&args.camera_index)?;
+
     // LiveKit connection details
     let url = args
         .url
@@ -153,39 +162,71 @@ async fn run(args: Args, ctrl_c_received: Arc<AtomicBool>) -> Result<()> {
         });
     }
 
-    // Setup camera
-    let index = CameraIndex::Index(args.camera_index as u32);
-    let requested =
-        RequestedFormat::new::<RgbFormat>(RequestedFormatType::AbsoluteHighestFrameRate);
-    let mut camera = Camera::new(index, requested)?;
-    // Try raw YUYV first (cheaper than MJPEG), fall back to MJPEG
-    let wanted =
-        CameraFormat::new(Resolution::new(args.width, args.height), FrameFormat::YUYV, args.fps);
-    let mut using_fmt = "YUYV";
-    if let Err(_) = camera
-        .set_camera_requset(RequestedFormat::new::<RgbFormat>(RequestedFormatType::Exact(wanted)))
-    {
-        let alt = CameraFormat::new(
-            Resolution::new(args.width, args.height),
-            FrameFormat::MJPEG,
-            args.fps,
-        );
-        using_fmt = "MJPEG";
-        let _ = camera
-            .set_camera_requset(RequestedFormat::new::<RgbFormat>(RequestedFormatType::Exact(alt)));
-    }
-    camera.open_stream()?;
-    let fmt = camera.camera_format();
-    let width = fmt.width();
-    let height = fmt.height();
-    let fps = fmt.frame_rate();
-    info!("Camera opened: {}x{} @ {} fps (format: {})", width, height, fps, using_fmt);
-    debug!("Negotiated nokhwa CameraFormat: {:?}", fmt);
-    // Pace publishing at the requested FPS (not the camera-reported FPS) to hit desired cadence
+    // Determine frame dimensions (and optionally open camera)
+    let width;
+    let height;
+    let mut camera_state: Option<(Camera, bool)> = None;
+
+    match &source_mode {
+        VideoSourceMode::Camera(idx) => {
+            let index = CameraIndex::Index(*idx);
+            let requested =
+                RequestedFormat::new::<RgbFormat>(RequestedFormatType::AbsoluteHighestFrameRate);
+            let mut camera = Camera::new(index, requested)?;
+            let wanted = CameraFormat::new(
+                Resolution::new(args.width, args.height),
+                FrameFormat::YUYV,
+                args.fps,
+            );
+            let mut using_fmt = "YUYV";
+            if camera
+                .set_camera_requset(RequestedFormat::new::<RgbFormat>(
+                    RequestedFormatType::Exact(wanted),
+                ))
+                .is_err()
+            {
+                let alt = CameraFormat::new(
+                    Resolution::new(args.width, args.height),
+                    FrameFormat::MJPEG,
+                    args.fps,
+                );
+                using_fmt = "MJPEG";
+                let _ = camera.set_camera_requset(RequestedFormat::new::<RgbFormat>(
+                    RequestedFormatType::Exact(alt),
+                ));
+            }
+            camera.open_stream()?;
+            let fmt = camera.camera_format();
+            width = fmt.width();
+            height = fmt.height();
+            let is_yuyv = fmt.format() == FrameFormat::YUYV;
+            info!(
+                "Camera opened: {}x{} @ {} fps (format: {})",
+                width,
+                height,
+                fmt.frame_rate(),
+                using_fmt
+            );
+            debug!("Negotiated nokhwa CameraFormat: {:?}", fmt);
+            camera_state = Some((camera, is_yuyv));
+        }
+        _ => {
+            width = args.width;
+            height = args.height;
+            info!(
+                "Test pattern mode ({:?}): {}x{} @ {} fps",
+                source_mode, width, height, args.fps
+            );
+        }
+    }
+
     let pace_fps = args.fps as f64;
 
-    // Create LiveKit video source and track
-    let rtc_source = NativeVideoSource::new(VideoResolution { width, height }, false);
+    // Create LiveKit video source and track.
+    // For test patterns, mark the source as screencast so WebRTC's AdaptedVideoTrackSource
+    // preserves the full resolution rather than scaling it down via AdaptFrame.
+    let is_screencast = !matches!(source_mode, VideoSourceMode::Camera(_));
+    let rtc_source = NativeVideoSource::new(VideoResolution { width, height }, is_screencast);
     let track =
         LocalVideoTrack::create_video_track("camera", RtcVideoSource::Native(rtc_source.clone()));
@@ -218,12 +259,76 @@ async fn run(args: Args, ctrl_c_received: Arc<AtomicBool>) -> Result<()> {
         room.local_participant()
             .publish_track(LocalTrack::Video(track.clone()), publish_opts(VideoCodec::H264))
             .await?;
-        info!("Published camera track with H.264 fallback");
+        info!("Published track with H.264 fallback");
         } else {
             return Err(e.into());
         }
     } else {
-        info!("Published camera track");
+        info!("Published track");
+    }
+
+    // Periodically log encoder details
+    {
+        let track = track.clone();
+        let ctrl_c_stats = ctrl_c_received.clone();
+        tokio::spawn(async move {
+            tokio::time::sleep(Duration::from_secs(3)).await;
+            let mut interval = tokio::time::interval(Duration::from_secs(5));
+            interval.tick().await;
+            loop {
+                if ctrl_c_stats.load(Ordering::Acquire) {
+                    break;
+                }
+                match track.get_stats().await {
+                    Ok(stats) => {
+                        for stat in &stats {
+                            if let RtcStats::OutboundRtp(rtp) = stat {
+                                let enc = &rtp.outbound.encoder_implementation;
+                                if enc.is_empty() {
+                                    continue;
+                                }
+                                let hw_sw = if rtp.outbound.power_efficient_encoder {
+                                    "HW"
+                                } else {
+                                    "SW"
+                                };
+                                let codec = stats
+                                    .iter()
+                                    .find_map(|s| match s {
+                                        RtcStats::Codec(c)
+                                            if c.rtc.id == rtp.stream.codec_id =>
+                                        {
+                                            Some(c.codec.mime_type.as_str())
+                                        }
+                                        _ => None,
+                                    })
+                                    .unwrap_or("?");
+                                let rid = if rtp.outbound.rid.is_empty() {
+                                    "-"
+                                } else {
+                                    &rtp.outbound.rid
+                                };
+                                info!(
+                                    "Encoder [{}]: {} ({}, {}) — {}x{} @ {:.1} fps, target {:.0} kbps",
+                                    rid,
+                                    enc,
+                                    hw_sw,
+                                    codec,
+                                    rtp.outbound.frame_width,
+                                    rtp.outbound.frame_height,
+                                    rtp.outbound.frames_per_second,
+                                    rtp.outbound.target_bitrate / 1000.0,
+                                );
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        debug!("Could not retrieve encoder stats: {}", e);
+                    }
+                }
+                interval.tick().await;
+            }
+        });
     }
 
     // Reusable I420 buffer and frame
@@ -232,214 +337,250 @@ async fn run(args: Args, ctrl_c_received: Arc<AtomicBool>) -> Result<()> {
         timestamp_us: 0,
         buffer: I420Buffer::new(width, height),
     };
-    let is_yuyv = fmt.format() == FrameFormat::YUYV;
-    info!(
-        "Selected conversion path: {}",
-        if is_yuyv { "YUYV->I420 (libyuv)" } else { "Auto (RGB24 or MJPEG)" }
-    );
 
     // Accurate pacing using absolute schedule (no drift)
     let mut ticker = tokio::time::interval(Duration::from_secs_f64(1.0 / pace_fps));
     ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
-    // Align the first tick to now
     ticker.tick().await;
     let start_ts = Instant::now();
 
-    // Capture loop
     let mut frames: u64 = 0;
     let mut last_fps_log = Instant::now();
     let target = Duration::from_secs_f64(1.0 / pace_fps);
-    info!("Target frame interval: {:.2} ms", target.as_secs_f64() * 1000.0);
-
-    // Timing accumulators (ms) for rolling stats
-    let mut sum_get_ms = 0.0;
-    let mut sum_decode_ms = 0.0;
-    let mut sum_convert_ms = 0.0;
-    let mut sum_capture_ms = 0.0;
-    let mut sum_sleep_ms = 0.0;
-    let mut sum_iter_ms = 0.0;
-    let mut logged_mjpeg_fallback = false;
-    loop {
-        if ctrl_c_received.load(Ordering::Acquire) {
-            break;
-        }
-        // Wait until the scheduled next frame time
-        let wait_start = Instant::now();
-        ticker.tick().await;
-        let iter_start = Instant::now();
-
-        // Get frame as RGB24 (decoded by nokhwa if needed)
-        let t0 = Instant::now();
-        let frame_buf = camera.frame()?;
-        let t1 = Instant::now();
-        let (stride_y, stride_u, stride_v) = frame.buffer.strides();
-        let (data_y, data_u, data_v) = frame.buffer.data_mut();
-        // Fast path for YUYV: convert directly to I420 via libyuv
-        let t2 = if is_yuyv {
-            let src = frame_buf.buffer();
-            let src_bytes = src.as_ref();
-            let src_stride = (width * 2) as i32; // YUYV packed 4:2:2
-            let t2_local = t1; // no decode step in YUYV path
-            unsafe {
-                // returns 0 on success
-                let _ = yuv_sys::rs_YUY2ToI420(
-                    src_bytes.as_ptr(),
-                    src_stride,
-                    data_y.as_mut_ptr(),
-                    stride_y as i32,
-                    data_u.as_mut_ptr(),
-                    stride_u as i32,
-                    data_v.as_mut_ptr(),
-                    stride_v as i32,
-                    width as i32,
-                    height as i32,
-                );
-            }
-            t2_local
-        } else {
-            // Auto path (either RGB24 already or compressed MJPEG)
-            let src = frame_buf.buffer();
-            let t2_local = if src.len() == (width as usize * height as usize * 3) {
-                // Already RGB24 from backend; convert directly
-                unsafe {
-                    let _ = yuv_sys::rs_RGB24ToI420(
-                        src.as_ref().as_ptr(),
-                        (width * 3) as i32,
-                        data_y.as_mut_ptr(),
-                        stride_y as i32,
-                        data_u.as_mut_ptr(),
-                        stride_u as i32,
-                        data_v.as_mut_ptr(),
-                        stride_v as i32,
-                        width as i32,
-                        height as i32,
-                    );
-                }
-                Instant::now()
-            } else {
-                // Try fast MJPEG->I420 via libyuv if available; fallback to image crate
-                let mut used_fast_mjpeg = false;
-                let t2_try = unsafe {
-                    // rs_MJPGToI420 returns 0 on success
-                    let ret = yuv_sys::rs_MJPGToI420(
-                        src.as_ref().as_ptr(),
-                        src.len(),
-                        data_y.as_mut_ptr(),
-                        stride_y as i32,
-                        data_u.as_mut_ptr(),
-                        stride_u as i32,
-                        data_v.as_mut_ptr(),
-                        stride_v as i32,
-                        width as i32,
-                        height as i32,
-                        width as i32,
-                        height as i32,
-                    );
-                    if ret == 0 {
-                        used_fast_mjpeg = true;
-                        Instant::now()
-                    } else {
-                        t1
-                    }
-                };
-                if used_fast_mjpeg {
-                    t2_try
-                } else {
-                    // Fallback: decode MJPEG using image crate then RGB24->I420
-                    match image::load_from_memory(src.as_ref()) {
-                        Ok(img_dyn) => {
-                            let rgb8 = img_dyn.to_rgb8();
-                            let dec_w = rgb8.width() as u32;
-                            let dec_h = rgb8.height() as u32;
-                            if dec_w != width || dec_h != height {
-                                log::warn!(
-                                    "Decoded MJPEG size {}x{} differs from requested {}x{}; dropping frame",
-                                    dec_w, dec_h, width, height
-                                );
-                                continue;
-                            }
-                            unsafe {
-                                let _ = yuv_sys::rs_RGB24ToI420(
-                                    rgb8.as_raw().as_ptr(),
-                                    (dec_w * 3) as i32,
-                                    data_y.as_mut_ptr(),
-                                    stride_y as i32,
-                                    data_u.as_mut_ptr(),
-                                    stride_u as i32,
-                                    data_v.as_mut_ptr(),
-                                    stride_v as i32,
-                                    width as i32,
-                                    height as i32,
-                                );
-                            }
-                            Instant::now()
-                        }
-                        Err(e2) => {
-                            if !logged_mjpeg_fallback {
-                                log::error!(
-                                    "MJPEG decode failed; buffer not RGB24 and image decode failed: {}",
-                                    e2
-                                );
-                                logged_mjpeg_fallback = true;
-                            }
-                            continue;
-                        }
-                    }
-                }
-            };
-            t2_local
-        };
-        let t3 = Instant::now();
-
-        // Update RTP timestamp (monotonic, microseconds since start)
-        frame.timestamp_us = start_ts.elapsed().as_micros() as i64;
-        rtc_source.capture_frame(&frame);
-        let t4 = Instant::now();
-
-        frames += 1;
-        // We already paced via interval; measure actual sleep time for logging only
-        let sleep_dur = iter_start - wait_start;
-
-        // Per-iteration timing bookkeeping
-        let t_end = Instant::now();
-        let get_ms = (t1 - t0).as_secs_f64() * 1000.0;
-        let decode_ms = (t2 - t1).as_secs_f64() * 1000.0;
-        let convert_ms = (t3 - t2).as_secs_f64() * 1000.0;
-        let capture_ms = (t4 - t3).as_secs_f64() * 1000.0;
-        let sleep_ms = sleep_dur.as_secs_f64() * 1000.0;
-        let iter_ms = (t_end - iter_start).as_secs_f64() * 1000.0;
-        sum_get_ms += get_ms;
-        sum_decode_ms += decode_ms;
-        sum_convert_ms += convert_ms;
-        sum_capture_ms += capture_ms;
-        sum_sleep_ms += sleep_ms;
-        sum_iter_ms += iter_ms;
-
-        if last_fps_log.elapsed() >= std::time::Duration::from_secs(2) {
-            let secs = last_fps_log.elapsed().as_secs_f64();
-            let fps_est = frames as f64 / secs;
-            let n = frames.max(1) as f64;
-            info!(
-                "Publishing video: {}x{}, ~{:.1} fps | avg ms: get {:.2}, decode {:.2}, convert {:.2}, capture {:.2}, sleep {:.2}, iter {:.2} | target {:.2}",
-                width,
-                height,
-                fps_est,
-                sum_get_ms / n,
-                sum_decode_ms / n,
-                sum_convert_ms / n,
-                sum_capture_ms / n,
-                sum_sleep_ms / n,
-                sum_iter_ms / n,
-                target.as_secs_f64() * 1000.0,
-            );
-            frames = 0;
-            sum_get_ms = 0.0;
-            sum_decode_ms = 0.0;
-            sum_convert_ms = 0.0;
-            sum_capture_ms = 0.0;
-            sum_sleep_ms = 0.0;
-            sum_iter_ms = 0.0;
-            last_fps_log = Instant::now();
-        }
+
+    match &source_mode {
+        // -----------------------------------------------------------------
+        // Camera capture loop
+        // -----------------------------------------------------------------
+        VideoSourceMode::Camera(_) => {
+            let (mut camera, is_yuyv) = camera_state.unwrap();
+            info!(
+                "Selected conversion path: {}",
+                if is_yuyv { "YUYV->I420 (libyuv)" } else { "Auto (RGB24 or MJPEG)" }
+            );
+            info!("Target frame interval: {:.2} ms", target.as_secs_f64() * 1000.0);
+
+            let mut sum_get_ms = 0.0;
+            let mut sum_decode_ms = 0.0;
+            let mut sum_convert_ms = 0.0;
+            let mut sum_capture_ms = 0.0;
+            let mut sum_sleep_ms = 0.0;
+            let mut sum_iter_ms = 0.0;
+            let mut logged_mjpeg_fallback = false;
+
+            loop {
+                if ctrl_c_received.load(Ordering::Acquire) {
+                    break;
+                }
+                let wait_start = Instant::now();
+                ticker.tick().await;
+                let iter_start = Instant::now();
+
+                let t0 = Instant::now();
+                let frame_buf = camera.frame()?;
+                let t1 = Instant::now();
+                let (stride_y, stride_u, stride_v) = frame.buffer.strides();
+                let (data_y, data_u, data_v) = frame.buffer.data_mut();
+                // Fast path for YUYV: convert directly to I420 via libyuv
+                let t2 = if is_yuyv {
+                    let src = frame_buf.buffer();
+                    let src_bytes = src.as_ref();
+                    let src_stride = (width * 2) as i32;
+                    unsafe {
+                        let _ = yuv_sys::rs_YUY2ToI420(
+                            src_bytes.as_ptr(),
+                            src_stride,
+                            data_y.as_mut_ptr(),
+                            stride_y as i32,
+                            data_u.as_mut_ptr(),
+                            stride_u as i32,
+                            data_v.as_mut_ptr(),
+                            stride_v as i32,
+                            width as i32,
+                            height as i32,
+                        );
+                    }
+                    t1
+                } else {
+                    // Auto path (either RGB24 already or compressed MJPEG)
+                    let src = frame_buf.buffer();
+                    let t2_local = if src.len() == (width as usize * height as usize * 3) {
+                        unsafe {
+                            let _ = yuv_sys::rs_RGB24ToI420(
+                                src.as_ref().as_ptr(),
+                                (width * 3) as i32,
+                                data_y.as_mut_ptr(),
+                                stride_y as i32,
+                                data_u.as_mut_ptr(),
+                                stride_u as i32,
+                                data_v.as_mut_ptr(),
+                                stride_v as i32,
+                                width as i32,
+                                height as i32,
+                            );
+                        }
+                        Instant::now()
+                    } else {
+                        let mut used_fast_mjpeg = false;
+                        let t2_try = unsafe {
+                            let ret = yuv_sys::rs_MJPGToI420(
+                                src.as_ref().as_ptr(),
+                                src.len(),
+                                data_y.as_mut_ptr(),
+                                stride_y as i32,
+                                data_u.as_mut_ptr(),
+                                stride_u as i32,
+                                data_v.as_mut_ptr(),
+                                stride_v as i32,
+                                width as i32,
+                                height as i32,
+                                width as i32,
+                                height as i32,
+                            );
+                            if ret == 0 {
+                                used_fast_mjpeg = true;
+                                Instant::now()
+                            } else {
+                                t1
+                            }
+                        };
+                        if used_fast_mjpeg {
+                            t2_try
+                        } else {
+                            match image::load_from_memory(src.as_ref()) {
+                                Ok(img_dyn) => {
+                                    let rgb8 = img_dyn.to_rgb8();
+                                    let dec_w = rgb8.width() as u32;
+                                    let dec_h = rgb8.height() as u32;
+                                    if dec_w != width || dec_h != height {
+                                        log::warn!(
+                                            "Decoded MJPEG size {}x{} differs from requested {}x{}; dropping frame",
+                                            dec_w, dec_h, width, height
+                                        );
+                                        continue;
+                                    }
+                                    unsafe {
+                                        let _ = yuv_sys::rs_RGB24ToI420(
+                                            rgb8.as_raw().as_ptr(),
+                                            (dec_w * 3) as i32,
+                                            data_y.as_mut_ptr(),
+                                            stride_y as i32,
+                                            data_u.as_mut_ptr(),
+                                            stride_u as i32,
+                                            data_v.as_mut_ptr(),
+                                            stride_v as i32,
+                                            width as i32,
+                                            height as i32,
+                                        );
+                                    }
+                                    Instant::now()
+                                }
+                                Err(e2) => {
+                                    if !logged_mjpeg_fallback {
+                                        log::error!(
+                                            "MJPEG decode failed; buffer not RGB24 and image decode failed: {}",
+                                            e2
+                                        );
+                                        logged_mjpeg_fallback = true;
+                                    }
+                                    continue;
+                                }
+                            }
+                        }
+                    };
+                    t2_local
+                };
+                let t3 = Instant::now();
+
+                frame.timestamp_us = start_ts.elapsed().as_micros() as i64;
+                rtc_source.capture_frame(&frame);
+                let t4 = Instant::now();
+
+                frames += 1;
+                let sleep_dur = iter_start - wait_start;
+                let t_end = Instant::now();
+                let get_ms = (t1 - t0).as_secs_f64() * 1000.0;
+                let decode_ms = (t2 - t1).as_secs_f64() * 1000.0;
+                let convert_ms = (t3 - t2).as_secs_f64() * 1000.0;
+                let capture_ms = (t4 - t3).as_secs_f64() * 1000.0;
+                let sleep_ms = sleep_dur.as_secs_f64() * 1000.0;
+                let iter_ms = (t_end - iter_start).as_secs_f64() * 1000.0;
+                sum_get_ms += get_ms;
+                sum_decode_ms += decode_ms;
+                sum_convert_ms += convert_ms;
+                sum_capture_ms += capture_ms;
+                sum_sleep_ms += sleep_ms;
+                sum_iter_ms += iter_ms;
+
+                if last_fps_log.elapsed() >= Duration::from_secs(2) {
+                    let secs = last_fps_log.elapsed().as_secs_f64();
+                    let fps_est = frames as f64 / secs;
+                    let n = frames.max(1) as f64;
+                    info!(
+                        "Publishing video: {}x{}, ~{:.1} fps | avg ms: get {:.2}, decode {:.2}, convert {:.2}, capture {:.2}, sleep {:.2}, iter {:.2} | target {:.2}",
+                        width, height, fps_est,
+                        sum_get_ms / n, sum_decode_ms / n, sum_convert_ms / n,
+                        sum_capture_ms / n, sum_sleep_ms / n, sum_iter_ms / n,
+                        target.as_secs_f64() * 1000.0,
+                    );
+                    frames = 0;
+                    sum_get_ms = 0.0;
+                    sum_decode_ms = 0.0;
+                    sum_convert_ms = 0.0;
+                    sum_capture_ms = 0.0;
+                    sum_sleep_ms = 0.0;
+                    sum_iter_ms = 0.0;
+                    last_fps_log = Instant::now();
+                }
+            }
+        }
+
+        // -----------------------------------------------------------------
+        // Test-pattern capture loop (static or timecode)
+        // -----------------------------------------------------------------
+        mode => {
+            // For the static pattern, fill the buffer once and reuse every frame
+            // (identical content → encoder sees zero motion).
+            if matches!(mode, VideoSourceMode::Static) {
+                let (stride_y, stride_u, stride_v) = frame.buffer.strides();
+                let (y_data, u_data, v_data) = frame.buffer.data_mut();
+                fill_color_bars(
+                    y_data, u_data, v_data, stride_y, stride_u, stride_v, width, height,
+                );
+            }
+
+            loop {
+                if ctrl_c_received.load(Ordering::Acquire) {
+                    break;
+                }
+                ticker.tick().await;
+
+                if matches!(mode, VideoSourceMode::Timecode) {
+                    let (stride_y, stride_u, stride_v) = frame.buffer.strides();
+                    let (y_data, u_data, v_data) = frame.buffer.data_mut();
+                    fill_color_bars(
+                        y_data, u_data, v_data, stride_y, stride_u, stride_v, width, height,
+                    );
+                    render_timecode_overlay(
+                        y_data, u_data, v_data, stride_y, stride_u, stride_v, width, height,
+                    );
+                }
+
+                frame.timestamp_us = start_ts.elapsed().as_micros() as i64;
+                rtc_source.capture_frame(&frame);
+                frames += 1;
+
+                if last_fps_log.elapsed() >= Duration::from_secs(2) {
+                    let secs = last_fps_log.elapsed().as_secs_f64();
+                    let fps_est = frames as f64 / secs;
+                    info!(
+                        "Publishing {:?} pattern: {}x{}, ~{:.1} fps",
+                        mode, width, height, fps_est
+                    );
+                    frames = 0;
+                    last_fps_log = Instant::now();
+                }
+            }
+        }
     }
 
diff --git a/examples/local_video/src/subscriber.rs b/examples/local_video/src/subscriber.rs
index 255f9f867..533861c2e 100644
--- a/examples/local_video/src/subscriber.rs
+++ b/examples/local_video/src/subscriber.rs
@@ -329,18 +329,49 @@ async fn handle_track_subscribed(
             frames = 0;
             last_log = Instant::now();
         }
-        // Periodically infer active simulcast quality from inbound stats
-        if last_stats.elapsed() >= Duration::from_secs(1) {
+        // Periodically infer active simulcast quality and log decoder details
+        if last_stats.elapsed() >= Duration::from_secs(5) {
             if let Ok(stats) = rt_clone.block_on(video_track.get_stats()) {
                 let mut inbound: Option<livekit::webrtc::stats::InboundRtpStats> = None;
+                let mut codec_by_id: HashMap<String, (String, String)> = HashMap::new();
                 for s in stats.iter() {
-                    if let livekit::webrtc::stats::RtcStats::InboundRtp(i) = s {
-                        if i.stream.kind == "video" {
-                            inbound = Some(i.clone());
+                    match s {
+                        livekit::webrtc::stats::RtcStats::InboundRtp(i) => {
+                            if i.stream.kind == "video" {
+                                inbound = Some(i.clone());
+                            }
                         }
+                        livekit::webrtc::stats::RtcStats::Codec(c) => {
+                            codec_by_id.insert(
+                                c.rtc.id.clone(),
+                                (c.codec.mime_type.clone(), c.codec.sdp_fmtp_line.clone()),
+                            );
+                        }
+                        _ => {}
                     }
                 }
                 if let Some(i) = inbound {
+                    let dec = &i.inbound.decoder_implementation;
+                    if !dec.is_empty() {
+                        let hw_sw = if i.inbound.power_efficient_decoder {
+                            "HW"
+                        } else {
+                            "SW"
+                        };
+                        let codec_label = codec_by_id
+                            .get(&i.stream.codec_id)
+                            .map(|(m, _)| m.as_str())
+                            .unwrap_or("?");
+                        info!(
+                            "Decoder: {} ({}, {}) — {}x{} @ {:.1} fps",
+                            dec,
+                            hw_sw,
+                            codec_label,
+                            i.inbound.frame_width,
+                            i.inbound.frame_height,
+                            i.inbound.frames_per_second,
+                        );
+                    }
                     if let Some((fw, fh)) = simulcast_state_full_dims(&simulcast2) {
                         let q = infer_quality_from_dims(
                             fw,
diff --git a/examples/local_video/src/test_pattern.rs b/examples/local_video/src/test_pattern.rs
new file mode 100644
index 000000000..2e8c1bb6b
--- /dev/null
+++ b/examples/local_video/src/test_pattern.rs
@@ -0,0 +1,223 @@
+use anyhow::Result;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+// ---------------------------------------------------------------------------
+// Source mode
+// ---------------------------------------------------------------------------
+
+#[derive(Debug)]
+pub enum VideoSourceMode {
+    Camera(u32),
+    Static,
+    Timecode,
+}
+
+pub fn parse_video_source(s: &str) -> Result<VideoSourceMode> {
+    match s {
+        "static" => Ok(VideoSourceMode::Static),
+        "timecode" => Ok(VideoSourceMode::Timecode),
+        other => {
+            let idx: u32 = other.parse().map_err(|_| {
+                anyhow::anyhow!(
+                    "Invalid --camera-index '{}': use a number, 'static', or 'timecode'",
+                    other
+                )
+            })?;
+            Ok(VideoSourceMode::Camera(idx))
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Bitmap font (5×7, digits + colon + letters)
+// ---------------------------------------------------------------------------
+
+const GLYPH_W: u32 = 5;
+const GLYPH_H: u32 = 7;
+
+#[rustfmt::skip]
+const DIGIT_GLYPHS: [[u8; 7]; 10] = [
+    [0x0E, 0x11, 0x13, 0x15, 0x19, 0x11, 0x0E], // 0
+    [0x04, 0x0C, 0x04, 0x04, 0x04, 0x04, 0x0E], // 1
+    [0x0E, 0x11, 0x01, 0x06, 0x08, 0x10, 0x1F], // 2
+    [0x0E, 0x11, 0x01, 0x06, 0x01, 0x11, 0x0E], // 3
+    [0x02, 0x06, 0x0A, 0x12, 0x1F, 0x02, 0x02], // 4
+    [0x1F, 0x10, 0x1E, 0x01, 0x01, 0x11, 0x0E], // 5
+    [0x0E, 0x11, 0x10, 0x1E, 0x11, 0x11, 0x0E], // 6
+    [0x1F, 0x01, 0x02, 0x04, 0x08, 0x08, 0x08], // 7
+    [0x0E, 0x11, 0x11, 0x0E, 0x11, 0x11, 0x0E], // 8
+    [0x0E, 0x11, 0x11, 0x0F, 0x01, 0x11, 0x0E], // 9
+];
+
+const COLON_GLYPH: [u8; 7] = [0x00, 0x04, 0x04, 0x00, 0x04, 0x04, 0x00];
+const SPACE_GLYPH: [u8; 7] = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
+
+#[rustfmt::skip]
+const GLYPH_U: [u8; 7] = [0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x0E]; // U
+#[rustfmt::skip]
+const GLYPH_T: [u8; 7] = [0x1F, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04]; // T
+#[rustfmt::skip]
+const GLYPH_C: [u8; 7] = [0x0E, 0x11, 0x10, 0x10, 0x10, 0x11, 0x0E]; // C
+
+fn glyph_for(c: char) -> Option<&'static [u8; 7]> {
+    match c {
+        '0'..='9' => Some(&DIGIT_GLYPHS[(c as u8 - b'0') as usize]),
+        ':' => Some(&COLON_GLYPH),
+        ' ' => Some(&SPACE_GLYPH),
+        'U' => Some(&GLYPH_U),
+        'T' => Some(&GLYPH_T),
+        'C' => Some(&GLYPH_C),
+        _ => None,
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Colour helpers
+// ---------------------------------------------------------------------------
+
+/// BT.601 RGB → YCbCr
+fn rgb_to_yuv(r: u8, g: u8, b: u8) -> (u8, u8, u8) {
+    let (r, g, b) = (r as i32, g as i32, b as i32);
+    let y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
+    let u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
+    let v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
+    (y.clamp(0, 255) as u8, u.clamp(0, 255) as u8, v.clamp(0, 255) as u8)
+}
+
+// ---------------------------------------------------------------------------
+// Pattern rendering (operates on raw I420 plane slices)
+// ---------------------------------------------------------------------------
+
+/// Fill I420 planes with 75 % SMPTE colour bars.
+pub fn fill_color_bars(
+    y_data: &mut [u8],
+    u_data: &mut [u8],
+    v_data: &mut [u8],
+    stride_y: u32,
+    stride_u: u32,
+    stride_v: u32,
+    width: u32,
+    height: u32,
+) {
+    const BARS: [(u8, u8, u8); 7] = [
+        (192, 192, 192), // white
+        (192, 192, 0),   // yellow
+        (0, 192, 192),   // cyan
+        (0, 192, 0),     // green
+        (192, 0, 192),   // magenta
+        (192, 0, 0),     // red
+        (0, 0, 192),     // blue
+    ];
+    let yuv: Vec<_> = BARS.iter().map(|&(r, g, b)| rgb_to_yuv(r, g, b)).collect();
+    let bar_w = width / 7;
+    for row in 0..height {
+        for col in 0..width {
+            let i = (col / bar_w).min(6) as usize;
+            y_data[(row * stride_y + col) as usize] = yuv[i].0;
+            if row % 2 == 0 && col % 2 == 0 {
+                u_data[((row / 2) * stride_u + col / 2) as usize] = yuv[i].1;
+                v_data[((row / 2) * stride_v + col / 2) as usize] = yuv[i].2;
+            }
+        }
+    }
+}
+
+/// Render a single bitmap-font character (white on existing background).
+fn draw_glyph(
+    y_data: &mut [u8],
+    u_data: &mut [u8],
+    v_data: &mut [u8],
+    stride_y: u32,
+    stride_u: u32,
+    stride_v: u32,
+    c: char,
+    x0: u32,
+    y0: u32,
+    scale: u32,
+    frame_w: u32,
+    frame_h: u32,
+) {
+    let glyph = match glyph_for(c) {
+        Some(g) => g,
+        None => return,
+    };
+    for gy in 0..GLYPH_H {
+        let bits = glyph[gy as usize];
+        for gx in 0..GLYPH_W {
+            if (bits >> (GLYPH_W - 1 - gx)) & 1 == 0 {
+                continue;
+            }
+            for sy in 0..scale {
+                for sx in 0..scale {
+                    let px = x0 + gx * scale + sx;
+                    let py = y0 + gy * scale + sy;
+                    if px >= frame_w || py >= frame_h {
+                        continue;
+                    }
+                    y_data[(py * stride_y + px) as usize] = 235;
+                    if px % 2 == 0 && py % 2 == 0 {
+                        u_data[((py / 2) * stride_u + px / 2) as usize] = 128;
+                        v_data[((py / 2) * stride_v + px / 2) as usize] = 128;
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Draw HH:MM:SS:mmm wall-clock timecode and a vertical sweep line on the I420 planes.
+pub fn render_timecode_overlay(
+    y_data: &mut [u8],
+    u_data: &mut [u8],
+    v_data: &mut [u8],
+    stride_y: u32,
+    stride_u: u32,
+    stride_v: u32,
+    width: u32,
+    height: u32,
+) {
+    let since_epoch = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_default();
+    let total_secs = since_epoch.as_secs();
+    let millis = since_epoch.subsec_millis();
+    let ss = total_secs % 60;
+    let mm = (total_secs / 60) % 60;
+    let hh = (total_secs / 3600) % 24;
+
+    let tc = format!("{:02}:{:02}:{:02}:{:03} UTC", hh, mm, ss, millis);
+
+    let scale = (height / 120).max(1).min(12);
+    let char_w = GLYPH_W * scale;
+    let spacing = scale;
+    let text_w = tc.len() as u32 * (char_w + spacing) - spacing;
+    let char_h = GLYPH_H * scale;
+    let text_x = width.saturating_sub(text_w) / 2;
+    let text_y = height / 6;
+
+    // Dark background rectangle behind timecode text
+    let pad = scale * 2;
+    let bx0 = text_x.saturating_sub(pad);
+    let by0 = text_y.saturating_sub(pad);
+    let bx1 = (text_x + text_w + pad).min(width);
+    let by1 = (text_y + char_h + pad).min(height);
+    for row in by0..by1 {
+        for col in bx0..bx1 {
+            y_data[(row * stride_y + col) as usize] = 16;
+            if row % 2 == 0 && col % 2 == 0 {
+                u_data[((row / 2) * stride_u + col / 2) as usize] = 128;
+                v_data[((row / 2) * stride_v + col / 2) as usize] = 128;
+            }
+        }
+    }
+
+    let mut cx = text_x;
+    for c in tc.chars() {
+        draw_glyph(
+            y_data, u_data, v_data, stride_y, stride_u, stride_v, c, cx, text_y, scale, width,
+            height,
+        );
+        cx += char_w + spacing;
+    }
+
+}
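
Note (not part of the patch): a minimal sketch of how the new test_pattern helpers could be unit-tested, assuming it were appended to test_pattern.rs. The module name, the 64x64 buffer size, and the assertions are illustrative assumptions; only the signatures introduced in the diff above are used.

// Hypothetical test module; not included in the diff above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_camera_index_and_pattern_names() {
        assert!(matches!(parse_video_source("static"), Ok(VideoSourceMode::Static)));
        assert!(matches!(parse_video_source("timecode"), Ok(VideoSourceMode::Timecode)));
        assert!(matches!(parse_video_source("2"), Ok(VideoSourceMode::Camera(2))));
        assert!(parse_video_source("not-a-camera").is_err());
    }

    #[test]
    fn color_bars_touch_every_luma_sample() {
        // Arbitrary small I420 buffer: full-resolution Y plane,
        // quarter-resolution U and V planes, tightly packed strides.
        let (w, h) = (64u32, 64u32);
        let mut y = vec![0u8; (w * h) as usize];
        let mut u = vec![0u8; ((w / 2) * (h / 2)) as usize];
        let mut v = vec![0u8; ((w / 2) * (h / 2)) as usize];
        fill_color_bars(&mut y, &mut u, &mut v, w, w / 2, w / 2, w, h);
        // 75% bars keep luma well above zero, so no Y sample should remain 0.
        assert!(y.iter().all(|&s| s > 0));
    }
}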