new fall back to gdi

open-trade 2021-09-07 19:03:59 +08:00
parent d369955790
commit 8138a661a6


@@ -1,395 +1,409 @@
// 24FPS (actually 23.976FPS) is what video professionals ages ago determined to be the
// slowest playback rate that still looks smooth enough to feel real.
// Our eyes can see a slight difference and even though 30FPS actually shows
// more information and is more realistic.
// 60FPS is commonly used in games; TeamViewer 12 supports this for video-editing users.
// how to capture with mouse cursor:
// https://docs.microsoft.com/zh-cn/windows/win32/direct3ddxgi/desktop-dup-api?redirectedfrom=MSDN
// Implements hardware encoding/decoding and audio capture, and also draws the mouse cursor:
// https://github.com/PHZ76/DesktopSharing
// dxgi memory leak issue
// https://stackoverflow.com/questions/47801238/memory-leak-in-creating-direct2d-device
// but per my test, it is more related to AcquireNextFrame,
// https://forums.developer.nvidia.com/t/dxgi-outputduplication-memory-leak-when-using-nv-but-not-amd-drivers/108582
// to-do:
// https://slhck.info/video/2017/03/01/rate-control.html
use super::*;
use scrap::{Capturer, Config, Display, EncodeFrame, Encoder, VideoCodecId, STRIDE_ALIGN};
use std::{
    io::ErrorKind::WouldBlock,
    time::{self, Instant},
};

const WAIT_BASE: i32 = 17;
pub const NAME: &'static str = "video";

lazy_static::lazy_static! {
    static ref CURRENT_DISPLAY: Arc<Mutex<usize>> = Arc::new(Mutex::new(usize::MAX));
    static ref LAST_ACTIVE: Arc<Mutex<Instant>> = Arc::new(Mutex::new(Instant::now()));
    static ref SWITCH: Arc<Mutex<bool>> = Default::default();
    static ref INTERNAL_LATENCIES: Arc<Mutex<HashMap<i32, i64>>> = Default::default();
    static ref TEST_LATENCIES: Arc<Mutex<HashMap<i32, i64>>> = Default::default();
    static ref IMAGE_QUALITIES: Arc<Mutex<HashMap<i32, i32>>> = Default::default();
}

pub fn new() -> GenericService {
    let sp = GenericService::new(NAME, true);
    sp.run(run);
    sp
}

fn run(sp: GenericService) -> ResultType<()> {
    let fps = 30;
    let spf = time::Duration::from_secs_f32(1. / (fps as f32));
    let (ndisplay, current, display) = get_current_display()?;
    let (origin, width, height) = (display.origin(), display.width(), display.height());
    log::debug!(
        "#displays={}, current={}, origin: {:?}, width={}, height={}",
        ndisplay,
        current,
        &origin,
        width,
        height
    );
    // The Capturer object is expensive, so avoid creating it frequently.
    let mut c = Capturer::new(display, true).with_context(|| "Failed to create capturer")?;
    let q = get_image_quality();
    let (bitrate, rc_min_quantizer, rc_max_quantizer, speed) = get_quality(width, height, q);
    log::info!("bitrate={}, rc_min_quantizer={}", bitrate, rc_min_quantizer);
    let mut wait = WAIT_BASE;
    let cfg = Config {
        width: width as _,
        height: height as _,
        timebase: [1, 1000], // Output timestamp precision
        bitrate,
        codec: VideoCodecId::VP9,
        rc_min_quantizer,
        rc_max_quantizer,
        speed,
    };
    let mut vpx;
    let mut n = ((width * height) as f64 / (1920 * 1080) as f64).round() as u32;
    if n < 1 {
        n = 1;
    }
    match Encoder::new(&cfg, n) {
        Ok(x) => vpx = x,
        Err(err) => bail!("Failed to create encoder: {}", err),
    }

    if *SWITCH.lock().unwrap() {
        log::debug!("Broadcasting display switch");
        let mut misc = Misc::new();
        misc.set_switch_display(SwitchDisplay {
            display: current as _,
            x: origin.0 as _,
            y: origin.1 as _,
            width: width as _,
            height: height as _,
            ..Default::default()
        });
        let mut msg_out = Message::new();
        msg_out.set_misc(misc);
        *SWITCH.lock().unwrap() = false;
        sp.send(msg_out);
    }

    let mut crc = (0, 0);
    let start = time::Instant::now();
    let mut last_sent = time::Instant::now();
    let mut last_check_displays = time::Instant::now();
+    #[cfg(windows)]
+    let mut try_gdi = true;
+    #[cfg(windows)]
+    log::info!("gdi: {}", c.is_gdi());

    while sp.ok() {
        if *SWITCH.lock().unwrap() {
            bail!("SWITCH");
        }
        if current != *CURRENT_DISPLAY.lock().unwrap() {
            *SWITCH.lock().unwrap() = true;
            bail!("SWITCH");
        }
        if get_image_quality() != q {
            bail!("SWITCH");
        }
        #[cfg(windows)]
        {
            if crate::platform::windows::desktop_changed() {
                bail!("Desktop changed");
            }
        }
        let now = time::Instant::now();
        if last_check_displays.elapsed().as_millis() > 1000 {
            last_check_displays = now;
            if ndisplay != get_display_num() {
                log::info!("Displays changed");
                *SWITCH.lock().unwrap() = true;
                bail!("SWITCH");
            }
        }
        *LAST_ACTIVE.lock().unwrap() = now;
        if get_latency() < 1000 || last_sent.elapsed().as_millis() > 1000 {
            match c.frame(wait as _) {
                Ok(frame) => {
                    let time = now - start;
                    let ms = (time.as_secs() * 1000 + time.subsec_millis() as u64) as i64;
                    handle_one_frame(&sp, &frame, ms, &mut crc, &mut vpx)?;
                    last_sent = now;
+                    #[cfg(windows)]
+                    {
+                        try_gdi = false;
+                    }
                }
                Err(ref e) if e.kind() == WouldBlock => {
                    // https://github.com/NVIDIA/video-sdk-samples/tree/master/nvEncDXGIOutputDuplicationSample
                    wait = WAIT_BASE - now.elapsed().as_millis() as i32;
                    if wait < 0 {
                        wait = 0
                    }
+                    #[cfg(windows)]
+                    if try_gdi && !c.is_gdi() {
+                        c.set_gdi();
+                        try_gdi = false;
+                        log::info!("No image, fall back to gdi");
+                    }
                    continue;
                }
                Err(err) => {
                    return Err(err.into());
                }
            }
        }
        let elapsed = now.elapsed();
        // may need to enable frame(timeout)
        log::trace!("{:?} {:?}", time::Instant::now(), elapsed);
        if elapsed < spf {
            std::thread::sleep(spf - elapsed);
        }
    }
    Ok(())
}

#[inline]
fn create_msg(vp9s: Vec<VP9>) -> Message {
    let mut msg_out = Message::new();
    let mut vf = VideoFrame::new();
    vf.set_vp9s(VP9s {
        frames: vp9s.into(),
        ..Default::default()
    });
    msg_out.set_video_frame(vf);
    msg_out
}

#[inline]
fn create_frame(frame: &EncodeFrame) -> VP9 {
    VP9 {
        data: frame.data.to_vec(),
        key: frame.key,
        pts: frame.pts,
        ..Default::default()
    }
}

#[inline]
fn handle_one_frame(
    sp: &GenericService,
    frame: &[u8],
    ms: i64,
    crc: &mut (u32, u32),
    vpx: &mut Encoder,
) -> ResultType<()> {
    sp.snapshot(|sps| {
        // so that new sub and old sub share the same encoder after switch
        if sps.has_subscribes() {
            bail!("SWITCH");
        }
        Ok(())
    })?;
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(frame);
    let checksum = hasher.finalize();
    if checksum != crc.0 {
        crc.0 = checksum;
        crc.1 = 0;
    } else {
        crc.1 += 1;
    }
    if crc.1 <= 180 && crc.1 % 5 == 0 {
        let mut frames = Vec::new();
        for ref frame in vpx
            .encode(ms, frame, STRIDE_ALIGN)
            .with_context(|| "Failed to encode")?
        {
            frames.push(create_frame(frame));
        }
        for ref frame in vpx.flush().with_context(|| "Failed to flush")? {
            frames.push(create_frame(frame));
        }
        // to-do: flush periodically, e.g. 1 second
        if frames.len() > 0 {
            sp.send(create_msg(frames));
        }
    }
    Ok(())
}

fn get_display_num() -> usize {
    if let Ok(d) = Display::all() {
        d.len()
    } else {
        0
    }
}

pub fn get_displays() -> ResultType<(usize, Vec<DisplayInfo>)> {
    // switch to the primary display if no user has been active for a long time (30 seconds)
    if LAST_ACTIVE.lock().unwrap().elapsed().as_secs() >= 30 {
        *CURRENT_DISPLAY.lock().unwrap() = usize::MAX;
    }
    let mut displays = Vec::new();
    let mut primary = 0;
    for (i, d) in Display::all()?.iter().enumerate() {
        if d.is_primary() {
            primary = i;
        }
        displays.push(DisplayInfo {
            x: d.origin().0 as _,
            y: d.origin().1 as _,
            width: d.width() as _,
            height: d.height() as _,
            name: d.name(),
            online: d.is_online(),
            ..Default::default()
        });
    }
    let mut lock = CURRENT_DISPLAY.lock().unwrap();
    if *lock >= displays.len() {
        *lock = primary
    }
    Ok((*lock, displays))
}

pub fn switch_display(i: i32) {
    let i = i as usize;
    if let Ok((_, displays)) = get_displays() {
        if i < displays.len() {
            *CURRENT_DISPLAY.lock().unwrap() = i;
        }
    }
}

pub fn refresh() {
    *SWITCH.lock().unwrap() = true;
}

fn get_primary() -> usize {
    if let Ok(all) = Display::all() {
        for (i, d) in all.iter().enumerate() {
            if d.is_primary() {
                return i;
            }
        }
    }
    0
}

pub fn switch_to_primary() {
    switch_display(get_primary() as _);
}

fn get_current_display() -> ResultType<(usize, usize, Display)> {
    let mut current = *CURRENT_DISPLAY.lock().unwrap() as usize;
    let mut displays = Display::all()?;
    if displays.len() == 0 {
        bail!("No displays");
    }
    let n = displays.len();
    if current >= n {
        current = 0;
        for (i, d) in displays.iter().enumerate() {
            if d.is_primary() {
                current = i;
                break;
            }
        }
        *CURRENT_DISPLAY.lock().unwrap() = current;
    }
    return Ok((n, current, displays.remove(current)));
}

#[inline]
fn update_latency(id: i32, latency: i64, latencies: &mut HashMap<i32, i64>) {
    if latency <= 0 {
        latencies.remove(&id);
    } else {
        latencies.insert(id, latency);
    }
}

pub fn update_test_latency(id: i32, latency: i64) {
    update_latency(id, latency, &mut *TEST_LATENCIES.lock().unwrap());
}

pub fn update_internal_latency(id: i32, latency: i64) {
    update_latency(id, latency, &mut *INTERNAL_LATENCIES.lock().unwrap());
}

pub fn get_latency() -> i64 {
    INTERNAL_LATENCIES
        .lock()
        .unwrap()
        .values()
        .max()
        .unwrap_or(&0)
        .clone()
}

fn convert_quality(q: i32) -> i32 {
    let q = {
        if q == ImageQuality::Balanced.value() {
            (100 * 2 / 3, 12)
        } else if q == ImageQuality::Low.value() {
            (100 / 2, 18)
        } else if q == ImageQuality::Best.value() {
            (100, 12)
        } else {
            let bitrate = q >> 8 & 0xFF;
            let quantizer = q & 0xFF;
            (bitrate * 2, (100 - quantizer) * 36 / 100)
        }
    };
    if q.0 <= 0 {
        0
    } else {
        q.0 << 8 | q.1
    }
}

pub fn update_image_quality(id: i32, q: Option<i32>) {
    match q {
        Some(q) => {
            let q = convert_quality(q);
            if q > 0 {
                IMAGE_QUALITIES.lock().unwrap().insert(id, q);
            } else {
                IMAGE_QUALITIES.lock().unwrap().remove(&id);
            }
        }
        None => {
            IMAGE_QUALITIES.lock().unwrap().remove(&id);
        }
    }
}

fn get_image_quality() -> i32 {
    IMAGE_QUALITIES
        .lock()
        .unwrap()
        .values()
        .min()
        .unwrap_or(&convert_quality(ImageQuality::Balanced.value()))
        .clone()
}

#[inline]
fn get_quality(w: usize, h: usize, q: i32) -> (u32, u32, u32, i32) {
    // https://www.nvidia.com/en-us/geforce/guides/broadcasting-guide/
    let bitrate = q >> 8 & 0xFF;
    let quantizer = q & 0xFF;
    let b = ((w * h) / 1000) as u32;
    (bitrate as u32 * b / 100, quantizer as _, 56, 7)
}
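
The change above amounts to a one-shot fallback: while DXGI desktop duplication keeps returning WouldBlock and no frame has been delivered yet, the capturer is switched to the GDI path once and the capture loop keeps running. The sketch below restates that control flow in isolation. The stand-in Capturer struct, its fail_dxgi field, and the three-iteration loop are invented for illustration; only the frame / is_gdi / set_gdi method names and the WouldBlock handling follow the calls used in the diff, so treat this as a minimal sketch of the pattern under those assumptions, not the actual scrap implementation.

use std::io::{Error, ErrorKind};

// Stand-in capturer exposing the same calls used above (frame / is_gdi / set_gdi).
// The real type wraps DXGI desktop duplication plus a GDI screen-capture path.
struct Capturer {
    gdi: bool,
    fail_dxgi: bool, // simulate a session where DXGI never produces a frame
}

impl Capturer {
    fn frame(&mut self, _timeout_ms: i32) -> Result<Vec<u8>, Error> {
        if self.fail_dxgi && !self.gdi {
            // AcquireNextFrame timed out: report WouldBlock, as the loop above expects.
            return Err(Error::new(ErrorKind::WouldBlock, "no new frame"));
        }
        Ok(vec![0u8; 4]) // pretend a frame was captured
    }

    fn is_gdi(&self) -> bool {
        self.gdi
    }

    fn set_gdi(&mut self) {
        self.gdi = true;
    }
}

fn main() {
    let mut c = Capturer { gdi: false, fail_dxgi: true };
    // One-shot fallback flag, mirroring `try_gdi` in the service loop above.
    let mut try_gdi = true;
    for _ in 0..3 {
        match c.frame(17) {
            Ok(frame) => {
                println!("got frame of {} bytes (gdi={})", frame.len(), c.is_gdi());
                try_gdi = false; // a frame arrived, stop considering the fallback
            }
            Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                if try_gdi && !c.is_gdi() {
                    c.set_gdi(); // no image yet: switch this capturer to GDI once
                    try_gdi = false;
                    println!("No image, fall back to gdi");
                }
            }
            Err(e) => {
                eprintln!("capture error: {}", e);
                break;
            }
        }
    }
}

Keeping the fallback behind a one-shot flag matches the intent of the diff: once any real frame has been captured, try_gdi is cleared, so a later transient timeout cannot silently downgrade capture to GDI.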