fix: reload profile watcher in case of unresponsiveness after inactivity
This should finally resolve https://github.com/ilya-zlobintsev/LACT/issues/448
commit 3f278e7dbf (parent 4cff104c10)
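In outline, the fix adds a watchdog to the profile watcher's event loop: a resettable inactivity timer fires after 30 seconds without process events, a throwaway process is spawned to probe whether the event listener is still delivering anything, and if even that probe goes unnoticed the watcher is restarted. Below is a minimal self-contained sketch of that pattern; the channel setup, event type, and the watch function name are illustrative stand-ins, not the actual LACT code.

use std::time::Duration;
use tokio::{select, sync::mpsc, time::sleep};

const SUSPICIOUS_INACTIVITY_PERIOD_SECS: u64 = 30;

// Hypothetical event loop illustrating the watchdog added by this commit.
async fn watch(mut event_rx: mpsc::Receiver<String>) {
    let mut suspiciously_quiet = false;

    // One resettable timer; pinning lets select! poll it by mutable reference.
    let inactivity_timer = sleep(Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS));
    tokio::pin!(inactivity_timer);

    loop {
        select! {
            Some(event) = event_rx.recv() => {
                // Any event proves the listener is alive: clear the flag and push the deadline back.
                suspiciously_quiet = false;
                inactivity_timer.as_mut().reset(
                    tokio::time::Instant::now() + Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS),
                );
                println!("event: {event}");
            }
            () = &mut inactivity_timer => {
                if suspiciously_quiet {
                    // The probe spawned below produced no event either: give up and restart.
                    eprintln!("listener unresponsive, restarting");
                    break;
                }
                // Probe liveness by spawning a short-lived process; the listener should
                // report it well before the shorter follow-up deadline below.
                let next_check = if std::process::Command::new("uname").output().is_ok() {
                    suspiciously_quiet = true;
                    Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS / 3)
                } else {
                    Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS)
                };
                inactivity_timer.as_mut().reset(tokio::time::Instant::now() + next_check);
            }
        }
    }
}

uname is used purely as a cheap, universally available command; any short-lived process the listener would report works.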
@@ -6,6 +6,7 @@ use copes::solver::PEvent;
 use futures::StreamExt;
 use lact_schema::{ProfileRule, ProfileWatcherState};
 use std::{
+    process::Command,
     rc::Rc,
     time::{Duration, Instant},
 };
@@ -14,13 +15,14 @@ use tokio::{
     sync::{mpsc, Mutex, Notify},
     time::sleep,
 };
-use tracing::{debug, error, info, trace};
+use tracing::{debug, error, info, trace, warn};
 use zbus::AsyncDrop;
 
 static PROFILE_WATCHER_LOCK: Mutex<()> = Mutex::const_new(());
 
 const PROFILE_WATCHER_MIN_DELAY_MS: u64 = 50;
 const PROFILE_WATCHER_MAX_DELAY_MS: u64 = 500;
+const SUSPICIOUS_INACTIVITY_PERIOD_SECS: u64 = 30;
 
 #[derive(Debug)]
 enum ProfileWatcherEvent {
@@ -120,6 +122,12 @@ pub async fn run_watcher(handler: Handler, mut command_rx: mpsc::Receiver<Profil
 
     update_profile(&handler).await;
 
+    let mut should_reload = false;
+    let mut suspiciously_quiet = false;
+
+    let inactivity_timer = sleep(Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS));
+    tokio::pin!(inactivity_timer);
+
     loop {
         select! {
             Some(cmd) = command_rx.recv() => {
@@ -131,7 +139,10 @@ pub async fn run_watcher(handler: Handler, mut command_rx: mpsc::Receiver<Profil
                 }
             }
             Some(event) = event_rx.recv() => {
-                handle_profile_event(&event, &handler).await;
+                suspiciously_quiet = false;
+                inactivity_timer.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS));
+
+                handle_profile_event(&event, &handler, &mut should_reload);
 
                 // It is very common during system usage that multiple processes start at the same time, or there are processes
                 // that start and exit right away.
@@ -160,13 +171,38 @@ pub async fn run_watcher(handler: Handler, mut command_rx: mpsc::Receiver<Profil
                         Some(event) = event_rx.recv() => {
                             trace!("got another process event, delaying profile update");
                             min_timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_millis(PROFILE_WATCHER_MIN_DELAY_MS));
-                            handle_profile_event(&event, &handler).await;
+                            handle_profile_event(&event, &handler, &mut should_reload);
                         }
                     }
                 }
 
                 update_profile(&handler).await;
             },
+            () = &mut inactivity_timer => {
+                if suspiciously_quiet {
+                    error!("no profile watcher events detected even after manual invocation, restarting watcher");
+                    should_reload = true;
+                } else {
+                    debug!("no event activity detected in {SUSPICIOUS_INACTIVITY_PERIOD_SECS} seconds, checking listener liveness by spawning a process");
+                    let next_check_delta = match Command::new("uname").output() {
+                        Ok(_) => {
+                            suspiciously_quiet = true;
+                            // The event regarding the just spawned command should be received within this time period
+                            Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS / 3)
+                        }
+                        Err(err) => {
+                            warn!("could not spawn test command: {err}");
+                            Duration::from_secs(SUSPICIOUS_INACTIVITY_PERIOD_SECS)
+                        }
+                    };
+                    inactivity_timer.as_mut().reset(tokio::time::Instant::now() + next_check_delta);
+                }
+            }
+        }
+
+        if should_reload {
+            handler.start_profile_watcher().await;
+            break;
         }
     }
 
@@ -178,8 +214,7 @@ pub async fn run_watcher(handler: Handler, mut command_rx: mpsc::Receiver<Profil
     }
 }
 
-async fn handle_profile_event(event: &ProfileWatcherEvent, handler: &Handler) {
-    let mut should_reload = false;
+fn handle_profile_event(event: &ProfileWatcherEvent, handler: &Handler, should_reload: &mut bool) {
     {
         let mut state_guard = handler.profile_watcher_state.borrow_mut();
         let Some(state) = state_guard.as_mut() else {
@@ -192,7 +227,7 @@ async fn handle_profile_event(event: &ProfileWatcherEvent, handler: &Handler) {
                 trace!("process {pid} ({}) started", info.name);
                 if info.name.as_ref() == gamemode::PROCESS_NAME {
                     info!("detected gamemode daemon, reloading profile watcher");
-                    should_reload = true;
+                    *should_reload = true;
                 }
                 state.push_process(*pid.as_ref(), info);
             }
@@ -201,8 +236,11 @@ async fn handle_profile_event(event: &ProfileWatcherEvent, handler: &Handler) {
             }
         },
         ProfileWatcherEvent::Process(PEvent::Exit(pid)) => {
-            trace!("process {pid} exited");
-            state.remove_process(*pid.as_ref());
+            if let Some(info) = state.remove_process(*pid.as_ref()) {
+                trace!("process {pid} ({}) exited", info.name);
+            } else {
+                trace!("process {pid} exited");
+            }
         }
         ProfileWatcherEvent::Gamemode(PEvent::Exec(pid)) => {
             state.gamemode_games.insert(*pid.as_ref());
@@ -212,10 +250,6 @@ async fn handle_profile_event(event: &ProfileWatcherEvent, handler: &Handler) {
             }
         }
     }
-
-    if should_reload {
-        handler.start_profile_watcher().await;
-    }
 }
 
 async fn update_profile(handler: &Handler) {
@@ -13,7 +13,7 @@ use std::{
     sync::atomic::{AtomicBool, Ordering},
 };
 use tokio::{process::Command, sync::Notify};
-use tracing::{error, info, warn};
+use tracing::{debug, error, info, warn};
 
 static OC_TOGGLED: AtomicBool = AtomicBool::new(false);
 
@@ -226,6 +226,11 @@ pub(crate) fn listen_netlink_kernel_event(notify: &Notify) -> anyhow::Result<()>
         for raw_line in buf.split(|c| *c == b'\0') {
             match std::str::from_utf8(raw_line) {
                 Ok(line) => {
+                    if line.is_empty() {
+                        continue;
+                    }
+
+                    debug!("kernel event line: '{line}'");
                     if let Some(subsystem) = line.strip_prefix("SUBSYSTEM=") {
                         if subsystem == "drm" {
                             notify.notify_one();
@@ -29,14 +29,17 @@ impl ProfileWatcherState
         self.process_names_map.entry(name).or_default().insert(pid);
     }
 
-    pub fn remove_process(&mut self, pid: i32) {
-        if let Some(info) = self.process_list.shift_remove(&pid) {
+    pub fn remove_process(&mut self, pid: i32) -> Option<ProcessInfo> {
+        if let Some(mut info) = self.process_list.shift_remove(&pid) {
             if let Entry::Occupied(mut entry) = self.process_names_map.entry(info.name) {
                 entry.get_mut().remove(&pid);
                 if entry.get().is_empty() {
-                    entry.remove();
+                    let (key, _) = entry.remove_entry();
+                    info.name = key;
+                    return Some(info);
                 }
             }
         }
+        None
     }
 }
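The remove_process change above hands the removed entry back to the caller so the exit can be logged with the process name. Here is a self-contained sketch of the same pattern, with std::collections::HashMap standing in for the crate's actual map types (the State and ProcessInfo definitions and their field types are assumptions made for illustration):

use std::collections::{hash_map::Entry, HashMap, HashSet};

// Illustrative stand-ins for the real watcher state.
struct ProcessInfo {
    name: String,
}

struct State {
    process_list: HashMap<i32, ProcessInfo>,
    process_names_map: HashMap<String, HashSet<i32>>,
}

impl State {
    // Mirrors the new signature: the removed process is returned to the caller,
    // with its name reclaimed from the name map so no clone is needed.
    fn remove_process(&mut self, pid: i32) -> Option<ProcessInfo> {
        if let Some(mut info) = self.process_list.remove(&pid) {
            if let Entry::Occupied(mut entry) = self.process_names_map.entry(info.name) {
                entry.get_mut().remove(&pid);
                if entry.get().is_empty() {
                    // remove_entry yields the owned key, which is moved back into the info.
                    let (key, _) = entry.remove_entry();
                    info.name = key;
                    return Some(info);
                }
            }
        }
        None
    }
}

Recovering the owned key with remove_entry is the point of the change: the name stored as the map key is moved back into the ProcessInfo before it is returned, letting the caller log "process {pid} ({name}) exited" without an extra allocation.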