From e93025164e33c51cb3540c8295d35cdca8a3a8e8 Mon Sep 17 00:00:00 2001 From: Ilya Zlobintsev Date: Sun, 5 Jan 2025 10:14:22 +0200 Subject: [PATCH 1/2] refactor: simplify lact-client by removing ResponseBuffer --- lact-cli/src/lib.rs | 16 +++---- lact-client/src/lib.rs | 91 +++++++++++++-------------------------- lact-client/src/macros.rs | 4 +- lact-gui/src/app.rs | 64 ++++++--------------------- 4 files changed, 49 insertions(+), 126 deletions(-) diff --git a/lact-cli/src/lib.rs b/lact-cli/src/lib.rs index 1b82f79..d2b1c6b 100644 --- a/lact-cli/src/lib.rs +++ b/lact-cli/src/lib.rs @@ -19,8 +19,8 @@ pub fn run(args: CliArgs) -> Result<()> { } async fn list_gpus(_: &CliArgs, client: &DaemonClient) -> Result<()> { - let buffer = client.list_devices().await?; - for entry in buffer.inner()? { + let entries = client.list_devices().await?; + for entry in entries { let id = entry.id; if let Some(name) = entry.name { println!("{id} ({name})"); @@ -33,8 +33,7 @@ async fn list_gpus(_: &CliArgs, client: &DaemonClient) -> Result<()> { async fn info(args: &CliArgs, client: &DaemonClient) -> Result<()> { for id in extract_gpu_ids(args, client).await { - let info_buffer = client.get_device_info(&id).await?; - let info = info_buffer.inner()?; + let info = client.get_device_info(&id).await?; let pci_info = info.pci_info.context("GPU reports no pci info")?; if let Some(ref vendor) = pci_info.device_pci_info.vendor { @@ -56,10 +55,8 @@ async fn extract_gpu_ids(args: &CliArgs, client: &DaemonClient) -> Vec { match args.gpu_id { Some(ref id) => vec![id.clone()], None => { - let buffer = client.list_devices().await.expect("Could not list GPUs"); - buffer - .inner() - .expect("Could not deserialize GPUs response") + let entries = client.list_devices().await.expect("Could not list GPUs"); + entries .into_iter() .map(|entry| entry.id.to_owned()) .collect() @@ -68,8 +65,7 @@ async fn extract_gpu_ids(args: &CliArgs, client: &DaemonClient) -> Vec { } async fn snapshot(client: &DaemonClient) -> Result<()> { - let buffer = client.generate_debug_snapshot().await?; - let path = buffer.inner()?; + let path = client.generate_debug_snapshot().await?; println!("Generated debug snapshot in {path}"); Ok(()) } diff --git a/lact-client/src/lib.rs b/lact-client/src/lib.rs index 7d57b2c..92a63ad 100644 --- a/lact-client/src/lib.rs +++ b/lact-client/src/lib.rs @@ -16,10 +16,9 @@ use schema::{ ClocksInfo, DeviceInfo, DeviceListEntry, DeviceStats, FanOptions, PowerStates, ProfilesInfo, Request, Response, SystemInfo, }; -use serde::Deserialize; +use serde::de::DeserializeOwned; use std::{ - future::Future, marker::PhantomData, os::unix::net::UnixStream, path::PathBuf, pin::Pin, - rc::Rc, time::Duration, + future::Future, os::unix::net::UnixStream, path::PathBuf, pin::Pin, rc::Rc, time::Duration, }; use tokio::{ net::ToSocketAddrs, @@ -73,19 +72,24 @@ impl DaemonClient { self.status_tx.subscribe() } - fn make_request<'a, 'r, T: Deserialize<'r>>( + fn make_request<'a, T: DeserializeOwned>( &'a self, request: Request<'a>, - ) -> Pin>> + 'a>> { + ) -> Pin> + 'a>> { Box::pin(async { let mut stream = self.stream.lock().await; let request_payload = serde_json::to_string(&request)?; match stream.request(&request_payload).await { - Ok(response_payload) => Ok(ResponseBuffer { - buf: response_payload, - _phantom: PhantomData, - }), + Ok(response_payload) => { + let response: Response = serde_json::from_str(&response_payload) + .context("Could not deserialize response from daemon")?; + match response { + Response::Ok(data) => Ok(data), + 
Response::Error(err) => Err(anyhow::Error::new(err) + .context("Got error from daemon, end of client boundary")), + } + } Err(err) => { error!("Could not make request: {err}, reconnecting to socket"); let _ = self.status_tx.send(ConnectionStatusMsg::Disconnected); @@ -113,20 +117,16 @@ impl DaemonClient { }) } - pub async fn list_devices(&self) -> anyhow::Result>> { + pub async fn list_devices(&self) -> anyhow::Result> { self.make_request(Request::ListDevices).await } pub async fn set_fan_control(&self, cmd: FanOptions<'_>) -> anyhow::Result { - self.make_request(Request::SetFanControl(cmd)) - .await? - .inner() + self.make_request(Request::SetFanControl(cmd)).await } pub async fn set_power_cap(&self, id: &str, cap: Option) -> anyhow::Result { - self.make_request(Request::SetPowerCap { id, cap }) - .await? - .inner() + self.make_request(Request::SetPowerCap { id, cap }).await } request_plain!(get_system_info, SystemInfo, SystemInfo); @@ -148,38 +148,31 @@ impl DaemonClient { pub async fn list_profiles(&self, include_state: bool) -> anyhow::Result { self.make_request(Request::ListProfiles { include_state }) - .await? - .inner() + .await } pub async fn set_profile(&self, name: Option, auto_switch: bool) -> anyhow::Result<()> { self.make_request(Request::SetProfile { name, auto_switch }) - .await? - .inner() + .await } pub async fn create_profile(&self, name: String, base: ProfileBase) -> anyhow::Result<()> { self.make_request(Request::CreateProfile { name, base }) - .await? - .inner() + .await } pub async fn delete_profile(&self, name: String) -> anyhow::Result<()> { - self.make_request(Request::DeleteProfile { name }) - .await? - .inner() + self.make_request(Request::DeleteProfile { name }).await } pub async fn move_profile(&self, name: String, new_position: usize) -> anyhow::Result<()> { self.make_request(Request::MoveProfile { name, new_position }) - .await? - .inner() + .await } pub async fn evaluate_profile_rule(&self, rule: ProfileRule) -> anyhow::Result { self.make_request(Request::EvaluateProfileRule { rule }) - .await? - .inner() + .await } pub async fn set_profile_rule( @@ -188,8 +181,7 @@ impl DaemonClient { rule: Option, ) -> anyhow::Result<()> { self.make_request(Request::SetProfileRule { name, rule }) - .await? - .inner() + .await } pub async fn set_performance_level( @@ -201,8 +193,7 @@ impl DaemonClient { id, performance_level, }) - .await? - .inner() + .await } pub async fn set_clocks_value( @@ -211,8 +202,7 @@ impl DaemonClient { command: SetClocksCommand, ) -> anyhow::Result { self.make_request(Request::SetClocksValue { id, command }) - .await? - .inner() + .await } pub async fn batch_set_clocks_value( @@ -221,8 +211,7 @@ impl DaemonClient { commands: Vec, ) -> anyhow::Result { self.make_request(Request::BatchSetClocksValue { id, commands }) - .await? - .inner() + .await } pub async fn set_enabled_power_states( @@ -232,8 +221,7 @@ impl DaemonClient { states: Vec, ) -> anyhow::Result { self.make_request(Request::SetEnabledPowerStates { id, kind, states }) - .await? - .inner() + .await } pub async fn set_power_profile_mode( @@ -247,14 +235,12 @@ impl DaemonClient { index, custom_heuristics, }) - .await? - .inner() + .await } pub async fn confirm_pending_config(&self, command: ConfirmCommand) -> anyhow::Result<()> { self.make_request(Request::ConfirmPendingConfig(command)) - .await? 
- .inner() + .await } } @@ -275,25 +261,6 @@ fn get_socket_path() -> Option { } } -pub struct ResponseBuffer { - buf: String, - _phantom: PhantomData, -} - -impl<'a, T: Deserialize<'a>> ResponseBuffer { - pub fn inner(&'a self) -> anyhow::Result { - let response: Response = serde_json::from_str(&self.buf) - .context("Could not deserialize response from daemon")?; - match response { - Response::Ok(data) => Ok(data), - Response::Error(err) => { - Err(anyhow::Error::new(err) - .context("Got error from daemon, end of client boundary")) - } - } - } -} - #[derive(Debug, Clone, Copy)] pub enum ConnectionStatusMsg { Disconnected, diff --git a/lact-client/src/macros.rs b/lact-client/src/macros.rs index c14ca0b..e3b1f16 100644 --- a/lact-client/src/macros.rs +++ b/lact-client/src/macros.rs @@ -1,6 +1,6 @@ macro_rules! request_with_id { ($name:ident, $variant:ident, $response:ty) => { - pub async fn $name(&self, id: &str) -> anyhow::Result> { + pub async fn $name(&self, id: &str) -> anyhow::Result<$response> { self.make_request(Request::$variant { id }).await } }; @@ -8,7 +8,7 @@ macro_rules! request_with_id { macro_rules! request_plain { ($name:ident, $variant:ident, $response:ty) => { - pub async fn $name(&self) -> anyhow::Result> { + pub async fn $name(&self) -> anyhow::Result<$response> { self.make_request(Request::$variant).await } }; diff --git a/lact-gui/src/app.rs b/lact-gui/src/app.rs index 74cde1e..6d3428e 100644 --- a/lact-gui/src/app.rs +++ b/lact-gui/src/app.rs @@ -161,17 +161,15 @@ impl AsyncComponent for AppModel { register_actions(&sender); - let system_info_buf = daemon_client + let system_info = daemon_client .get_system_info() .await .expect("Could not fetch system info"); - let system_info = system_info_buf.inner().expect("Invalid system info buffer"); - let devices_buf = daemon_client + let devices = daemon_client .list_devices() .await .expect("Could not list devices"); - let devices = devices_buf.inner().expect("Could not access devices"); if system_info.version != GUI_VERSION || system_info.commit.as_deref() != Some(GIT_COMMIT) { let err = anyhow!("Version mismatch between GUI and daemon ({GUI_VERSION}-{GIT_COMMIT} vs {}-{})! If you have updated LACT, you need to restart the service with `sudo systemctl restart lactd`.", system_info.version, system_info.commit.as_deref().unwrap_or_default()); @@ -449,7 +447,7 @@ impl AppModel { .get_device_info(&gpu_id) .await .context("Could not fetch info")?; - let info = Arc::new(info_buf.inner()?); + let info = Arc::new(info_buf); // Plain `nvidia` means that the nvidia driver is loaded, but it does not contain a version fetched from NVML if info.driver == "nvidia" { @@ -492,8 +490,7 @@ impl AppModel { .daemon_client .get_device_stats(&gpu_id) .await - .context("Could not fetch stats")? 
- .inner()?; + .context("Could not fetch stats")?; let stats = Arc::new(stats); self.oc_page.set_stats(&stats, true); @@ -502,13 +499,7 @@ impl AppModel { self.info_page.emit(PageUpdate::Stats(stats)); let maybe_clocks_table = match self.daemon_client.get_device_clocks_info(&gpu_id).await { - Ok(clocks_buf) => match clocks_buf.inner() { - Ok(info) => info.table, - Err(err) => { - debug!("could not extract clocks info: {err:?}"); - None - } - }, + Ok(info) => info.table, Err(err) => { debug!("could not fetch clocks info: {err:?}"); None @@ -521,13 +512,7 @@ impl AppModel { .get_device_power_profile_modes(&gpu_id) .await { - Ok(buf) => match buf.inner() { - Ok(table) => Some(table), - Err(err) => { - debug!("Could not extract profile modes table: {err:?}"); - None - } - }, + Ok(buf) => Some(buf), Err(err) => { debug!("Could not get profile modes table: {err:?}"); None @@ -537,12 +522,7 @@ impl AppModel { .performance_frame .set_power_profile_modes(maybe_modes_table); - match self - .daemon_client - .get_power_states(&gpu_id) - .await - .and_then(|states| states.inner()) - { + match self.daemon_client.get_power_states(&gpu_id).await { Ok(power_states) => { self.oc_page .power_states_frame @@ -773,12 +753,7 @@ impl AppModel { } async fn dump_vbios(&self, gpu_id: &str, root: >k::ApplicationWindow) { - match self - .daemon_client - .dump_vbios(gpu_id) - .await - .and_then(|response| response.inner()) - { + match self.daemon_client.dump_vbios(gpu_id).await { Ok(vbios_data) => { let file_chooser = FileChooserDialog::new( Some("Save VBIOS file"), @@ -826,12 +801,7 @@ impl AppModel { } async fn generate_debug_snapshot(&self, root: >k::ApplicationWindow) { - match self - .daemon_client - .generate_debug_snapshot() - .await - .and_then(|response| response.inner()) - { + match self.daemon_client.generate_debug_snapshot().await { Ok(path) => { let path_label = gtk::Label::builder() .use_markup(true) @@ -968,11 +938,7 @@ fn start_stats_update_loop( loop { tokio::time::sleep(duration).await; - match daemon_client - .get_device_stats(&gpu_id) - .await - .and_then(|buffer| buffer.inner()) - { + match daemon_client.get_device_stats(&gpu_id).await { Ok(stats) => { sender.input(AppMsg::Stats(Arc::new(stats))); } @@ -1043,15 +1009,9 @@ async fn toggle_overdrive(daemon_client: &DaemonClient, enable: bool, root: Appl dialog.show(); let result = if enable { - daemon_client - .enable_overdrive() - .await - .and_then(|buffer| buffer.inner()) + daemon_client.enable_overdrive().await } else { - daemon_client - .disable_overdrive() - .await - .and_then(|buffer| buffer.inner()) + daemon_client.disable_overdrive().await }; dialog.hide(); From 740e38863ba0cfb76f4fd461df9fd9f6b8bd3e62 Mon Sep 17 00:00:00 2001 From: Ilya Zlobintsev Date: Sun, 5 Jan 2025 11:59:26 +0200 Subject: [PATCH 2/2] feat: use VRAM offset ratio on Ada, add config migration system to erase old memory clock settings --- lact-daemon/src/config.rs | 104 +++++++++++++++++- lact-daemon/src/lib.rs | 10 +- lact-daemon/src/server/gpu_controller/amd.rs | 3 +- lact-daemon/src/server/gpu_controller/mod.rs | 3 + .../src/server/gpu_controller/nvidia.rs | 21 +++- ...lact_daemon__config__tests__parse_doc.snap | 2 +- .../src/app/pages/oc_page/clocks_frame.rs | 3 +- lact-schema/src/lib.rs | 1 + 8 files changed, 138 insertions(+), 9 deletions(-) diff --git a/lact-daemon/src/config.rs b/lact-daemon/src/config.rs index f96e827..3b4201e 100644 --- a/lact-daemon/src/config.rs +++ b/lact-daemon/src/config.rs @@ -1,4 +1,4 @@ -use 
crate::server::gpu_controller::fan_control::FanCurve; +use crate::server::gpu_controller::{fan_control::FanCurve, VENDOR_NVIDIA}; use amdgpu_sysfs::gpu_handle::{PerformanceLevel, PowerLevelKind}; use anyhow::Context; use indexmap::IndexMap; @@ -18,7 +18,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::{sync::mpsc, time}; -use tracing::{debug, error}; +use tracing::{debug, error, info}; const FILE_NAME: &str = "config.yaml"; const DEFAULT_ADMIN_GROUPS: [&str; 2] = ["wheel", "sudo"]; @@ -29,6 +29,8 @@ const SELF_CONFIG_EDIT_PERIOD_MILLIS: u64 = 1000; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct Config { + #[serde(default)] + pub version: u64, pub daemon: Daemon, #[serde(default = "default_apply_settings_timer")] pub apply_settings_timer: u64, @@ -51,6 +53,7 @@ impl Default for Config { profiles: IndexMap::new(), current_profile: None, auto_switch_profiles: false, + version: 0, } } } @@ -206,6 +209,36 @@ impl Config { } } + pub fn migrate_versions(&mut self) { + loop { + let next_version = self.version + 1; + match next_version { + 0 => unreachable!(), + // Reset VRAM settings on Nvidia after new offset ratio logic + 1 => { + for (id, gpu) in &mut self.gpus { + if id.starts_with(VENDOR_NVIDIA) { + gpu.clocks_configuration.max_memory_clock = None; + gpu.clocks_configuration.min_memory_clock = None; + } + } + + for profile in &mut self.profiles.values_mut() { + for (id, gpu) in &mut profile.gpus { + if id.starts_with(VENDOR_NVIDIA) { + gpu.clocks_configuration.max_memory_clock = None; + gpu.clocks_configuration.min_memory_clock = None; + } + } + } + } + _ => break, + } + info!("migrated config version {} to {next_version}", self.version); + self.version = next_version; + } + } + /// Gets the GPU configs according to the current profile. Returns an error if the current profile could not be found. 
pub fn gpus(&self) -> anyhow::Result<&IndexMap> { match &self.current_profile { @@ -364,6 +397,7 @@ fn default_apply_settings_timer() -> u64 { mod tests { use super::{ClocksConfiguration, Config, Daemon, FanControlSettings, Gpu}; use crate::server::gpu_controller::fan_control::FanCurve; + use indexmap::IndexMap; use insta::assert_yaml_snapshot; use lact_schema::{FanControlMode, PmfwOptions}; use std::collections::HashMap; @@ -431,4 +465,70 @@ mod tests { gpu.clocks_configuration.voltage_offset = Some(10); assert!(gpu.is_core_clocks_used()); } + + #[test] + fn migrate_versions() { + let mut config = Config { + version: 0, + daemon: Daemon::default(), + apply_settings_timer: 5, + gpus: IndexMap::from([ + ( + "10DE:2704-1462:5110-0000:09:00.0".to_owned(), + Gpu { + clocks_configuration: ClocksConfiguration { + max_core_clock: Some(3000), + max_memory_clock: Some(10_000), + ..Default::default() + }, + ..Default::default() + }, + ), + ( + "1002:687F-1043:0555-0000:0b:00.0".to_owned(), + Gpu { + clocks_configuration: ClocksConfiguration { + max_core_clock: Some(1500), + max_memory_clock: Some(920), + ..Default::default() + }, + ..Default::default() + }, + ), + ]), + profiles: IndexMap::new(), + current_profile: None, + auto_switch_profiles: false, + }; + + config.migrate_versions(); + + assert_eq!( + config + .gpus + .get("10DE:2704-1462:5110-0000:09:00.0") + .unwrap() + .clocks_configuration + .max_core_clock, + Some(3000) + ); + assert_eq!( + config + .gpus + .get("10DE:2704-1462:5110-0000:09:00.0") + .unwrap() + .clocks_configuration + .max_memory_clock, + None, + ); + assert_eq!( + config + .gpus + .get("1002:687F-1043:0555-0000:0b:00.0") + .unwrap() + .clocks_configuration + .max_memory_clock, + Some(920), + ); + } } diff --git a/lact-daemon/src/lib.rs b/lact-daemon/src/lib.rs index d1a26cd..4c30e27 100644 --- a/lact-daemon/src/lib.rs +++ b/lact-daemon/src/lib.rs @@ -12,6 +12,8 @@ use anyhow::Context; use config::Config; use futures::future::select_all; use server::{handle_stream, handler::Handler, Server}; +use std::cell::Cell; +use std::time::Instant; use std::{os::unix::net::UnixStream as StdUnixStream, time::Duration}; use tokio::net::UnixStream; use tokio::{ @@ -46,7 +48,7 @@ pub fn run() -> anyhow::Result<()> { .build() .expect("Could not initialize tokio runtime"); rt.block_on(async { - let config = Config::load_or_create()?; + let mut config = Config::load_or_create()?; let env_filter = EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) @@ -54,6 +56,12 @@ pub fn run() -> anyhow::Result<()> { .context("Invalid log level")?; tracing_subscriber::fmt().with_env_filter(env_filter).init(); + let original_version = config.version; + config.migrate_versions(); + if config.version != original_version { + config.save(&Cell::new(Instant::now()))?; + } + ensure_sufficient_uptime().await; LocalSet::new() diff --git a/lact-daemon/src/server/gpu_controller/amd.rs b/lact-daemon/src/server/gpu_controller/amd.rs index 0a57a43..eee5ea5 100644 --- a/lact-daemon/src/server/gpu_controller/amd.rs +++ b/lact-daemon/src/server/gpu_controller/amd.rs @@ -1,4 +1,4 @@ -use super::{fan_control::FanCurve, FanControlHandle, GpuController}; +use super::{fan_control::FanCurve, FanControlHandle, GpuController, VENDOR_AMD}; use crate::{ config::{self, ClocksConfiguration, FanControlSettings}, server::vulkan::get_vulkan_info, @@ -47,7 +47,6 @@ use { const GPU_CLOCKDOWN_TIMEOUT_SECS: u64 = 3; const MAX_PSTATE_READ_ATTEMPTS: u32 = 5; -const VENDOR_AMD: &str = "1002"; const STEAM_DECK_IDS: [&str; 2] = 
["163F", "1435"]; pub struct AmdGpuController { diff --git a/lact-daemon/src/server/gpu_controller/mod.rs b/lact-daemon/src/server/gpu_controller/mod.rs index d2e280f..0171adf 100644 --- a/lact-daemon/src/server/gpu_controller/mod.rs +++ b/lact-daemon/src/server/gpu_controller/mod.rs @@ -3,6 +3,9 @@ mod amd; pub mod fan_control; mod nvidia; +pub const VENDOR_AMD: &str = "1002"; +pub const VENDOR_NVIDIA: &str = "10DE"; + pub use amd::AmdGpuController; pub use nvidia::NvidiaGpuController; diff --git a/lact-daemon/src/server/gpu_controller/nvidia.rs b/lact-daemon/src/server/gpu_controller/nvidia.rs index d1891c1..76e2929 100644 --- a/lact-daemon/src/server/gpu_controller/nvidia.rs +++ b/lact-daemon/src/server/gpu_controller/nvidia.rs @@ -17,7 +17,8 @@ use lact_schema::{ }; use nvml_wrapper::{ bitmasks::device::ThrottleReasons, - enum_wrappers::device::{Clock, TemperatureSensor, TemperatureThreshold}, + enum_wrappers::device::{Brand, Clock, TemperatureSensor, TemperatureThreshold}, + enums::device::DeviceArchitecture, Device, Nvml, }; use std::{ @@ -248,6 +249,20 @@ impl NvidiaGpuController { Ok(power_states) } + + // See https://github.com/ilya-zlobintsev/LACT/issues/418 + fn vram_offset_ratio(&self) -> i32 { + let device = self.device(); + if let (Ok(brand), Ok(architecture)) = (device.brand(), device.architecture()) { + let ratio = match (brand, architecture) { + (Brand::GeForce, DeviceArchitecture::Ada) => 2, + // TODO: check others + _ => 1, + }; + return ratio; + } + 1 + } } impl GpuController for NvidiaGpuController { @@ -495,6 +510,7 @@ impl GpuController for NvidiaGpuController { gpc = Some(NvidiaClockInfo { max: max as i32, offset, + offset_ratio: 1, offset_range, }); } @@ -511,6 +527,7 @@ impl GpuController for NvidiaGpuController { mem = Some(NvidiaClockInfo { max: max as i32, offset, + offset_ratio: self.vram_offset_ratio(), offset_range, }); } @@ -600,7 +617,7 @@ impl GpuController for NvidiaGpuController { let default_max_clock = device .max_clock_info(Clock::Memory) .context("Could not read max memory clock")?; - let offset = max_mem_clock - default_max_clock as i32; + let offset = (max_mem_clock - default_max_clock as i32) * self.vram_offset_ratio(); debug!("Using mem clock offset {offset} (default max clock: {default_max_clock})"); device diff --git a/lact-daemon/src/snapshots/lact_daemon__config__tests__parse_doc.snap b/lact-daemon/src/snapshots/lact_daemon__config__tests__parse_doc.snap index 0b8b6a1..2315b4e 100644 --- a/lact-daemon/src/snapshots/lact_daemon__config__tests__parse_doc.snap +++ b/lact-daemon/src/snapshots/lact_daemon__config__tests__parse_doc.snap @@ -1,8 +1,8 @@ --- source: lact-daemon/src/config.rs expression: deserialized_config -snapshot_kind: text --- +version: 0 daemon: log_level: info admin_groups: diff --git a/lact-gui/src/app/pages/oc_page/clocks_frame.rs b/lact-gui/src/app/pages/oc_page/clocks_frame.rs index d3182db..935699c 100644 --- a/lact-gui/src/app/pages/oc_page/clocks_frame.rs +++ b/lact-gui/src/app/pages/oc_page/clocks_frame.rs @@ -412,7 +412,8 @@ fn set_nvidia_clock_offset(clock_info: &NvidiaClockInfo, adjustment_row: &Adjust let oc_adjustment = &adjustment_row.imp().adjustment; oc_adjustment.set_lower((clock_info.max + clock_info.offset_range.0) as f64); oc_adjustment.set_upper((clock_info.max + clock_info.offset_range.1) as f64); - oc_adjustment.set_value((clock_info.max + clock_info.offset) as f64); + oc_adjustment + .set_value((clock_info.max + (clock_info.offset / clock_info.offset_ratio)) as f64); adjustment_row.set_visible(true); } 
diff --git a/lact-schema/src/lib.rs b/lact-schema/src/lib.rs index a4de06d..3e2f719 100644 --- a/lact-schema/src/lib.rs +++ b/lact-schema/src/lib.rs @@ -159,6 +159,7 @@ pub struct NvidiaClocksTable { pub struct NvidiaClockInfo { pub max: i32, pub offset: i32, + pub offset_ratio: i32, pub offset_range: (i32, i32), }
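
Two notes for readers of this series (illustrative only, not part of the patches themselves):

With patch 1, call sites no longer go through ResponseBuffer: a call such as `client.get_device_stats(&gpu_id).await?` now yields the deserialized `DeviceStats` directly, where it previously returned a buffer that still needed `.inner()?`.

Patch 2's offset_ratio plumbing is easier to follow with the arithmetic written out. The sketch below uses made-up clock values and a ratio of 2 (the GeForce/Ada branch of vram_offset_ratio()); it mirrors how set_clocks_value scales the requested delta up before handing it to NVML, and how set_nvidia_clock_offset scales the reported offset back down for display:

    // Standalone sketch of the offset_ratio round-trip; clock values are hypothetical.
    fn main() {
        let offset_ratio = 2; // reported by the daemon for a GeForce Ada GPU
        let default_max_clock: i32 = 10_501; // assumed default max memory clock (MHz)

        // The user requests a maximum memory clock 500 MHz above the default.
        let requested_max = default_max_clock + 500;

        // Daemon side (set_clocks_value): the delta is multiplied by the ratio
        // before being passed to NVML as the memory clock offset.
        let nvml_offset = (requested_max - default_max_clock) * offset_ratio;
        assert_eq!(nvml_offset, 1000);

        // GUI side (set_nvidia_clock_offset): the offset read back from the daemon
        // is divided by the ratio, so the displayed value matches the request.
        let displayed_max = default_max_clock + nvml_offset / offset_ratio;
        assert_eq!(displayed_max, requested_max);
    }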