clash-verge-rev-lite/src-tauri/src/ipc/monitor.rs
Tunglies 537d27d10b fix: clippy errors with new config (#4428)
* refactor: improve code quality with clippy fixes and standardized logging

- Replace dangerous unwrap()/expect() calls with proper error handling
- Standardize logging from log:: to the logging! macro with Type:: classifications (pattern sketched after this list)
- Fix app handle panics with graceful fallback patterns
- Improve error resilience across 35+ modules without breaking functionality
- Reduce clippy warnings from 300+ to 0 in main library code
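
The unwrap()/expect() replacements and the logging! migration follow one pattern, visible in the retry loop of monitor.rs below; the commented-out "before" line is illustrative only:

// Before: a failure to resolve the IPC path would panic the task
// let ipc_path_buf = ipc_path().expect("failed to get IPC path");

// After: classify and log the error, then back off and retry
let ipc_path_buf = match ipc_path() {
    Ok(path) => path,
    Err(e) => {
        logging!(error, Type::Ipc, true, "Failed to get IPC path: {}", e);
        tokio::time::sleep(retry_interval).await;
        continue;
    }
};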

* chore: update Cargo.toml configuration

* refactor: resolve all clippy warnings
- Fix Arc clone warnings using explicit Arc::clone syntax across 9 files (see the sketch after this list)
- Add #[allow(clippy::expect_used)] to test functions for appropriate expect usage
- Remove no-effect statements from debug code cleanup
- Apply clippy auto-fixes for dbg! macro removals and path statements
- Achieve zero clippy warnings on all targets with -D warnings flag
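
The Arc clone fix is purely syntactic; the explicit form (also used in monitor.rs below) is what the clone_on_ref_ptr restriction lint asks for:

// Implicit: compiles, but hides that only the Arc handle is cloned
// let monitor_current = current.clone();

// Explicit: clones the Arc pointer, not the underlying data
let monitor_current = Arc::clone(&current);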

* chore: update Cargo.toml clippy configuration

* refactor: simplify macOS job configuration and improve caching

* refactor: remove unnecessary async/await from service and proxy functions

* refactor: streamline pnpm installation in CI configuration

* refactor: simplify error handling and remove unnecessary else statements

* refactor: replace async/await with synchronous locks for core management

* refactor: add workflow_dispatch trigger to clippy job

* refactor: convert async functions to synchronous for service management

* refactor: convert async functions to synchronous for UWP tool invocation

* fix: change wrong logging

* refactor: convert proxy restoration functions to async

* Revert "refactor: convert proxy restoration functions to async"

This reverts commit b82f5d250b2af7151e4dfd7dd411630b34ed2c18.

* refactor: update proxy restoration functions to return Result types

* fix: handle errors during proxy restoration and update async function signatures

* fix: handle errors during proxy restoration and update async function signatures

* refactor: update restore_pac_proxy and restore_sys_proxy functions to async

* fix: convert restore_pac_proxy and restore_sys_proxy functions to async

* fix: await restore_sys_proxy calls in proxy restoration logic

* fix: suppress clippy warnings for unused async functions in proxy restoration

* fix: suppress clippy warnings for unused async functions in proxy restoration
2025-08-18 02:02:25 +08:00

120 lines
3.4 KiB
Rust

use kode_bridge::IpcStreamClient;
use std::sync::Arc;
use tokio::{sync::RwLock, time::Duration};
use crate::{
logging,
utils::{dirs::ipc_path, logging::Type},
};
/// Base trait for IPC monitoring data with freshness tracking
pub trait MonitorData: Clone + Send + Sync + 'static {
/// Update the last_updated timestamp to now
fn mark_fresh(&mut self);
/// Check if data is fresh based on the given duration
fn is_fresh_within(&self, duration: Duration) -> bool;
}
/// Trait for parsing streaming data and updating monitor state
pub trait StreamingParser: MonitorData {
/// Parse a line of streaming data and update the current state
fn parse_and_update(
line: &str,
current: Arc<RwLock<Self>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>>;
}
/// Generic IPC stream monitor that handles the common streaming pattern
pub struct IpcStreamMonitor<T>
where
T: MonitorData + StreamingParser + Default,
{
current: Arc<RwLock<T>>,
#[allow(dead_code)]
endpoint: String,
#[allow(dead_code)]
timeout: Duration,
#[allow(dead_code)]
retry_interval: Duration,
freshness_duration: Duration,
}
impl<T> IpcStreamMonitor<T>
where
T: MonitorData + StreamingParser + Default,
{
pub fn new(
endpoint: String,
timeout: Duration,
retry_interval: Duration,
freshness_duration: Duration,
) -> Self {
let current = Arc::new(RwLock::new(T::default()));
let monitor_current = Arc::clone(&current);
let endpoint_clone = endpoint.clone();
// Start the monitoring task
tokio::spawn(async move {
Self::streaming_task(monitor_current, endpoint_clone, timeout, retry_interval).await;
});
Self {
current,
endpoint,
timeout,
retry_interval,
freshness_duration,
}
}
pub async fn current(&self) -> T {
self.current.read().await.clone()
}
pub async fn is_fresh(&self) -> bool {
self.current
.read()
.await
.is_fresh_within(self.freshness_duration)
}
/// The core streaming task that can be specialized per monitor type
async fn streaming_task(
current: Arc<RwLock<T>>,
endpoint: String,
timeout: Duration,
retry_interval: Duration,
) {
loop {
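// Resolve the IPC path on each iteration; sleep and retry if it cannot be determined.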
let ipc_path_buf = match ipc_path() {
Ok(path) => path,
Err(e) => {
logging!(error, Type::Ipc, true, "Failed to get IPC path: {}", e);
tokio::time::sleep(retry_interval).await;
continue;
}
};
let ipc_path = ipc_path_buf.to_str().unwrap_or_default();
let client = match IpcStreamClient::new(ipc_path) {
Ok(client) => client,
Err(e) => {
logging!(error, Type::Ipc, true, "Failed to create IPC client: {}", e);
tokio::time::sleep(retry_interval).await;
continue;
}
};
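// Stream lines from the endpoint, feeding each into parse_and_update; when the stream ends or errors, wait and reconnect.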
let _ = client
.get(&endpoint)
.timeout(timeout)
.process_lines(|line| T::parse_and_update(line, Arc::clone(&current)))
.await;
tokio::time::sleep(retry_interval).await;
}
}
}
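
For context, a hypothetical consumer of this module might look like the following; TrafficStats, its field names, the /traffic endpoint, and the use of serde_json are illustrative assumptions, not code from the repository:

// Hypothetical example, not part of monitor.rs: a traffic monitor built on the traits above.
#[derive(Clone, Default)]
pub struct TrafficStats {
    pub up: u64,
    pub down: u64,
    last_updated: Option<std::time::Instant>,
}

impl MonitorData for TrafficStats {
    fn mark_fresh(&mut self) {
        self.last_updated = Some(std::time::Instant::now());
    }

    fn is_fresh_within(&self, duration: Duration) -> bool {
        self.last_updated
            .map(|t| t.elapsed() <= duration)
            .unwrap_or(false)
    }
}

impl StreamingParser for TrafficStats {
    fn parse_and_update(
        line: &str,
        current: Arc<RwLock<Self>>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        // Assume each streamed line is JSON such as {"up": 123, "down": 456}.
        let parsed: serde_json::Value = serde_json::from_str(line.trim())?;
        // try_write avoids blocking the stream callback if a reader currently holds the lock.
        if let Ok(mut stats) = current.try_write() {
            if let Some(up) = parsed["up"].as_u64() {
                stats.up = up;
            }
            if let Some(down) = parsed["down"].as_u64() {
                stats.down = down;
            }
            stats.mark_fresh();
        }
        Ok(())
    }
}

// Inside an async context: stream /traffic, reconnect after 1s, treat data under 2s old as fresh.
// let traffic = IpcStreamMonitor::<TrafficStats>::new(
//     "/traffic".to_string(),
//     Duration::from_secs(10),
//     Duration::from_secs(1),
//     Duration::from_secs(2),
// );
// let snapshot = traffic.current().await;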