refactor: invoke mihomo api by using tauri-plugin-mihomo (#4926)

* feat: add tauri-plugin-mihomo

* refactor: invoke mihomo api by using tauri-plugin-mihomo

* chore: todo

* chore: update

* chore: update

* chore: update

* chore: update

* fix: incorrect delay status and update pretty config

* chore: update

* chore: remove cache

* chore: update

* chore: update

* fix: app freeze when changing group proxy

* chore: update

* chore: update

* chore: add rustfmt.toml to tauri-plugin-mihomo

* chore: happy clippy

* refactor: connect mihomo websocket

* chore: update

* chore: update

* fix: parse bigint to number

* chore: update

* Revert "fix: parse bigint to number"

This reverts commit 74c006522e23aa52cf8979a8fb47d2b1ae0bb043.

* chore: use number instead of bigint

* chore: cleanup

* fix: rule data not refreshed when switching profile

* chore: update

* chore: cleanup

* chore: update

* fix: traffic graph data display

* feat: add ipc connection pool

* chore: update

* chore: clippy

* fix: incorrect delay status

* fix: typo

* fix: empty proxies tray menu

* chore: clippy

* chore: import tauri-plugin-mihomo by using git repo

* chore: cleanup

* fix: mihomo api

* fix: incorrect delay status

* chore: update tauri-plugin-mihomo dep

chore: update
This commit is contained in:
oomeow
2025-10-08 12:32:40 +08:00
committed by GitHub
parent 72aa56007c
commit 7fc238c27b
85 changed files with 1780 additions and 3344 deletions

View File

@@ -1,21 +1,15 @@
use std::collections::VecDeque;
use super::CmdResult;
use crate::{
cache::CacheProxy,
config::Config,
core::{CoreManager, handle},
};
use crate::{
config::*,
feat,
ipc::{self, IpcManager},
logging,
utils::logging::Type,
wrap_err,
core::{self, CoreManager, RunningMode, handle, logger},
};
use crate::{config::*, feat, logging, utils::logging::Type, wrap_err};
use serde_yaml_ng::Mapping;
use std::time::Duration;
// use std::time::Duration;
const CONFIG_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
// const CONFIG_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
/// 复制Clash环境变量
#[tauri::command]
@@ -112,20 +106,6 @@ pub async fn restart_core() -> CmdResult {
result
}
/// 获取代理延迟
#[tauri::command]
pub async fn clash_api_get_proxy_delay(
name: String,
url: Option<String>,
timeout: i32,
) -> CmdResult<serde_json::Value> {
wrap_err!(
IpcManager::global()
.test_proxy_delay(&name, url, timeout)
.await
)
}
/// 测试URL延迟
#[tauri::command]
pub async fn test_delay(url: String) -> CmdResult<u32> {
@@ -307,317 +287,13 @@ pub async fn validate_dns_config() -> CmdResult<(bool, String)> {
}
}
/// 获取Clash版本信息
#[tauri::command]
pub async fn get_clash_version() -> CmdResult<serde_json::Value> {
wrap_err!(IpcManager::global().get_version().await)
}
/// 获取Clash配置
#[tauri::command]
pub async fn get_clash_config() -> CmdResult<serde_json::Value> {
let manager = IpcManager::global();
let cache = CacheProxy::global();
let key = CacheProxy::make_key("clash_config", "default");
let value = cache
.get_or_fetch(key, CONFIG_REFRESH_INTERVAL, || async {
manager.get_config().await.unwrap_or_else(|e| {
logging!(error, Type::Cmd, "Failed to fetch clash config: {e}");
serde_json::Value::Object(serde_json::Map::new())
})
})
.await;
Ok((*value).clone())
}
/// 强制刷新Clash配置缓存
#[tauri::command]
pub async fn force_refresh_clash_config() -> CmdResult<serde_json::Value> {
let cache = CacheProxy::global();
let key = CacheProxy::make_key("clash_config", "default");
cache.map.remove(&key);
get_clash_config().await
}
/// 更新地理数据
#[tauri::command]
pub async fn update_geo_data() -> CmdResult {
wrap_err!(IpcManager::global().update_geo_data().await)
}
/// 升级Clash核心
#[tauri::command]
pub async fn upgrade_clash_core() -> CmdResult {
wrap_err!(IpcManager::global().upgrade_core().await)
}
/// 获取规则
#[tauri::command]
pub async fn get_clash_rules() -> CmdResult<serde_json::Value> {
wrap_err!(IpcManager::global().get_rules().await)
}
/// 更新代理选择
#[tauri::command]
pub async fn update_proxy_choice(group: String, proxy: String) -> CmdResult {
wrap_err!(IpcManager::global().update_proxy(&group, &proxy).await)
}
/// 获取代理提供者
#[tauri::command]
pub async fn get_proxy_providers() -> CmdResult<serde_json::Value> {
wrap_err!(IpcManager::global().get_providers_proxies().await)
}
/// 获取规则提供者
#[tauri::command]
pub async fn get_rule_providers() -> CmdResult<serde_json::Value> {
wrap_err!(IpcManager::global().get_rule_providers().await)
}
/// 代理提供者健康检查
#[tauri::command]
pub async fn proxy_provider_health_check(name: String) -> CmdResult {
wrap_err!(
IpcManager::global()
.proxy_provider_health_check(&name)
.await
)
}
/// 更新代理提供者
#[tauri::command]
pub async fn update_proxy_provider(name: String) -> CmdResult {
wrap_err!(IpcManager::global().update_proxy_provider(&name).await)
}
/// 更新规则提供者
#[tauri::command]
pub async fn update_rule_provider(name: String) -> CmdResult {
wrap_err!(IpcManager::global().update_rule_provider(&name).await)
}
/// 获取连接
#[tauri::command]
pub async fn get_clash_connections() -> CmdResult<serde_json::Value> {
wrap_err!(IpcManager::global().get_connections().await)
}
/// 删除连接
#[tauri::command]
pub async fn delete_clash_connection(id: String) -> CmdResult {
wrap_err!(IpcManager::global().delete_connection(&id).await)
}
/// 关闭所有连接
#[tauri::command]
pub async fn close_all_clash_connections() -> CmdResult {
wrap_err!(IpcManager::global().close_all_connections().await)
}
/// 获取流量数据 (使用新的IPC流式监控)
#[tauri::command]
pub async fn get_traffic_data() -> CmdResult<serde_json::Value> {
let traffic = crate::ipc::get_current_traffic().await;
let result = serde_json::json!({
"up": traffic.total_up,
"down": traffic.total_down,
"up_rate": traffic.up_rate,
"down_rate": traffic.down_rate,
"last_updated": traffic.last_updated.elapsed().as_secs()
});
Ok(result)
}
/// 获取内存数据 (使用新的IPC流式监控)
#[tauri::command]
pub async fn get_memory_data() -> CmdResult<serde_json::Value> {
let memory = crate::ipc::get_current_memory().await;
let usage_percent = if memory.oslimit > 0 {
(memory.inuse as f64 / memory.oslimit as f64) * 100.0
} else {
0.0
pub async fn get_clash_logs() -> CmdResult<VecDeque<String>> {
let logs = match core::CoreManager::global().get_running_mode() {
// TODO: 服务模式下日志获取接口
RunningMode::Service => VecDeque::new(),
RunningMode::Sidecar => logger::Logger::global().get_logs().clone(),
_ => VecDeque::new(),
};
let result = serde_json::json!({
"inuse": memory.inuse,
"oslimit": memory.oslimit,
"usage_percent": usage_percent,
"last_updated": memory.last_updated.elapsed().as_secs()
});
Ok(result)
}
/// 启动流量监控服务 (IPC流式监控自动启动此函数为兼容性保留)
#[tauri::command]
pub async fn start_traffic_service() -> CmdResult {
logging!(trace, Type::Ipc, "启动流量监控服务 (IPC流式监控)");
// 新的IPC监控在首次访问时自动启动
// 触发一次访问以确保监控器已初始化
let _ = crate::ipc::get_current_traffic().await;
let _ = crate::ipc::get_current_memory().await;
logging!(info, Type::Ipc, "IPC流式监控已激活");
Ok(())
}
/// 停止流量监控服务 (IPC流式监控无需显式停止此函数为兼容性保留)
#[tauri::command]
pub async fn stop_traffic_service() -> CmdResult {
logging!(trace, Type::Ipc, "停止流量监控服务请求 (IPC流式监控)");
// 新的IPC监控是持久的无需显式停止
logging!(info, Type::Ipc, "IPC流式监控继续运行");
Ok(())
}
/// 获取格式化的流量数据 (包含单位,便于前端显示)
#[tauri::command]
pub async fn get_formatted_traffic_data() -> CmdResult<serde_json::Value> {
logging!(trace, Type::Ipc, "获取格式化流量数据");
let (up_rate, down_rate, total_up, total_down, is_fresh) =
crate::ipc::get_formatted_traffic().await;
let result = serde_json::json!({
"up_rate_formatted": up_rate,
"down_rate_formatted": down_rate,
"total_up_formatted": total_up,
"total_down_formatted": total_down,
"is_fresh": is_fresh
});
logging!(
debug,
Type::Ipc,
"格式化流量数据: ↑{up_rate}/s ↓{down_rate}/s (总计: ↑{total_up} ↓{total_down})"
);
Ok(result)
}
/// 获取格式化的内存数据 (包含单位,便于前端显示)
#[tauri::command]
pub async fn get_formatted_memory_data() -> CmdResult<serde_json::Value> {
logging!(info, Type::Ipc, "获取格式化内存数据");
let (inuse, oslimit, usage_percent, is_fresh) = crate::ipc::get_formatted_memory().await;
let result = serde_json::json!({
"inuse_formatted": inuse,
"oslimit_formatted": oslimit,
"usage_percent": usage_percent,
"is_fresh": is_fresh
});
logging!(
debug,
Type::Ipc,
"格式化内存数据: {inuse} / {oslimit} ({usage_percent:.1}%)"
);
Ok(result)
}
/// 获取系统监控概览 (流量+内存,便于前端一次性获取所有状态)
#[tauri::command]
pub async fn get_system_monitor_overview() -> CmdResult<serde_json::Value> {
logging!(debug, Type::Ipc, "获取系统监控概览");
// 并发获取流量和内存数据
let (traffic, memory) = tokio::join!(
crate::ipc::get_current_traffic(),
crate::ipc::get_current_memory()
);
let (traffic_formatted, memory_formatted) = tokio::join!(
crate::ipc::get_formatted_traffic(),
crate::ipc::get_formatted_memory()
);
let traffic_is_fresh = traffic.last_updated.elapsed().as_secs() < 5;
let memory_is_fresh = memory.last_updated.elapsed().as_secs() < 10;
let result = serde_json::json!({
"traffic": {
"raw": {
"up": traffic.total_up,
"down": traffic.total_down,
"up_rate": traffic.up_rate,
"down_rate": traffic.down_rate
},
"formatted": {
"up_rate": traffic_formatted.0,
"down_rate": traffic_formatted.1,
"total_up": traffic_formatted.2,
"total_down": traffic_formatted.3
},
"is_fresh": traffic_is_fresh
},
"memory": {
"raw": {
"inuse": memory.inuse,
"oslimit": memory.oslimit,
"usage_percent": if memory.oslimit > 0 {
(memory.inuse as f64 / memory.oslimit as f64) * 100.0
} else {
0.0
}
},
"formatted": {
"inuse": memory_formatted.0,
"oslimit": memory_formatted.1,
"usage_percent": memory_formatted.2
},
"is_fresh": memory_is_fresh
},
"overall_status": if traffic_is_fresh && memory_is_fresh { "healthy" } else { "stale" }
});
Ok(result)
}
/// 获取代理组延迟
#[tauri::command]
pub async fn get_group_proxy_delays(
group_name: String,
url: Option<String>,
timeout: Option<i32>,
) -> CmdResult<serde_json::Value> {
wrap_err!(
IpcManager::global()
.get_group_proxy_delays(&group_name, url, timeout.unwrap_or(10000))
.await
)
}
/// 检查调试是否启用
#[tauri::command]
pub async fn is_clash_debug_enabled() -> CmdResult<bool> {
match IpcManager::global().is_debug_enabled().await {
Ok(enabled) => Ok(enabled),
Err(_) => Ok(false),
}
}
/// 垃圾回收
#[tauri::command]
pub async fn clash_gc() -> CmdResult {
wrap_err!(IpcManager::global().gc().await)
}
/// 获取日志 (使用新的流式实现)
#[tauri::command]
pub async fn get_clash_logs() -> CmdResult<serde_json::Value> {
Ok(ipc::get_logs_json().await)
}
/// 启动日志监控
#[tauri::command]
pub async fn start_logs_monitoring(level: Option<String>) -> CmdResult {
ipc::start_logs_monitoring(level).await;
Ok(())
}
/// 停止日志监控
#[tauri::command]
pub async fn stop_logs_monitoring() -> CmdResult {
ipc::stop_logs_monitoring().await;
Ok(())
}
/// 清除日志
#[tauri::command]
pub async fn clear_logs() -> CmdResult {
ipc::clear_logs().await;
Ok(())
Ok(logs)
}

View File

@@ -503,11 +503,11 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
handle::Handle::refresh_clash();
// 强制刷新代理缓存确保profile切换后立即获取最新节点数据
crate::process::AsyncHandler::spawn(|| async move {
if let Err(e) = super::proxy::force_refresh_proxies().await {
log::warn!(target: "app", "强制刷新代理缓存失败: {e}");
}
});
// crate::process::AsyncHandler::spawn(|| async move {
// if let Err(e) = super::proxy::force_refresh_proxies().await {
// log::warn!(target: "app", "强制刷新代理缓存失败: {e}");
// }
// });
if let Err(e) = Tray::global().update_tooltip().await {
log::warn!(target: "app", "异步更新托盘提示失败: {e}");

View File

@@ -1,59 +1,7 @@
use tauri::Emitter;
use super::CmdResult;
use crate::{
cache::CacheProxy,
core::{handle::Handle, tray::Tray},
ipc::IpcManager,
logging,
utils::logging::Type,
};
use std::time::Duration;
const PROXIES_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
const PROVIDERS_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
#[tauri::command]
pub async fn get_proxies() -> CmdResult<serde_json::Value> {
let cache = CacheProxy::global();
let key = CacheProxy::make_key("proxies", "default");
let value = cache
.get_or_fetch(key, PROXIES_REFRESH_INTERVAL, || async {
let manager = IpcManager::global();
manager.get_proxies().await.unwrap_or_else(|e| {
logging!(error, Type::Cmd, "Failed to fetch proxies: {e}");
serde_json::Value::Object(serde_json::Map::new())
})
})
.await;
Ok((*value).clone())
}
/// 强制刷新代理缓存用于profile切换
#[tauri::command]
pub async fn force_refresh_proxies() -> CmdResult<serde_json::Value> {
let cache = CacheProxy::global();
let key = CacheProxy::make_key("proxies", "default");
cache.map.remove(&key);
get_proxies().await
}
#[tauri::command]
pub async fn get_providers_proxies() -> CmdResult<serde_json::Value> {
let cache = CacheProxy::global();
let key = CacheProxy::make_key("providers", "default");
let value = cache
.get_or_fetch(key, PROVIDERS_REFRESH_INTERVAL, || async {
let manager = IpcManager::global();
manager.get_providers_proxies().await.unwrap_or_else(|e| {
logging!(error, Type::Cmd, "Failed to fetch provider proxies: {e}");
serde_json::Value::Object(serde_json::Map::new())
})
})
.await;
Ok((*value).clone())
}
use crate::{logging, utils::logging::Type};
// TODO: 前端通过 emit 发送更新事件, tray 监听更新事件
/// 同步托盘和GUI的代理选择状态
#[tauri::command]
pub async fn sync_tray_proxy_selection() -> CmdResult<()> {
@@ -70,54 +18,3 @@ pub async fn sync_tray_proxy_selection() -> CmdResult<()> {
}
}
}
/// 更新代理选择并同步托盘和GUI状态
#[tauri::command]
pub async fn update_proxy_and_sync(group: String, proxy: String) -> CmdResult<()> {
match IpcManager::global().update_proxy(&group, &proxy).await {
Ok(_) => {
// println!("Proxy updated successfully: {} -> {}", group,proxy);
logging!(
info,
Type::Cmd,
"Proxy updated successfully: {} -> {}",
group,
proxy
);
let cache = CacheProxy::global();
let key = CacheProxy::make_key("proxies", "default");
cache.map.remove(&key);
if let Err(e) = Tray::global().update_menu().await {
logging!(error, Type::Cmd, "Failed to sync tray menu: {}", e);
}
if let Some(app_handle) = Handle::global().app_handle() {
let _ = app_handle.emit("verge://force-refresh-proxies", ());
let _ = app_handle.emit("verge://refresh-proxy-config", ());
}
logging!(
info,
Type::Cmd,
"Proxy and sync completed successfully: {} -> {}",
group,
proxy
);
Ok(())
}
Err(e) => {
println!("1111111111111111");
logging!(
error,
Type::Cmd,
"Failed to update proxy: {} -> {}, error: {}",
group,
proxy,
e
);
Err(e.to_string())
}
}
}

View File

@@ -28,9 +28,7 @@ pub async fn export_diagnostic_info() -> CmdResult<()> {
let sysinfo = PlatformSpecification::new_sync();
let info = format!("{sysinfo:?}");
let app_handle = handle::Handle::global()
.app_handle()
.ok_or("Failed to get app handle")?;
let app_handle = handle::Handle::app_handle();
let cliboard = app_handle.clipboard();
if cliboard.write_text(info).is_err() {
logging!(error, Type::System, "Failed to write to clipboard");