diff --git a/UPDATELOG.md b/UPDATELOG.md
index 06ff83ef..4e78ca64 100644
--- a/UPDATELOG.md
+++ b/UPDATELOG.md
@@ -44,6 +44,7 @@
 - Fix incorrect `Windows` installer argument usage
 - Fix node latency testing being broken after the `IPC` migration
 - Fix per-connection up/down rate calculation being broken after the `IPC` migration
+- Fix core log output being broken after the `IPC` migration
 - Fix `External-Controller-Cors` failing to save due to missing prerequisites
 - Fix inconsistent port display on the home page
 
diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs
index ce7f79fd..a8c1822b 100644
--- a/src-tauri/src/cmd/clash.rs
+++ b/src-tauri/src/cmd/clash.rs
@@ -1,7 +1,12 @@
 use super::CmdResult;
 use crate::{
-    config::*, core::*, feat, ipc::IpcManager, process::AsyncHandler,
-    state::proxy::ProxyRequestCache, wrap_err,
+    config::*,
+    core::*,
+    feat,
+    ipc::{self, IpcManager},
+    process::AsyncHandler,
+    state::proxy::ProxyRequestCache,
+    wrap_err,
 };
 use serde_yaml::Mapping;
 use std::time::Duration;
@@ -572,3 +577,23 @@ pub async fn is_clash_debug_enabled() -> CmdResult<bool> {
 pub async fn clash_gc() -> CmdResult {
     wrap_err!(IpcManager::global().gc().await)
 }
+
+/// Fetch logs (served by the new streaming implementation)
+#[tauri::command]
+pub async fn get_clash_logs(level: Option<String>) -> CmdResult<serde_json::Value> {
+    Ok(ipc::get_logs_json(level).await)
+}
+
+/// Start log monitoring
+#[tauri::command]
+pub async fn start_logs_monitoring(level: Option<String>) -> CmdResult {
+    ipc::start_logs_monitoring(level).await;
+    Ok(())
+}
+
+/// Clear logs
+#[tauri::command]
+pub async fn clear_logs() -> CmdResult {
+    ipc::clear_logs().await;
+    Ok(())
+}
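+
+// A minimal sketch of how the frontend reaches the three commands above
+// through Tauri's `invoke` (the actual wrappers are added to
+// src/services/cmds.ts later in this diff):
+//
+//     await invoke("start_logs_monitoring", { level: "debug" });
+//     const logs = await invoke("get_clash_logs", { level: "debug" });
+//     await invoke("clear_logs");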
diff --git a/src-tauri/src/ipc/general.rs b/src-tauri/src/ipc/general.rs
index 647edca1..5b1312c0 100644
--- a/src-tauri/src/ipc/general.rs
+++ b/src-tauri/src/ipc/general.rs
@@ -382,29 +382,5 @@ impl IpcManager {
         }
     }
 
-    // Traffic data
-    #[allow(dead_code)]
-    pub async fn get_traffic(&self) -> AnyResult {
-        let url = "/traffic";
-        logging!(info, Type::Ipc, true, "IPC: sending GET request to {}", url);
-        let result = self.send_request("GET", url, None).await;
-        logging!(
-            info,
-            Type::Ipc,
-            true,
-            "IPC: /traffic request result: {:?}",
-            result
-        );
-        result
-    }
-
-    // Memory data
-    #[allow(dead_code)]
-    pub async fn get_memory(&self) -> AnyResult {
-        let url = "/memory";
-        logging!(info, Type::Ipc, true, "IPC: sending GET request to {}", url);
-        let result = self.send_request("GET", url, None).await;
-        logging!(info, Type::Ipc, true, "IPC: /memory request result: {:?}", result);
-        result
-    }
+    // Log-related functionality has moved to the logs.rs module, which uses streaming
 }
diff --git a/src-tauri/src/ipc/logs.rs b/src-tauri/src/ipc/logs.rs
new file mode 100644
index 00000000..a5cedc2d
--- /dev/null
+++ b/src-tauri/src/ipc/logs.rs
@@ -0,0 +1,295 @@
+use kode_bridge::IpcStreamClient;
+use serde::{Deserialize, Serialize};
+use std::{
+    collections::VecDeque,
+    sync::{Arc, OnceLock},
+    time::Instant,
+};
+use tokio::{sync::RwLock, task::JoinHandle, time::Duration};
+
+use crate::{
+    logging,
+    utils::{dirs::ipc_path, logging::Type},
+};
+
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct LogData {
+    #[serde(rename = "type")]
+    pub log_type: String,
+    pub payload: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct LogItem {
+    pub log_type: String,
+    pub payload: String,
+    pub time: String,
+}
+
+impl LogItem {
+    fn new(log_type: String, payload: String) -> Self {
+        use std::time::{SystemTime, UNIX_EPOCH};
+
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+
+        // Simple time formatting (HH:MM:SS, derived from the raw epoch, i.e. UTC)
+        let hours = (now / 3600) % 24;
+        let minutes = (now / 60) % 60;
+        let seconds = now % 60;
+        let time_str = format!("{hours:02}:{minutes:02}:{seconds:02}");
+
+        Self {
+            log_type,
+            payload,
+            time: time_str,
+        }
+    }
+}
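+
+// Note on the formatting above: the HH:MM:SS string is computed from the raw
+// Unix timestamp, so it is UTC rather than local time. A local-time variant
+// would need a timezone-aware crate; for example, assuming `chrono` were
+// added as a dependency:
+//
+//     chrono::Local::now().format("%H:%M:%S").to_string()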
"LogsMonitor: Started new monitoring task for level: {:?}", + level + ); + } + + pub async fn current(&self) -> CurrentLogs { + self.current.read().await.clone() + } + + pub async fn clear_logs(&self) { + let mut current = self.current.write().await; + current.logs.clear(); + current.last_updated = Instant::now(); + + // Also reset monitoring level when clearing logs + { + let mut monitoring_level = self.current_monitoring_level.write().await; + *monitoring_level = None; + } + + // Abort current monitoring task + { + let mut handle = self.task_handle.write().await; + if let Some(task) = handle.take() { + task.abort(); + logging!( + info, + Type::Ipc, + true, + "LogsMonitor: Stopped monitoring task due to clear_logs" + ); + } + } + } + + pub async fn get_logs_as_json(&self, level: Option) -> serde_json::Value { + let current = self.current().await; + + let filtered_logs: Vec = current + .logs + .iter() + .filter(|log| { + if let Some(ref filter_level) = level { + if filter_level == "all" { + true + } else { + log.log_type.to_lowercase() == filter_level.to_lowercase() + } + } else { + true + } + }) + .map(|log| { + serde_json::json!({ + "type": log.log_type, + "payload": log.payload, + "time": log.time + }) + }) + .collect(); + + serde_json::Value::Array(filtered_logs) + } +} + +pub async fn start_logs_monitoring(level: Option) { + LogsMonitor::global().start_monitoring(level).await; +} + +pub async fn clear_logs() { + LogsMonitor::global().clear_logs().await; +} + +pub async fn get_logs_json(level: Option) -> serde_json::Value { + LogsMonitor::global().get_logs_as_json(level).await +} diff --git a/src-tauri/src/ipc/memory.rs b/src-tauri/src/ipc/memory.rs index 778f6e2d..c3cbdeef 100644 --- a/src-tauri/src/ipc/memory.rs +++ b/src-tauri/src/ipc/memory.rs @@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration}; use crate::{ logging, - utils::{dirs::ipc_path, logging::Type}, + utils::{dirs::ipc_path, format::fmt_bytes, logging::Type}, }; #[derive(Debug, Clone, Deserialize, Serialize)] @@ -101,16 +101,6 @@ impl MemoryMonitor { } } -fn fmt_bytes(bytes: u64) -> String { - const UNITS: &[&str] = &["B", "KB", "MB", "GB"]; - let (mut val, mut unit) = (bytes as f64, 0); - while val >= 1024.0 && unit < 3 { - val /= 1024.0; - unit += 1; - } - format!("{:.1}{}", val, UNITS[unit]) -} - pub async fn get_current_memory() -> CurrentMemory { MemoryMonitor::global().current().await } diff --git a/src-tauri/src/ipc/mod.rs b/src-tauri/src/ipc/mod.rs index 7bddb68e..c04d9d21 100644 --- a/src-tauri/src/ipc/mod.rs +++ b/src-tauri/src/ipc/mod.rs @@ -1,8 +1,10 @@ pub mod general; +pub mod logs; pub mod memory; pub mod traffic; pub use general::IpcManager; +pub use logs::{clear_logs, get_logs_json, start_logs_monitoring}; pub use memory::{get_current_memory, get_formatted_memory}; pub use traffic::{get_current_traffic, get_formatted_traffic}; diff --git a/src-tauri/src/ipc/traffic.rs b/src-tauri/src/ipc/traffic.rs index 0e40e88f..5c971d89 100644 --- a/src-tauri/src/ipc/traffic.rs +++ b/src-tauri/src/ipc/traffic.rs @@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration}; use crate::{ logging, - utils::{dirs::ipc_path, logging::Type}, + utils::{dirs::ipc_path, format::fmt_bytes, logging::Type}, }; #[derive(Debug, Clone, Deserialize, Serialize)] @@ -119,16 +119,6 @@ impl TrafficMonitor { } } -fn fmt_bytes(bytes: u64) -> String { - const UNITS: &[&str] = &["B", "KB", "MB", "GB"]; - let (mut val, mut unit) = (bytes as f64, 0); - while val >= 1024.0 && unit < 3 { - val /= 1024.0; - unit += 1; - } - format!("{:.1}{}", 
diff --git a/src-tauri/src/ipc/memory.rs b/src-tauri/src/ipc/memory.rs
index 778f6e2d..c3cbdeef 100644
--- a/src-tauri/src/ipc/memory.rs
+++ b/src-tauri/src/ipc/memory.rs
@@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration};
 
 use crate::{
     logging,
-    utils::{dirs::ipc_path, logging::Type},
+    utils::{dirs::ipc_path, format::fmt_bytes, logging::Type},
 };
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
@@ -101,16 +101,6 @@ impl MemoryMonitor {
     }
 }
 
-fn fmt_bytes(bytes: u64) -> String {
-    const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
-    let (mut val, mut unit) = (bytes as f64, 0);
-    while val >= 1024.0 && unit < 3 {
-        val /= 1024.0;
-        unit += 1;
-    }
-    format!("{:.1}{}", val, UNITS[unit])
-}
-
 pub async fn get_current_memory() -> CurrentMemory {
     MemoryMonitor::global().current().await
 }
diff --git a/src-tauri/src/ipc/mod.rs b/src-tauri/src/ipc/mod.rs
index 7bddb68e..c04d9d21 100644
--- a/src-tauri/src/ipc/mod.rs
+++ b/src-tauri/src/ipc/mod.rs
@@ -1,8 +1,10 @@
 pub mod general;
+pub mod logs;
 pub mod memory;
 pub mod traffic;
 
 pub use general::IpcManager;
+pub use logs::{clear_logs, get_logs_json, start_logs_monitoring};
 pub use memory::{get_current_memory, get_formatted_memory};
 pub use traffic::{get_current_traffic, get_formatted_traffic};
diff --git a/src-tauri/src/ipc/traffic.rs b/src-tauri/src/ipc/traffic.rs
index 0e40e88f..5c971d89 100644
--- a/src-tauri/src/ipc/traffic.rs
+++ b/src-tauri/src/ipc/traffic.rs
@@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration};
 
 use crate::{
     logging,
-    utils::{dirs::ipc_path, logging::Type},
+    utils::{dirs::ipc_path, format::fmt_bytes, logging::Type},
 };
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
@@ -119,16 +119,6 @@ impl TrafficMonitor {
     }
 }
 
-fn fmt_bytes(bytes: u64) -> String {
-    const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
-    let (mut val, mut unit) = (bytes as f64, 0);
-    while val >= 1024.0 && unit < 3 {
-        val /= 1024.0;
-        unit += 1;
-    }
-    format!("{:.1}{}", val, UNITS[unit])
-}
-
 pub async fn get_current_traffic() -> CurrentTraffic {
     TrafficMonitor::global().current().await
 }
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 86886e75..d7417cee 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -289,6 +289,9 @@ pub fn run() {
             cmd::get_group_proxy_delays,
             cmd::is_clash_debug_enabled,
             cmd::clash_gc,
+            cmd::get_clash_logs,
+            cmd::start_logs_monitoring,
+            cmd::clear_logs,
             cmd::get_traffic_data,
             cmd::get_memory_data,
             cmd::get_formatted_traffic_data,
diff --git a/src-tauri/src/utils/format.rs b/src-tauri/src/utils/format.rs
new file mode 100644
index 00000000..270206c3
--- /dev/null
+++ b/src-tauri/src/utils/format.rs
@@ -0,0 +1,25 @@
+/// Format bytes into a human-readable string (B, KB, MB, GB)
+pub fn fmt_bytes(bytes: u64) -> String {
+    const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
+    let (mut val, mut unit) = (bytes as f64, 0);
+    while val >= 1024.0 && unit < 3 {
+        val /= 1024.0;
+        unit += 1;
+    }
+    format!("{:.1}{}", val, UNITS[unit])
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_fmt_bytes() {
+        assert_eq!(fmt_bytes(0), "0.0B");
+        assert_eq!(fmt_bytes(512), "512.0B");
+        assert_eq!(fmt_bytes(1024), "1.0KB");
+        assert_eq!(fmt_bytes(1536), "1.5KB");
+        assert_eq!(fmt_bytes(1024 * 1024), "1.0MB");
+        assert_eq!(fmt_bytes(1024 * 1024 * 1024), "1.0GB");
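+        // Beyond the largest unit the value keeps growing rather than
+        // switching units; the `unit < 3` guard caps the suffix at GB:
+        assert_eq!(fmt_bytes(1024u64.pow(4)), "1024.0GB");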
"debug" : logLevel; - baseUrl += `?level=${level}`; - } - - return baseUrl; -}; - interface LogStore { logs: ILogItem[]; clearLogs: () => void; diff --git a/src/pages/_layout.tsx b/src/pages/_layout.tsx index aca5f26b..ab1645a1 100644 --- a/src/pages/_layout.tsx +++ b/src/pages/_layout.tsx @@ -184,7 +184,7 @@ const Layout = () => { useEffect(() => { if (clashInfo) { const { server = "", secret = "" } = clashInfo; - initGlobalLogService(server, secret, enableLog, "info"); + initGlobalLogService(enableLog, "info"); } }, [clashInfo, enableLog]); diff --git a/src/pages/logs.tsx b/src/pages/logs.tsx index 0b4778dc..a937fa29 100644 --- a/src/pages/logs.tsx +++ b/src/pages/logs.tsx @@ -71,18 +71,12 @@ const LogPage = () => { const handleLogLevelChange = (newLevel: LogLevel) => { setLogLevel(newLevel); - if (clashInfo) { - const { server = "", secret = "" } = clashInfo; - changeLogLevel(newLevel, server, secret); - } + changeLogLevel(newLevel); }; const handleToggleLog = () => { - if (clashInfo) { - const { server = "", secret = "" } = clashInfo; - toggleLogEnabled(server, secret); - setEnableLog(!enableLog); - } + toggleLogEnabled(); + setEnableLog(!enableLog); }; return ( diff --git a/src/services/cmds.ts b/src/services/cmds.ts index d4021c3e..b71429fd 100644 --- a/src/services/cmds.ts +++ b/src/services/cmds.ts @@ -412,6 +412,18 @@ export async function gc() { return invoke("clash_gc"); } +export async function getClashLogs(level?: string) { + return invoke("get_clash_logs", { level }); +} + +export async function startLogsMonitoring(level?: string) { + return invoke("start_logs_monitoring", { level }); +} + +export async function clearLogs() { + return invoke("clear_logs"); +} + export async function getVergeConfig() { return invoke("get_verge_config"); } diff --git a/src/services/global-log-service.ts b/src/services/global-log-service.ts index 8e948325..d2fd1e5f 100644 --- a/src/services/global-log-service.ts +++ b/src/services/global-log-service.ts @@ -1,6 +1,10 @@ // 全局日志服务,使应用在任何页面都能收集日志 import { create } from "zustand"; -import { createAuthSockette } from "@/utils/websocket"; +import { + fetchLogsViaIPC, + startLogsStreaming, + clearLogs as clearLogsIPC, +} from "@/services/ipc-log-service"; import dayjs from "dayjs"; // 最大日志数量 @@ -24,6 +28,7 @@ interface GlobalLogStore { setCurrentLevel: (level: LogLevel) => void; clearLogs: () => void; appendLog: (log: ILogItem) => void; + setLogs: (logs: ILogItem[]) => void; } // 创建全局状态存储 @@ -43,124 +48,117 @@ export const useGlobalLogStore = create((set) => ({ : [...state.logs, log]; return { logs: newLogs }; }), + setLogs: (logs: ILogItem[]) => set({ logs }), })); -// 构建WebSocket URL -const buildWSUrl = (server: string, logLevel: LogLevel) => { - let baseUrl = `${server}/logs`; - - // 只处理日志级别参数 - if (logLevel && logLevel !== "info") { - const level = logLevel === "all" ? 
"debug" : logLevel; - baseUrl += `?level=${level}`; +// IPC 日志获取函数 +export const fetchLogsViaIPCPeriodically = async ( + logLevel: LogLevel = "info", +) => { + try { + const logs = await fetchLogsViaIPC(logLevel); + useGlobalLogStore.getState().setLogs(logs); + console.log(`[GlobalLog-IPC] 成功获取 ${logs.length} 条日志`); + } catch (error) { + console.error("[GlobalLog-IPC] 获取日志失败:", error); } - - return baseUrl; }; -// 初始化全局日志服务 -let globalLogSocket: any = null; +// 初始化全局日志服务 (仅IPC模式) +let ipcPollingInterval: number | null = null; +let isInitializing = false; // 添加初始化标志 export const initGlobalLogService = ( - server: string, - secret: string, enabled: boolean = false, logLevel: LogLevel = "info", ) => { - const { appendLog, setEnabled } = useGlobalLogStore.getState(); + // 防止重复初始化 + if (isInitializing) { + console.log("[GlobalLog-IPC] 正在初始化中,跳过重复调用"); + return; + } + + const { setEnabled, setCurrentLevel } = useGlobalLogStore.getState(); // 更新启用状态 setEnabled(enabled); + setCurrentLevel(logLevel); - // 如果不启用或没有服务器信息,则不初始化 - if (!enabled || !server) { - closeGlobalLogConnection(); - return; - } - - // 关闭现有连接 - closeGlobalLogConnection(); - - // 创建新的WebSocket连接,使用新的认证方法 - const wsUrl = buildWSUrl(server, logLevel); - console.log(`[GlobalLog] 正在连接日志服务: ${wsUrl}`); - - if (!server) { - console.warn("[GlobalLog] 服务器地址为空,无法建立连接"); - return; - } - - globalLogSocket = createAuthSockette(wsUrl, secret, { - timeout: 8000, // 8秒超时 - onmessage(event) { - try { - const data = JSON.parse(event.data) as ILogItem; - const time = dayjs().format("MM-DD HH:mm:ss"); - appendLog({ ...data, time }); - } catch (error) { - console.error("[GlobalLog] 解析日志数据失败:", error); - } - }, - onerror(event) { - console.error("[GlobalLog] WebSocket连接错误", event); - - // 记录错误状态但不关闭连接,让重连机制起作用 - useGlobalLogStore.setState({ isConnected: false }); - - // 只有在重试彻底失败后才关闭连接 - if ( - event && - typeof event === "object" && - "type" in event && - event.type === "error" - ) { - console.error("[GlobalLog] 连接已彻底失败,关闭连接"); - closeGlobalLogConnection(); - } - }, - onclose(event) { - console.log("[GlobalLog] WebSocket连接关闭", event); - useGlobalLogStore.setState({ isConnected: false }); - }, - onopen(event) { - console.log("[GlobalLog] WebSocket连接已建立", event); - useGlobalLogStore.setState({ isConnected: true }); - }, - }); -}; - -// 关闭全局日志连接 -export const closeGlobalLogConnection = () => { - if (globalLogSocket) { - globalLogSocket.close(); - globalLogSocket = null; + // 如果不启用,则不初始化 + if (!enabled) { + clearIpcPolling(); useGlobalLogStore.setState({ isConnected: false }); + return; + } + + isInitializing = true; + + // 使用IPC流式模式 + console.log("[GlobalLog-IPC] 启用IPC流式日志服务"); + + // 启动流式监控 + startLogsStreaming(logLevel); + + // 立即获取一次日志 + fetchLogsViaIPCPeriodically(logLevel); + + // 设置定期轮询来同步流式缓存的数据 + clearIpcPolling(); + ipcPollingInterval = setInterval(() => { + fetchLogsViaIPCPeriodically(logLevel); + }, 1000); // 每1秒同步一次流式缓存 + + // 设置连接状态 + useGlobalLogStore.setState({ isConnected: true }); + + isInitializing = false; +}; + +// 清除IPC轮询 +const clearIpcPolling = () => { + if (ipcPollingInterval) { + clearInterval(ipcPollingInterval); + ipcPollingInterval = null; + console.log("[GlobalLog-IPC] 轮询已停止"); } }; -// 切换日志级别 -export const changeLogLevel = ( - level: LogLevel, - server: string, - secret: string, -) => { +// 关闭全局日志连接 (仅IPC模式) +export const closeGlobalLogConnection = () => { + clearIpcPolling(); + isInitializing = false; // 重置初始化标志 + useGlobalLogStore.setState({ isConnected: false }); + console.log("[GlobalLog-IPC] 日志服务已关闭"); +}; + +// 切换日志级别 
+
+// Change the log level (IPC mode only)
+export const changeLogLevel = (level: LogLevel) => {
   const { enabled } = useGlobalLogStore.getState();
   useGlobalLogStore.setState({ currentLevel: level });
 
-  if (enabled && server) {
-    initGlobalLogService(server, secret, enabled, level);
+  // Skip while initialization is in progress to avoid starting the stream twice
+  if (isInitializing) {
+    console.log("[GlobalLog-IPC] initialization in progress, skipping level-change restart");
+    return;
+  }
+
+  if (enabled) {
+    // Restart monitoring in IPC streaming mode
+    startLogsStreaming(level);
+    fetchLogsViaIPCPeriodically(level);
   }
 };
 
-// Toggle the enabled state
-export const toggleLogEnabled = (server: string, secret: string) => {
+// Toggle the enabled state (IPC mode only)
+export const toggleLogEnabled = () => {
   const { enabled, currentLevel } = useGlobalLogStore.getState();
   const newEnabled = !enabled;
 
   useGlobalLogStore.setState({ enabled: newEnabled });
 
-  if (newEnabled && server) {
-    initGlobalLogService(server, secret, newEnabled, currentLevel);
+  if (newEnabled) {
+    // Start directly in IPC mode
+    initGlobalLogService(newEnabled, currentLevel);
   } else {
     closeGlobalLogConnection();
   }
@@ -169,6 +167,8 @@
 // Clear the collected logs
 export const clearGlobalLogs = () => {
   useGlobalLogStore.getState().clearLogs();
+  // Also clear the backend streaming cache
+  clearLogsIPC();
 };
 
 // Custom hook for retrieving filtered log data
diff --git a/src/services/ipc-log-service.ts b/src/services/ipc-log-service.ts
new file mode 100644
index 00000000..3ba3a482
--- /dev/null
+++ b/src/services/ipc-log-service.ts
@@ -0,0 +1,63 @@
+// IPC-based log service using Tauri commands with streaming support
+import {
+  getClashLogs,
+  startLogsMonitoring,
+  clearLogs as clearLogsCmd,
+} from "@/services/cmds";
+import dayjs from "dayjs";
+
+export type LogLevel = "warning" | "info" | "debug" | "error" | "all";
+
+export interface ILogItem {
+  time?: string;
+  type: string;
+  payload: string;
+  [key: string]: any;
+}
+
+// Start logs monitoring with the specified level
+export const startLogsStreaming = async (logLevel: LogLevel = "info") => {
+  try {
+    const level = logLevel === "all" ? undefined : logLevel;
+    await startLogsMonitoring(level);
+    console.log(
+      `[IPC-LogService] Started logs monitoring with level: ${logLevel}`,
+    );
+  } catch (error) {
+    console.error("[IPC-LogService] Failed to start logs monitoring:", error);
+  }
+};
+
+// Fetch logs via an IPC command (now served from the streaming cache)
+export const fetchLogsViaIPC = async (
+  logLevel: LogLevel = "info",
+): Promise<ILogItem[]> => {
+  try {
+    const level = logLevel === "all" ? undefined : logLevel;
+    const response = await getClashLogs(level);
+
+    // Transform the logs into the format expected by the frontend
+    if (Array.isArray(response)) {
+      return response.map((log: any) => ({
+        ...log,
+        time: log.time || dayjs().format("HH:mm:ss"),
+      }));
+    }
+
+    return [];
+  } catch (error) {
+    console.error("[IPC-LogService] Failed to fetch logs:", error);
+    return [];
+  }
+};
+
+// Clear logs
+export const clearLogs = async () => {
+  try {
+    await clearLogsCmd();
+    console.log("[IPC-LogService] Logs cleared");
+  } catch (error) {
+    console.error("[IPC-LogService] Failed to clear logs:", error);
+  }
+};
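+
+// Usage sketch (hypothetical caller code, e.g. from a page component):
+//
+//     await startLogsStreaming("debug");           // start backend streaming
+//     const logs = await fetchLogsViaIPC("debug"); // read the cached entries
+//     await clearLogs();                           // wipe cache and stop the stream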