refactor: streamline SWR configuration and improve error handling in AppDataProvider
src-tauri/src/core/manager/config.rs (Normal file, +152 lines)
@@ -0,0 +1,152 @@
use super::CoreManager;
use crate::{
    config::*,
    constants::timing,
    core::{handle, validate::CoreConfigValidator},
    logging,
    utils::{dirs, help, logging::Type},
};
use anyhow::{Result, anyhow};
use std::{path::PathBuf, time::Instant};
use tauri_plugin_mihomo::Error as MihomoError;
use tokio::time::sleep;

impl CoreManager {
    pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> {
        use crate::constants::files::RUNTIME_CONFIG;

        let runtime_path = dirs::app_home_dir()?.join(RUNTIME_CONFIG);
        let clash_config = Config::clash().await.latest_ref().0.clone();

        *Config::runtime().await.draft_mut() = Box::new(IRuntime {
            config: Some(clash_config.clone()),
            exists_keys: vec![],
            chain_logs: Default::default(),
        });

        help::save_yaml(&runtime_path, &clash_config, Some("# Clash Verge Runtime")).await?;
        handle::Handle::notice_message(error_key, error_msg);
        Ok(())
    }

    pub async fn update_config(&self) -> Result<(bool, String)> {
        if handle::Handle::global().is_exiting() {
            return Ok((true, String::new()));
        }

        if !self.should_update_config()? {
            return Ok((true, String::new()));
        }

        let _permit = self.update_semaphore.try_acquire()
            .map_err(|_| anyhow!("Config update already in progress"))?;

        self.perform_config_update().await
    }

    fn should_update_config(&self) -> Result<bool> {
        let now = Instant::now();
        let mut last = self.last_update.lock();

        if let Some(last_time) = *last {
            if now.duration_since(last_time) < timing::CONFIG_UPDATE_DEBOUNCE {
                return Ok(false);
            }
        }

        *last = Some(now);
        Ok(true)
    }

    async fn perform_config_update(&self) -> Result<(bool, String)> {
        Config::generate().await?;

        match CoreConfigValidator::global().validate_config().await {
            Ok((true, _)) => {
                let run_path = Config::generate_file(ConfigType::Run).await?;
                self.apply_config(run_path).await?;
                Ok((true, String::new()))
            }
            Ok((false, error_msg)) => {
                Config::runtime().await.discard();
                Ok((false, error_msg))
            }
            Err(e) => {
                Config::runtime().await.discard();
                Err(e)
            }
        }
    }

    pub async fn put_configs_force(&self, path: PathBuf) -> Result<()> {
        self.apply_config(path).await
    }

    pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> {
        let path_str = dirs::path_to_str(&path)?;

        match self.reload_config(path_str).await {
            Ok(_) => {
                Config::runtime().await.apply();
                logging!(info, Type::Core, "Configuration applied");
                Ok(())
            }
            Err(err) if Self::should_restart_on_error(&err) => {
                self.retry_with_restart(path_str).await
            }
            Err(err) => {
                Config::runtime().await.discard();
                Err(anyhow!("Failed to apply config: {}", err))
            }
        }
    }

    async fn retry_with_restart(&self, config_path: &str) -> Result<()> {
        if handle::Handle::global().is_exiting() {
            return Err(anyhow!("Application exiting"));
        }

        logging!(warn, Type::Core, "Restarting core for config reload");
        self.restart_core().await?;
        sleep(timing::CONFIG_RELOAD_DELAY).await;

        self.reload_config(config_path).await?;
        Config::runtime().await.apply();
        logging!(info, Type::Core, "Configuration applied after restart");
        Ok(())
    }

    async fn reload_config(&self, path: &str) -> Result<(), MihomoError> {
        handle::Handle::mihomo().await.reload_config(true, path).await
    }

    fn should_restart_on_error(err: &MihomoError) -> bool {
        match err {
            MihomoError::ConnectionFailed | MihomoError::ConnectionLost => true,
            MihomoError::Io(io_err) => Self::is_connection_io_error(io_err.kind()),
            MihomoError::Reqwest(req_err) => {
                req_err.is_connect()
                    || req_err.is_timeout()
                    || Self::contains_error_pattern(&req_err.to_string())
            }
            MihomoError::FailedResponse(msg) => Self::contains_error_pattern(msg),
            _ => false,
        }
    }

    fn is_connection_io_error(kind: std::io::ErrorKind) -> bool {
        matches!(
            kind,
            std::io::ErrorKind::ConnectionAborted
                | std::io::ErrorKind::ConnectionRefused
                | std::io::ErrorKind::ConnectionReset
                | std::io::ErrorKind::NotFound
        )
    }

    fn contains_error_pattern(text: &str) -> bool {
        use crate::constants::error_patterns::CONNECTION_ERRORS;
        CONNECTION_ERRORS.iter().any(|p| text.contains(p))
    }
}
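Side note for reviewers: update_config stacks two guards, a time-based debounce (should_update_config) and a try_acquire on a one-permit semaphore so concurrent callers fail fast instead of queuing. A minimal self-contained sketch of that pattern, with a hypothetical Debouncer type standing in for the manager's fields:

    use std::sync::Arc;
    use std::time::{Duration, Instant};

    use parking_lot::Mutex;
    use tokio::sync::Semaphore;

    // Hypothetical stand-in for CoreManager's last_update + update_semaphore fields.
    struct Debouncer {
        last: Mutex<Option<Instant>>,
        window: Duration,
        gate: Arc<Semaphore>,
    }

    impl Debouncer {
        fn new(window: Duration) -> Self {
            Self {
                last: Mutex::new(None),
                window,
                gate: Arc::new(Semaphore::new(1)), // at most one update in flight
            }
        }

        async fn run<F, Fut>(&self, work: F) -> Option<Fut::Output>
        where
            F: FnOnce() -> Fut,
            Fut: std::future::Future,
        {
            // Time gate: drop calls that arrive inside the debounce window.
            {
                let mut last = self.last.lock();
                let now = Instant::now();
                if let Some(t) = *last {
                    if now.duration_since(t) < self.window {
                        return None;
                    }
                }
                *last = Some(now);
            } // guard dropped here, before any .await

            // Concurrency gate: later callers fail fast instead of queuing.
            let _permit = self.gate.try_acquire().ok()?;
            Some(work().await)
        }
    }

    #[tokio::main]
    async fn main() {
        let d = Debouncer::new(Duration::from_millis(500));
        // First call runs; an immediate second call is debounced to None.
        assert!(d.run(|| async { 42 }).await.is_some());
        assert!(d.run(|| async { 42 }).await.is_none());
    }

Dropping the parking_lot guard in its own block before the .await keeps the future Send, which is presumably why should_update_config above is synchronous.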
src-tauri/src/core/manager/lifecycle.rs (Normal file, +120 lines)
@@ -0,0 +1,120 @@
use super::{CoreManager, RunningMode};
use crate::{
    core::{logger::ClashLogger, service::{ServiceStatus, SERVICE_MANAGER}},
    logging,
    utils::logging::Type,
};
use anyhow::Result;

impl CoreManager {
    pub async fn start_core(&self) -> Result<()> {
        self.prepare_startup().await?;

        match self.get_running_mode() {
            RunningMode::Service => self.start_core_by_service().await,
            RunningMode::NotRunning | RunningMode::Sidecar => self.start_core_by_sidecar().await,
        }
    }

    pub async fn stop_core(&self) -> Result<()> {
        ClashLogger::global().clear_logs();

        match self.get_running_mode() {
            RunningMode::Service => self.stop_core_by_service().await,
            RunningMode::Sidecar => self.stop_core_by_sidecar(),
            RunningMode::NotRunning => Ok(()),
        }
    }

    pub async fn restart_core(&self) -> Result<()> {
        logging!(info, Type::Core, "Restarting core");
        self.stop_core().await?;

        if SERVICE_MANAGER.lock().await.init().await.is_ok() {
            let _ = SERVICE_MANAGER.lock().await.refresh().await;
        }

        self.start_core().await
    }

    pub async fn change_core(&self, clash_core: Option<String>) -> Result<(), String> {
        use crate::config::{Config, ConfigType, IVerge};

        let core = clash_core.as_ref()
            .ok_or_else(|| "Clash core cannot be None".to_string())?;

        if !IVerge::VALID_CLASH_CORES.contains(&core.as_str()) {
            return Err(format!("Invalid clash core: {}", core));
        }

        Config::verge().await.draft_mut().clash_core = clash_core;
        Config::verge().await.apply();

        let verge_data = Config::verge().await.latest_ref().clone();
        verge_data.save_file().await.map_err(|e| e.to_string())?;

        let run_path = Config::generate_file(ConfigType::Run).await
            .map_err(|e| e.to_string())?;

        self.apply_config(run_path).await.map_err(|e| e.to_string())
    }

    async fn prepare_startup(&self) -> Result<()> {
        self.wait_for_service_if_needed().await;

        let mode = match SERVICE_MANAGER.lock().await.current() {
            ServiceStatus::Ready => RunningMode::Service,
            _ => RunningMode::Sidecar,
        };

        self.set_running_mode(mode);
        Ok(())
    }

    #[cfg(target_os = "windows")]
    async fn wait_for_service_if_needed(&self) {
        use crate::{config::Config, constants::timing};
        use backoff::{Error as BackoffError, ExponentialBackoff};

        let needs_service = Config::verge().await
            .latest_ref()
            .enable_tun_mode
            .unwrap_or(false);

        if !needs_service {
            return;
        }

        let backoff = ExponentialBackoff {
            initial_interval: timing::SERVICE_WAIT_INTERVAL,
            max_interval: timing::SERVICE_WAIT_INTERVAL,
            max_elapsed_time: Some(timing::SERVICE_WAIT_MAX),
            multiplier: 1.0,
            randomization_factor: 0.0,
            ..Default::default()
        };

        let operation = || async {
            let mut manager = SERVICE_MANAGER.lock().await;

            if matches!(manager.current(), ServiceStatus::Ready) {
                return Ok(());
            }

            manager.init().await.map_err(BackoffError::transient)?;
            let _ = manager.refresh().await;

            if matches!(manager.current(), ServiceStatus::Ready) {
                Ok(())
            } else {
                Err(BackoffError::transient(anyhow::anyhow!("Service not ready")))
            }
        };

        let _ = backoff::future::retry(backoff, operation).await;
    }

    #[cfg(not(target_os = "windows"))]
    async fn wait_for_service_if_needed(&self) {}
}
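The ExponentialBackoff in wait_for_service_if_needed is deliberately tuned into a fixed-interval poll: multiplier 1.0 and randomization_factor 0.0 pin every retry to SERVICE_WAIT_INTERVAL, and max_elapsed_time bounds the whole wait. A standalone sketch of the same retry shape, assuming the backoff crate with its tokio feature and illustrative durations:

    use std::time::Duration;

    use backoff::{Error as BackoffError, ExponentialBackoff};

    // Hypothetical readiness probe standing in for SERVICE_MANAGER.
    async fn service_ready() -> bool {
        true
    }

    #[tokio::main]
    async fn main() {
        // multiplier 1.0 + zero jitter = poll every 500ms, give up after 10s total.
        let policy = ExponentialBackoff {
            initial_interval: Duration::from_millis(500),
            max_interval: Duration::from_millis(500),
            max_elapsed_time: Some(Duration::from_secs(10)),
            multiplier: 1.0,
            randomization_factor: 0.0,
            ..Default::default()
        };

        let result = backoff::future::retry(policy, || async {
            if service_ready().await {
                Ok(())
            } else {
                // transient = retry after the next interval; Permanent would abort.
                Err(BackoffError::transient(anyhow::anyhow!("service not ready")))
            }
        })
        .await;

        println!("service wait succeeded: {}", result.is_ok());
    }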
src-tauri/src/core/manager/mod.rs (Normal file, +80 lines)
@@ -0,0 +1,80 @@
mod config;
mod lifecycle;
mod process;
mod state;

use anyhow::Result;
use parking_lot::Mutex;
use std::{fmt, sync::Arc, time::Instant};
use tokio::sync::Semaphore;

use crate::process::CommandChildGuard;
use crate::singleton_lazy;

#[derive(Debug, Clone, Copy, serde::Serialize, PartialEq, Eq)]
pub enum RunningMode {
    Service,
    Sidecar,
    NotRunning,
}

impl fmt::Display for RunningMode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Service => write!(f, "Service"),
            Self::Sidecar => write!(f, "Sidecar"),
            Self::NotRunning => write!(f, "NotRunning"),
        }
    }
}

#[derive(Debug)]
pub struct CoreManager {
    state: Arc<Mutex<State>>,
    update_semaphore: Arc<Semaphore>,
    last_update: Arc<Mutex<Option<Instant>>>,
}

#[derive(Debug)]
struct State {
    running_mode: RunningMode,
    child_sidecar: Option<CommandChildGuard>,
}

impl Default for State {
    fn default() -> Self {
        Self {
            running_mode: RunningMode::NotRunning,
            child_sidecar: None,
        }
    }
}

impl Default for CoreManager {
    fn default() -> Self {
        Self {
            state: Arc::new(Mutex::new(State::default())),
            update_semaphore: Arc::new(Semaphore::new(1)),
            last_update: Arc::new(Mutex::new(None)),
        }
    }
}

impl CoreManager {
    pub fn get_running_mode(&self) -> RunningMode {
        self.state.lock().running_mode
    }

    pub fn set_running_mode(&self, mode: RunningMode) {
        self.state.lock().running_mode = mode;
    }

    pub async fn init(&self) -> Result<()> {
        self.cleanup_orphaned_processes().await?;
        self.start_core().await?;
        Ok(())
    }
}

singleton_lazy!(CoreManager, CORE_MANAGER, CoreManager::default);
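singleton_lazy! is a project-local macro whose expansion is not part of this diff. Assuming it simply wraps a lazily-initialized static behind a global() accessor, a rough standard-library equivalent looks like this (the stand-in struct is hypothetical):

    use std::sync::OnceLock;

    #[derive(Debug, Default)]
    struct CoreManager; // stand-in for the real struct

    impl CoreManager {
        // What singleton_lazy!(CoreManager, CORE_MANAGER, CoreManager::default)
        // plausibly expands to: one process-wide instance, created on first access.
        pub fn global() -> &'static CoreManager {
            static CORE_MANAGER: OnceLock<CoreManager> = OnceLock::new();
            CORE_MANAGER.get_or_init(CoreManager::default)
        }
    }

    fn main() {
        // Every call returns the same instance.
        let a = CoreManager::global() as *const _;
        let b = CoreManager::global() as *const _;
        assert_eq!(a, b);
    }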
src-tauri/src/core/manager/process.rs (Normal file, +204 lines)
@@ -0,0 +1,204 @@
use super::CoreManager;
use crate::{
    AsyncHandler,
    constants::{process, timing},
    logging,
    utils::logging::Type,
};
use anyhow::{Result, anyhow};

impl CoreManager {
    pub async fn cleanup_orphaned_processes(&self) -> Result<()> {
        logging!(info, Type::Core, "Cleaning orphaned mihomo processes");

        let current_pid = self.state.lock().child_sidecar.as_ref().and_then(|c| c.pid());
        let target_processes = process::process_names();

        let process_futures = target_processes.iter().map(|&name| {
            let process_name = process::with_extension(name);
            self.find_processes_by_name(process_name, name)
        });

        let process_results = futures::future::join_all(process_futures).await;

        let pids_to_kill: Vec<_> = process_results
            .into_iter()
            .filter_map(Result::ok)
            .flat_map(|(pids, name)| {
                pids.into_iter()
                    .filter(move |&pid| Some(pid) != current_pid)
                    .map(move |pid| (pid, name.clone()))
            })
            .collect();

        if pids_to_kill.is_empty() {
            return Ok(());
        }

        let kill_futures = pids_to_kill
            .iter()
            .map(|(pid, name)| self.kill_process_verified(*pid, name.clone()));

        let killed_count = futures::future::join_all(kill_futures)
            .await
            .into_iter()
            .filter(|&success| success)
            .count();

        if killed_count > 0 {
            logging!(info, Type::Core, "Cleaned {} orphaned processes", killed_count);
        }

        Ok(())
    }

    async fn find_processes_by_name(&self, process_name: String, _target: &str) -> Result<(Vec<u32>, String)> {
        #[cfg(windows)]
        {
            use std::mem;
            use winapi::um::{
                handleapi::CloseHandle,
                tlhelp32::{CreateToolhelp32Snapshot, PROCESSENTRY32W, Process32FirstW, Process32NextW, TH32CS_SNAPPROCESS},
            };

            let process_name_clone = process_name.clone();
            let pids = AsyncHandler::spawn_blocking(move || -> Result<Vec<u32>> {
                let mut pids = Vec::with_capacity(8);

                unsafe {
                    let snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
                    if snapshot == winapi::um::handleapi::INVALID_HANDLE_VALUE {
                        return Err(anyhow!("Failed to create process snapshot"));
                    }

                    let mut pe32: PROCESSENTRY32W = mem::zeroed();
                    pe32.dwSize = mem::size_of::<PROCESSENTRY32W>() as u32;

                    if Process32FirstW(snapshot, &mut pe32) != 0 {
                        loop {
                            // szExeFile is a fixed-size UTF-16 buffer; find the NUL terminator.
                            let end_pos = pe32.szExeFile.iter().position(|&x| x == 0)
                                .unwrap_or(pe32.szExeFile.len());

                            if end_pos > 0 {
                                let exe_file = String::from_utf16_lossy(&pe32.szExeFile[..end_pos]);
                                if exe_file.eq_ignore_ascii_case(&process_name_clone) {
                                    pids.push(pe32.th32ProcessID);
                                }
                            }

                            if Process32NextW(snapshot, &mut pe32) == 0 {
                                break;
                            }
                        }
                    }

                    CloseHandle(snapshot);
                }

                Ok(pids)
            }).await??;

            Ok((pids, process_name))
        }

        #[cfg(not(windows))]
        {
            let cmd = if cfg!(target_os = "macos") { "pgrep" } else { "pidof" };
            let output = tokio::process::Command::new(cmd)
                .arg(&process_name)
                .output()
                .await?;

            if !output.status.success() {
                return Ok((Vec::new(), process_name));
            }

            let stdout = String::from_utf8_lossy(&output.stdout);
            let pids: Vec<u32> = stdout
                .split_whitespace()
                .filter_map(|s| s.parse().ok())
                .collect();

            Ok((pids, process_name))
        }
    }

    async fn kill_process_verified(&self, pid: u32, process_name: String) -> bool {
        #[cfg(windows)]
        let success = {
            use winapi::um::{
                handleapi::CloseHandle,
                processthreadsapi::{OpenProcess, TerminateProcess},
                winnt::{HANDLE, PROCESS_TERMINATE},
            };

            AsyncHandler::spawn_blocking(move || unsafe {
                let handle: HANDLE = OpenProcess(PROCESS_TERMINATE, 0, pid);
                if handle.is_null() {
                    return false;
                }
                let result = TerminateProcess(handle, 1) != 0;
                CloseHandle(handle);
                result
            }).await.unwrap_or(false)
        };

        #[cfg(not(windows))]
        let success = tokio::process::Command::new("kill")
            .args(["-9", &pid.to_string()])
            .output()
            .await
            .map(|output| output.status.success())
            .unwrap_or(false);

        if !success {
            return false;
        }

        tokio::time::sleep(timing::PROCESS_VERIFY_DELAY).await;

        if self.is_process_running(pid).await.unwrap_or(false) {
            logging!(warn, Type::Core, "Process {} (PID: {}) still running after termination", process_name, pid);
            false
        } else {
            logging!(info, Type::Core, "Terminated process {} (PID: {})", process_name, pid);
            true
        }
    }

    async fn is_process_running(&self, pid: u32) -> Result<bool> {
        #[cfg(windows)]
        {
            use winapi::{
                shared::minwindef::DWORD,
                um::{
                    handleapi::CloseHandle,
                    processthreadsapi::{GetExitCodeProcess, OpenProcess},
                    winnt::{HANDLE, PROCESS_QUERY_INFORMATION},
                },
            };

            AsyncHandler::spawn_blocking(move || unsafe {
                let handle: HANDLE = OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid);
                if handle.is_null() {
                    return Ok(false);
                }
                let mut exit_code: DWORD = 0;
                let result = GetExitCodeProcess(handle, &mut exit_code);
                CloseHandle(handle);
                // 259 is STILL_ACTIVE: the process has not exited yet.
                Ok(result != 0 && exit_code == 259)
            }).await?
        }

        #[cfg(not(windows))]
        {
            let output = tokio::process::Command::new("ps")
                .args(["-p", &pid.to_string()])
                .output()
                .await?;

            Ok(output.status.success() && !output.stdout.is_empty())
        }
    }
}
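kill_process_verified does not trust the kill call alone: on Unix, kill -9 succeeding only means the signal was delivered, so the code sleeps briefly and re-checks. A minimal Unix-only sketch of that verify step (the PID and delay here are illustrative):

    use std::time::Duration;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let pid = 12345u32; // illustrative PID

        // Deliver SIGKILL; success here means the signal was sent, nothing more.
        let sent = tokio::process::Command::new("kill")
            .args(["-9", &pid.to_string()])
            .output()
            .await?
            .status
            .success();

        if sent {
            // Give the kernel a moment to reap the process before checking.
            tokio::time::sleep(Duration::from_millis(200)).await;

            // ps -p exits 0 only if the PID still exists.
            let still_running = tokio::process::Command::new("ps")
                .args(["-p", &pid.to_string()])
                .output()
                .await?
                .status
                .success();

            println!("pid {} terminated: {}", pid, !still_running);
        }
        Ok(())
    }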
src-tauri/src/core/manager/state.rs (Normal file, +125 lines)
@@ -0,0 +1,125 @@
use super::{CoreManager, RunningMode};
use crate::{
    AsyncHandler,
    config::Config,
    core::{
        handle,
        logger::ClashLogger,
        service,
    },
    logging,
    process::CommandChildGuard,
    utils::{
        dirs,
        init::sidecar_writer,
        logging::{SharedWriter, Type, write_sidecar_log},
    },
};
use anyhow::Result;
use compact_str::CompactString;
use flexi_logger::DeferredNow;
use log::Level;
use std::collections::VecDeque;
use tauri_plugin_shell::ShellExt;

impl CoreManager {
    pub async fn get_clash_logs(&self) -> Result<VecDeque<CompactString>> {
        match self.get_running_mode() {
            RunningMode::Service => service::get_clash_logs_by_service().await,
            RunningMode::Sidecar => Ok(ClashLogger::global().get_logs().clone()),
            RunningMode::NotRunning => Ok(VecDeque::new()),
        }
    }

    pub(super) async fn start_core_by_sidecar(&self) -> Result<()> {
        logging!(info, Type::Core, "Starting core in sidecar mode");

        let config_file = Config::generate_file(crate::config::ConfigType::Run).await?;
        let app_handle = handle::Handle::app_handle();
        let clash_core = Config::verge().await.latest_ref().get_valid_clash_core();
        let config_dir = dirs::app_home_dir()?;

        let (mut rx, child) = app_handle
            .shell()
            .sidecar(&clash_core)?
            .args([
                "-d",
                dirs::path_to_str(&config_dir)?,
                "-f",
                dirs::path_to_str(&config_file)?,
            ])
            .spawn()?;

        let pid = child.pid();
        logging!(trace, Type::Core, "Sidecar started with PID: {}", pid);

        {
            let mut state = self.state.lock();
            state.child_sidecar = Some(CommandChildGuard::new(child));
            state.running_mode = RunningMode::Sidecar;
        }

        let shared_writer: SharedWriter = std::sync::Arc::new(tokio::sync::Mutex::new(sidecar_writer().await?));

        AsyncHandler::spawn(|| async move {
            while let Some(event) = rx.recv().await {
                match event {
                    tauri_plugin_shell::process::CommandEvent::Stdout(line)
                    | tauri_plugin_shell::process::CommandEvent::Stderr(line) => {
                        let mut now = DeferredNow::default();
                        let message = CompactString::from(String::from_utf8_lossy(&line).as_ref());
                        let w = shared_writer.lock().await;
                        write_sidecar_log(w, &mut now, Level::Error, &message);
                        ClashLogger::global().append_log(message);
                    }
                    tauri_plugin_shell::process::CommandEvent::Terminated(term) => {
                        let mut now = DeferredNow::default();
                        let message = if let Some(code) = term.code {
                            CompactString::from(format!("Process terminated with code: {}", code))
                        } else if let Some(signal) = term.signal {
                            CompactString::from(format!("Process terminated by signal: {}", signal))
                        } else {
                            CompactString::from("Process terminated")
                        };
                        let w = shared_writer.lock().await;
                        write_sidecar_log(w, &mut now, Level::Info, &message);
                        ClashLogger::global().clear_logs();
                        break;
                    }
                    _ => {}
                }
            }
        });

        Ok(())
    }

    pub(super) fn stop_core_by_sidecar(&self) -> Result<()> {
        logging!(info, Type::Core, "Stopping sidecar");

        let mut state = self.state.lock();
        if let Some(child) = state.child_sidecar.take() {
            let pid = child.pid();
            drop(child);
            logging!(trace, Type::Core, "Sidecar stopped (PID: {:?})", pid);
        }
        state.running_mode = RunningMode::NotRunning;
        Ok(())
    }

    pub(super) async fn start_core_by_service(&self) -> Result<()> {
        logging!(info, Type::Core, "Starting core in service mode");
        let config_file = Config::generate_file(crate::config::ConfigType::Run).await?;
        service::run_core_by_service(&config_file).await?;
        self.set_running_mode(RunningMode::Service);
        Ok(())
    }

    pub(super) async fn stop_core_by_service(&self) -> Result<()> {
        logging!(info, Type::Core, "Stopping service");
        service::stop_core_by_service().await?;
        self.set_running_mode(RunningMode::NotRunning);
        Ok(())
    }
}
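For context on the event loop in start_core_by_sidecar: tauri_plugin_shell's spawn hands back a receiver of CommandEvents alongside the child handle, and the stream ends with Terminated. A trimmed sketch of the same consumption pattern, with error handling elided and a hypothetical binary name:

    use tauri_plugin_shell::{process::CommandEvent, ShellExt};

    // Spawns a sidecar and drains its event stream, mirroring start_core_by_sidecar.
    // "my-sidecar" is illustrative; the binary must be declared in tauri.conf.json.
    async fn run_sidecar(app_handle: tauri::AppHandle) -> anyhow::Result<()> {
        let (mut rx, child) = app_handle.shell().sidecar("my-sidecar")?.spawn()?;
        println!("sidecar spawned, PID {}", child.pid());

        while let Some(event) = rx.recv().await {
            match event {
                // Output events carry raw bytes; decode lossily before logging.
                CommandEvent::Stdout(line) | CommandEvent::Stderr(line) => {
                    println!("{}", String::from_utf8_lossy(&line));
                }
                CommandEvent::Terminated(term) => {
                    println!("exited: code={:?} signal={:?}", term.code, term.signal);
                    break; // no further events after termination
                }
                _ => {}
            }
        }
        Ok(())
    }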