refactor(profile_switch): modularize switch handler

- Break monolithic switch handler into proper module hierarchy
- Move shared globals, constants, and SwitchScope guard to state.rs
- Isolate queue orchestration and async task spawning in driver.rs
- Consolidate switch pipeline and config patching in workflow.rs
- Extract request pre-checks/YAML validation into validation.rs
This commit is contained in:
Slinetrac
2025-10-26 21:01:51 +08:00
Unverified
parent 14eb733f51
commit 32ff1b759f
5 changed files with 428 additions and 394 deletions

View File

@@ -0,0 +1,224 @@
use super::{
CmdResult,
state::{SWITCH_MUTEX, SWITCH_TASK_SEQUENCE, SwitchRequest},
workflow,
};
use crate::{logging, process::AsyncHandler, utils::logging::Type};
use once_cell::sync::OnceCell;
use smartstring::alias::String as SmartString;
use std::collections::VecDeque;
use std::sync::atomic::Ordering;
use tokio::sync::{
Mutex,
mpsc::{self, error::TrySendError},
oneshot,
};
const SWITCH_QUEUE_CAPACITY: usize = 32;
static SWITCH_QUEUE: OnceCell<mpsc::Sender<SwitchDriverMessage>> = OnceCell::new();
/// Bookkeeping owned by the driver task: at most one switch job runs at a
/// time, the rest wait in FIFO order (deduplicated by profile id).
#[derive(Debug, Default)]
struct SwitchDriverState {
    // The request currently being executed, if any.
    active: Option<SwitchRequest>,
    // Requests parked while `active` is running; duplicates for the same
    // profile are merged into an existing entry rather than appended.
    queue: VecDeque<SwitchRequest>,
}
/// Messages processed by the driver task spawned in `switch_driver_sender`.
#[derive(Debug)]
enum SwitchDriverMessage {
    /// Enqueue a switch. `respond_to` receives whether the request was
    /// accepted (currently always `true`: duplicates are coalesced, never
    /// rejected).
    Request {
        request: SwitchRequest,
        respond_to: oneshot::Sender<bool>,
    },
    /// Sent by a finished switch job so the driver can clear the active slot
    /// and start the next queued request.
    Completion {
        request: SwitchRequest,
        success: bool,
    },
}
/// Queue a profile switch and wait for the driver to acknowledge it.
///
/// Allocates a monotonic task id, submits a `SwitchRequest` to the driver
/// channel, and awaits the accept answer on a oneshot. If the bounded queue
/// is full, falls back to a blocking `send` to wait for space. Duplicate
/// requests for the same profile are coalesced by the driver, so this is
/// safe to call repeatedly.
pub(super) async fn switch_profile(
    profile_index: impl Into<SmartString>,
    notify_success: bool,
) -> CmdResult<bool> {
    let profile_index: SmartString = profile_index.into();
    let sender = switch_driver_sender();
    // Task ids are 1-based: fetch_add returns the previous value.
    let task_id = SWITCH_TASK_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1;
    logging!(
        info,
        Type::Cmd,
        "Queue profile switch task {} -> {} (notify={})",
        task_id,
        profile_index,
        notify_success
    );
    let request = SwitchRequest {
        task_id,
        profile_id: profile_index.clone(),
        notify: notify_success,
    };
    let (tx, rx) = oneshot::channel();
    match sender.try_send(SwitchDriverMessage::Request {
        request,
        respond_to: tx,
    }) {
        Ok(_) => await_enqueue_result(rx, &profile_index, false).await,
        Err(TrySendError::Full(msg)) => {
            // Queue at capacity: wait for space with a blocking send, then
            // await the ack as usual.
            logging!(
                warn,
                Type::Cmd,
                "Profile switch queue is full; waiting for space: {}",
                profile_index
            );
            match sender.send(msg).await {
                Ok(_) => await_enqueue_result(rx, &profile_index, true).await,
                Err(err) => {
                    logging!(
                        error,
                        Type::Cmd,
                        "Profile switch queue closed while waiting ({}): {}",
                        profile_index,
                        err
                    );
                    Err("switch profile queue unavailable".into())
                }
            }
        }
        Err(TrySendError::Closed(_)) => {
            logging!(
                error,
                Type::Cmd,
                "Profile switch queue is closed, cannot enqueue: {}",
                profile_index
            );
            Err("switch profile queue unavailable".into())
        }
    }
}

/// Await the driver's accept answer for a just-submitted request.
///
/// `waited` only selects the log message emitted when the driver drops the
/// response channel (i.e. whether we got here via the queue-full path).
async fn await_enqueue_result(
    rx: oneshot::Receiver<bool>,
    profile_index: &SmartString,
    waited: bool,
) -> CmdResult<bool> {
    match rx.await {
        Ok(result) => Ok(result),
        Err(err) => {
            if waited {
                logging!(
                    error,
                    Type::Cmd,
                    "Failed to receive enqueue result after wait for {}: {}",
                    profile_index,
                    err
                );
            } else {
                logging!(
                    error,
                    Type::Cmd,
                    "Failed to receive enqueue result for profile {}: {}",
                    profile_index,
                    err
                );
            }
            Err("switch profile queue unavailable".into())
        }
    }
}
/// Returns the global driver-channel sender, spawning the driver task on
/// first use.
///
/// The driver is a single tokio task that owns all queue state: it runs at
/// most one switch job at a time, coalesces duplicate requests for the same
/// profile (OR-merging their notify flags), and promotes the next queued
/// request whenever the active job reports completion.
fn switch_driver_sender() -> &'static mpsc::Sender<SwitchDriverMessage> {
    SWITCH_QUEUE.get_or_init(|| {
        let (tx, mut rx) = mpsc::channel::<SwitchDriverMessage>(SWITCH_QUEUE_CAPACITY);
        let driver_tx = tx.clone();
        tokio::spawn(async move {
            let mutex = SWITCH_MUTEX.get_or_init(|| Mutex::new(()));
            let mut state = SwitchDriverState::default();
            // Loop ends when every sender clone is dropped.
            while let Some(message) = rx.recv().await {
                match message {
                    SwitchDriverMessage::Request {
                        request,
                        respond_to,
                    } => {
                        // Requests are never rejected; duplicates are merged below.
                        let accepted = true;
                        let mut responder = Some(respond_to);
                        // Same profile already running: merge notify flag and ack.
                        if let Some(active) = &mut state.active
                            && active.profile_id == request.profile_id
                        {
                            active.notify |= request.notify;
                            if let Some(sender) = responder.take() {
                                let _ = sender.send(accepted);
                            }
                            continue;
                        }
                        // Same profile already queued: merge notify flag and ack.
                        if let Some(existing) = state
                            .queue
                            .iter_mut()
                            .find(|queued| queued.profile_id == request.profile_id)
                        {
                            existing.notify |= request.notify;
                            if let Some(sender) = responder.take() {
                                let _ = sender.send(accepted);
                            }
                            continue;
                        }
                        if state.active.is_none() {
                            // Idle: run this request immediately.
                            state.active = Some(request.clone());
                            if let Some(sender) = responder.take() {
                                let _ = sender.send(accepted);
                            }
                            start_switch_job(driver_tx.clone(), mutex, request);
                        } else {
                            // Busy with a different profile: park the request.
                            state.queue.push_back(request.clone());
                            if let Some(sender) = responder.take() {
                                let _ = sender.send(accepted);
                            }
                        }
                    }
                    SwitchDriverMessage::Completion { request, success } => {
                        logging!(
                            info,
                            Type::Cmd,
                            "Switch task {} completed (success={})",
                            request.task_id,
                            success
                        );
                        // Only clear the active slot if the completion matches
                        // the current task id (guards against stale reports).
                        if let Some(active) = &state.active
                            && active.task_id == request.task_id
                        {
                            state.active = None;
                        }
                        // Promote the next queued request, if any.
                        if state.active.is_none()
                            && let Some(next) = state.queue.pop_front()
                        {
                            state.active = Some(next.clone());
                            start_switch_job(driver_tx.clone(), mutex, next);
                        }
                    }
                }
            }
        });
        tx
    })
}
/// Spawn the actual switch work for `request` and report its outcome back to
/// the driver loop once finished.
fn start_switch_job(
    driver_tx: mpsc::Sender<SwitchDriverMessage>,
    mutex: &'static Mutex<()>,
    request: SwitchRequest,
) {
    AsyncHandler::spawn(move || async move {
        let success = workflow::run_switch_job(mutex, &request).await;
        // Keep copies for logging; `request` itself moves into the message.
        let task_id = request.task_id;
        let profile_id = request.profile_id.clone();
        let completion = SwitchDriverMessage::Completion { request, success };
        match driver_tx.send(completion).await {
            Ok(()) => {}
            Err(err) => {
                logging!(
                    error,
                    Type::Cmd,
                    "Failed to push switch completion to driver (task={} profile={}): {}",
                    task_id,
                    profile_id,
                    err
                );
            }
        }
    });
}

View File

@@ -0,0 +1,22 @@
mod driver;
mod state;
mod validation;
mod workflow;
use smartstring::alias::String;
use super::CmdResult;
/// Public entry: apply a full profiles patch through the workflow pipeline.
pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult<bool> {
    workflow::patch_profiles_config(profiles).await
}
/// Public entry: queue a switch to `profile_index` without requesting a
/// success notification.
pub(super) async fn patch_profiles_config_by_profile_index(
    profile_index: String,
) -> CmdResult<bool> {
    driver::switch_profile(profile_index, false).await
}
/// Public entry: queue a switch to `profile_index`; `notify_success` controls
/// whether a success notification is requested for this switch.
pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult<bool> {
    driver::switch_profile(profile_index, notify_success).await
}

View File

@@ -0,0 +1,35 @@
use once_cell::sync::OnceCell;
use smartstring::alias::String;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::time::Duration;
use tokio::sync::Mutex;
// Serializes switch jobs and config patches; initialized lazily on first use.
pub(super) static SWITCH_MUTEX: OnceCell<Mutex<()>> = OnceCell::new();
// Monotonic sequence used to detect and discard stale queued requests.
pub(super) static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0);
// True while a profile switch is executing (raised/cleared by `SwitchScope`).
pub(super) static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false);
// Monotonic id generator for queued switch tasks.
pub(super) static SWITCH_TASK_SEQUENCE: AtomicU64 = AtomicU64::new(0);
// Upper bound for a single switch job before it is treated as stalled.
pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30);
// Upper bound for post-switch connection cleanup.
pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5);
/// A single queued profile-switch job.
#[derive(Debug, Clone)]
pub(super) struct SwitchRequest {
    // Monotonic id taken from SWITCH_TASK_SEQUENCE.
    pub(super) task_id: u64,
    // Identifier of the profile to activate.
    pub(super) profile_id: String,
    // Carried from the `notify_success` API flag; OR-merged when duplicate
    // requests for the same profile are coalesced by the driver.
    pub(super) notify: bool,
}
/// Guard that flags a profile switch as in progress for its lifetime.
pub(super) struct SwitchScope;

impl SwitchScope {
    /// Raises the global switching flag; the flag is cleared again when the
    /// returned guard is dropped. The guard must therefore be bound to a
    /// local (`let _guard = SwitchScope::begin();`) — a bare
    /// `SwitchScope::begin();` would drop immediately and clear the flag on
    /// the same line, which `#[must_use]` now turns into a compiler warning.
    #[must_use = "the switching flag is cleared as soon as the guard is dropped"]
    pub(super) fn begin() -> Self {
        CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst);
        Self
    }
}

impl Drop for SwitchScope {
    fn drop(&mut self) {
        // Clearing on drop resets the flag on every exit path, including
        // early returns and panic unwinding.
        CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
    }
}

View File

@@ -0,0 +1,75 @@
use crate::{
config::Config,
logging,
utils::{dirs, logging::Type},
};
use serde_yaml_ng as serde_yaml;
use smartstring::alias::String;
use std::fs;
pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> {
logging!(
info,
Type::Cmd,
"Validating profile switch task {} -> {}",
task_id,
profile_id
);
let profile_key: String = profile_id.into();
let (file_path, profile_type, is_current, remote_url) = {
let profiles_guard = Config::profiles().await;
let latest = profiles_guard.latest_ref();
let item = latest.get_item(&profile_key).map_err(|err| -> String {
format!("Target profile {} not found: {}", profile_id, err).into()
})?;
(
item.file.clone().map(|f| f.to_string()),
item.itype.clone().map(|t| t.to_string()),
latest
.current
.as_ref()
.map(|current| current.as_str() == profile_id)
.unwrap_or(false),
item.url.clone().map(|u| u.to_string()),
)
};
if is_current {
logging!(
info,
Type::Cmd,
"Switch task {} is targeting the current profile {}; skipping validation",
task_id,
profile_id
);
return Ok(());
}
if matches!(profile_type.as_deref(), Some("remote")) {
let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false);
if !has_url {
return Err({
let msg = format!("Remote profile {} is missing a download URL", profile_id);
msg.into()
});
}
}
if let Some(file) = file_path {
let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String {
format!("Failed to resolve profiles directory: {}", err).into()
})?;
let path = profiles_dir.join(&file);
let contents = fs::read_to_string(&path).map_err(|err| -> String {
format!("Failed to read profile file {}: {}", path.display(), err).into()
})?;
serde_yaml::from_str::<serde_yaml::Value>(&contents).map_err(|err| -> String {
format!("Profile YAML parse failed for {}: {}", path.display(), err).into()
})?;
}
Ok(())
}

View File

@@ -1,4 +1,12 @@
use super::{CmdResult, StringifyErr};
use super::{
CmdResult,
state::{
CURRENT_REQUEST_SEQUENCE, CURRENT_SWITCHING_PROFILE, SWITCH_CLEANUP_TIMEOUT,
SWITCH_JOB_TIMEOUT, SWITCH_MUTEX, SwitchRequest, SwitchScope,
},
validation::validate_switch_request,
};
use crate::cmd::StringifyErr;
use crate::{
config::{Config, IProfiles, profiles::profiles_save_file_safe},
core::{CoreManager, handle, tray::Tray},
@@ -7,181 +15,11 @@ use crate::{
utils::{dirs, logging::Type},
};
use futures::FutureExt;
use once_cell::sync::OnceCell;
use serde_yaml_ng as serde_yaml;
use smartstring::alias::String;
use std::{
any::Any,
collections::VecDeque,
fs,
panic::AssertUnwindSafe,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
time::Duration,
};
use tokio::{
fs as tokio_fs,
sync::{
Mutex,
mpsc::{self, error::TrySendError},
oneshot,
},
time,
};
use std::{any::Any, panic::AssertUnwindSafe, sync::atomic::Ordering, time::Duration};
use tokio::{fs as tokio_fs, sync::Mutex, time};
static SWITCH_MUTEX: OnceCell<Mutex<()>> = OnceCell::new();
static SWITCH_QUEUE: OnceCell<mpsc::Sender<SwitchDriverMessage>> = OnceCell::new();
const SWITCH_QUEUE_CAPACITY: usize = 32;
// Track global request sequence to avoid stale queued execution.
static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0);
static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false);
static SWITCH_TASK_SEQUENCE: AtomicU64 = AtomicU64::new(0);
const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30);
const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5);
#[derive(Debug, Clone)]
struct SwitchRequest {
task_id: u64,
profile_id: String,
notify: bool,
}
#[derive(Debug, Default)]
struct SwitchDriverState {
active: Option<SwitchRequest>,
queue: VecDeque<SwitchRequest>,
}
#[derive(Debug)]
enum SwitchDriverMessage {
Request {
request: SwitchRequest,
respond_to: oneshot::Sender<bool>,
},
Completion {
request: SwitchRequest,
success: bool,
},
}
struct SwitchScope;
impl SwitchScope {
fn begin() -> Self {
CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst);
Self
}
}
impl Drop for SwitchScope {
fn drop(&mut self) {
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
}
}
fn switch_driver_sender() -> &'static mpsc::Sender<SwitchDriverMessage> {
SWITCH_QUEUE.get_or_init(|| {
let (tx, mut rx) = mpsc::channel::<SwitchDriverMessage>(SWITCH_QUEUE_CAPACITY);
let driver_tx = tx.clone();
tokio::spawn(async move {
let mutex = SWITCH_MUTEX.get_or_init(|| Mutex::new(()));
let mut state = SwitchDriverState::default();
while let Some(message) = rx.recv().await {
match message {
SwitchDriverMessage::Request {
request,
respond_to,
} => {
let accepted = true;
let mut responder = Some(respond_to);
if let Some(active) = &mut state.active
&& active.profile_id == request.profile_id
{
active.notify |= request.notify;
if let Some(sender) = responder.take() {
let _ = sender.send(accepted);
}
continue;
}
if let Some(existing) = state
.queue
.iter_mut()
.find(|queued| queued.profile_id == request.profile_id)
{
existing.notify |= request.notify;
if let Some(sender) = responder.take() {
let _ = sender.send(accepted);
}
continue;
}
if state.active.is_none() {
state.active = Some(request.clone());
if let Some(sender) = responder.take() {
let _ = sender.send(accepted);
}
start_switch_job(driver_tx.clone(), mutex, request);
} else {
state.queue.push_back(request.clone());
if let Some(sender) = responder.take() {
let _ = sender.send(accepted);
}
}
}
SwitchDriverMessage::Completion { request, success } => {
logging!(
info,
Type::Cmd,
"Switch task {} completed (success={})",
request.task_id,
success
);
if let Some(active) = &state.active
&& active.task_id == request.task_id
{
state.active = None;
}
if state.active.is_none()
&& let Some(next) = state.queue.pop_front()
{
state.active = Some(next.clone());
start_switch_job(driver_tx.clone(), mutex, next);
}
}
}
}
});
tx
})
}
fn start_switch_job(
driver_tx: mpsc::Sender<SwitchDriverMessage>,
mutex: &'static Mutex<()>,
request: SwitchRequest,
) {
AsyncHandler::spawn(move || async move {
let success = run_switch_job(mutex, &request).await;
let task_id = request.task_id;
let profile_id = request.profile_id.clone();
if let Err(err) = driver_tx
.send(SwitchDriverMessage::Completion { request, success })
.await
{
logging!(
error,
Type::Cmd,
"Failed to push switch completion to driver (task={} profile={}): {}",
task_id,
profile_id,
err
);
}
});
}
async fn run_switch_job(mutex: &'static Mutex<()>, request: &SwitchRequest) -> bool {
pub(super) async fn run_switch_job(mutex: &'static Mutex<()>, request: &SwitchRequest) -> bool {
let profile_id = request.profile_id.clone();
let task_id = request.task_id;
let notify = request.notify;
@@ -309,137 +147,12 @@ async fn run_switch_job(mutex: &'static Mutex<()>, request: &SwitchRequest) -> b
}
}
/// Best-effort cleanup: ask the core to drop all live connections after a
/// profile switch, bounded by SWITCH_CLEANUP_TIMEOUT. Failures and timeouts
/// are logged as warnings and never propagated to the caller.
async fn close_connections_after_switch(profile_id: &str) {
    match time::timeout(SWITCH_CLEANUP_TIMEOUT, async {
        handle::Handle::mihomo().await.close_all_connections().await
    })
    .await
    {
        Ok(Ok(())) => {}
        Ok(Err(err)) => {
            // The core refused or failed the request.
            logging!(
                warn,
                Type::Cmd,
                "Failed to close connections after profile switch ({}): {}",
                profile_id,
                err
            );
        }
        Err(_) => {
            // The timeout elapsed before the core answered.
            logging!(
                warn,
                Type::Cmd,
                "Closing connections after profile switch ({}) timed out after {:?}",
                profile_id,
                SWITCH_CLEANUP_TIMEOUT
            );
        }
    }
}
/// Render a panic payload as readable text: `&str` and `String` payloads are
/// extracted, anything else is reported as "unknown panic".
fn describe_panic_payload(payload: &(dyn Any + Send)) -> String {
    if let Some(message) = payload.downcast_ref::<&str>() {
        (*message).to_string().into()
    } else if let Some(message) = payload.downcast_ref::<std::string::String>() {
        message.clone().into()
    } else {
        "unknown panic".into()
    }
}
/// Apply a profiles patch while holding the global switch lock, so a config
/// patch never races an in-flight profile switch.
pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
    let mutex = SWITCH_MUTEX.get_or_init(|| Mutex::new(()));
    let _guard = mutex.lock().await;
    patch_profiles_config_internal(profiles).await
}
/// Convenience wrapper: switch by profile index without requesting a success
/// notification.
pub(super) async fn patch_profiles_config_by_profile_index(
    profile_index: String,
) -> CmdResult<bool> {
    switch_profile(profile_index, false).await
}
pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult<bool> {
let sender = switch_driver_sender();
let task_id = SWITCH_TASK_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1;
logging!(
info,
Type::Cmd,
"Queue profile switch task {} -> {} (notify={})",
task_id,
profile_index,
notify_success
);
let request = SwitchRequest {
task_id,
profile_id: profile_index.clone(),
notify: notify_success,
};
let (tx, rx) = oneshot::channel();
match sender.try_send(SwitchDriverMessage::Request {
request,
respond_to: tx,
}) {
Ok(_) => match rx.await {
Ok(result) => Ok(result),
Err(err) => {
logging!(
error,
Type::Cmd,
"Failed to receive enqueue result for profile {}: {}",
profile_index,
err
);
Err("switch profile queue unavailable".into())
}
},
Err(TrySendError::Full(msg)) => {
logging!(
warn,
Type::Cmd,
"Profile switch queue is full; waiting for space: {}",
profile_index
);
match sender.send(msg).await {
Ok(_) => match rx.await {
Ok(result) => Ok(result),
Err(err) => {
logging!(
error,
Type::Cmd,
"Failed to receive enqueue result after wait for {}: {}",
profile_index,
err
);
Err("switch profile queue unavailable".into())
}
},
Err(err) => {
logging!(
error,
Type::Cmd,
"Profile switch queue closed while waiting ({}): {}",
profile_index,
err
);
Err("switch profile queue unavailable".into())
}
}
}
Err(TrySendError::Closed(_)) => {
logging!(
error,
Type::Cmd,
"Profile switch queue is closed, cannot enqueue: {}",
profile_index
);
Err("switch profile queue unavailable".into())
}
}
}
async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool> {
if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) {
logging!(
@@ -451,7 +164,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
}
let _switch_guard = SwitchScope::begin();
// Assign a sequence number to the current request
let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1;
let target_profile = profiles.current.clone();
@@ -475,17 +187,14 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
return Ok(false);
}
// Save the current configuration so it can be restored if validation fails
let current_profile = Config::profiles().await.latest_ref().current.clone();
logging!(info, Type::Cmd, "Current profile: {:?}", current_profile);
// Before switching, validate the target profile for syntax errors
if let Some(new_profile) = profiles.current.as_ref()
&& current_profile.as_ref() != Some(new_profile)
{
logging!(info, Type::Cmd, "Switching to new profile: {}", new_profile);
// Resolve the target profile file path
let config_file_result = {
let profiles_config = Config::profiles().await;
let profiles_data = profiles_config.latest_ref();
@@ -510,7 +219,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
}
};
// If we have a file path, validate YAML syntax
if let Some(file_path) = config_file_result {
if !file_path.exists() {
logging!(
@@ -526,7 +234,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
return Ok(false);
}
// Timeout guard
let file_read_result =
time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&file_path)).await;
@@ -588,7 +295,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
}
}
// Validate the request after acquiring the lock
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
if current_sequence < latest_sequence {
logging!(
@@ -601,7 +307,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
return Ok(false);
}
// Update profiles configuration
logging!(
info,
Type::Cmd,
@@ -613,7 +318,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
let _ = Config::profiles().await.draft_mut().patch_config(profiles);
// Before invoking the core, validate the request again
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
if current_sequence < latest_sequence {
logging!(
@@ -627,7 +331,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
return Ok(false);
}
// Add timeout protection for the configuration update
logging!(
info,
Type::Cmd,
@@ -635,15 +338,13 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
current_sequence
);
let update_result = time::timeout(
Duration::from_secs(30), // 30-second timeout
Duration::from_secs(30),
CoreManager::global().update_config(),
)
.await;
// Apply the configuration and validate the result
match update_result {
Ok(Ok((true, _))) => {
// After the core operation completes, verify the request again
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
if current_sequence < latest_sequence {
logging!(
@@ -666,27 +367,33 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
Config::profiles().await.apply();
handle::Handle::refresh_clash();
// Force refresh proxy cache to ensure the latest nodes are fetched after switching
// crate::process::AsyncHandler::spawn(|| async move {
// if let Err(e) = super::proxy::force_refresh_proxies().await {
// log::warn!(target: "app", "Failed to force refresh proxy cache: {e}");
// }
// });
if let Err(e) = Tray::global().update_tooltip().await {
log::warn!(target: "app", "Failed to update tray tooltip asynchronously: {e}");
logging!(
warn,
Type::Cmd,
"Failed to update tray tooltip asynchronously: {}",
e
);
}
if let Err(e) = Tray::global().update_menu().await {
log::warn!(target: "app", "Failed to update tray menu asynchronously: {e}");
logging!(
warn,
Type::Cmd,
"Failed to update tray menu asynchronously: {}",
e
);
}
// Persist configuration file
if let Err(e) = profiles_save_file_safe().await {
log::warn!(target: "app", "Failed to persist configuration file asynchronously: {e}");
logging!(
warn,
Type::Cmd,
"Failed to persist configuration file asynchronously: {}",
e
);
}
// Immediately notify the frontend about the configuration change
if let Some(current) = &current_value {
logging!(
info,
@@ -709,7 +416,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
error_msg
);
Config::profiles().await.discard();
// If validation fails, restore the previous configuration
if let Some(prev_profile) = current_profile {
logging!(
info,
@@ -721,7 +427,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
current: Some(prev_profile),
items: None,
};
// Restore silently without triggering validation
Config::profiles()
.await
.draft_mut()
@@ -729,9 +434,14 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
.stringify_err()?;
Config::profiles().await.apply();
crate::process::AsyncHandler::spawn(|| async move {
AsyncHandler::spawn(|| async move {
if let Err(e) = profiles_save_file_safe().await {
log::warn!(target: "app", "Failed to persist restored configuration asynchronously: {e}");
logging!(
warn,
Type::Cmd,
"Failed to persist restored configuration asynchronously: {}",
e
);
}
});
@@ -742,7 +452,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
);
}
// Emit validation error notification
handle::Handle::notice_message("config_validate::error", error_msg.to_string());
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
Ok(false)
@@ -762,7 +471,6 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
Ok(false)
}
Err(_) => {
// Timeout handling
let timeout_msg = "Configuration update timed out (30s); possible validation or core communication stall";
logging!(
error,
@@ -800,70 +508,40 @@ async fn patch_profiles_config_internal(profiles: IProfiles) -> CmdResult<bool>
}
}
/// Validate profile switch request before queueing.
pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> {
logging!(
info,
Type::Cmd,
"Validating profile switch task {} -> {}",
task_id,
profile_id
);
let profile_key: String = profile_id.into();
let (file_path, profile_type, is_current, remote_url) = {
let profiles_guard = Config::profiles().await;
let latest = profiles_guard.latest_ref();
let item = latest.get_item(&profile_key).map_err(|err| -> String {
format!("Target profile {} not found: {}", profile_id, err).into()
})?;
(
item.file.clone().map(|f| f.to_string()),
item.itype.clone().map(|t| t.to_string()),
latest
.current
.as_ref()
.map(|current| current.as_str() == profile_id)
.unwrap_or(false),
item.url.clone().map(|u| u.to_string()),
)
};
if is_current {
logging!(
info,
Type::Cmd,
"Switch task {} is targeting the current profile {}; skipping validation",
task_id,
profile_id
);
return Ok(());
}
if matches!(profile_type.as_deref(), Some("remote")) {
let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false);
if !has_url {
return Err({
let msg = format!("Remote profile {} is missing a download URL", profile_id);
msg.into()
});
async fn close_connections_after_switch(profile_id: &str) {
match time::timeout(SWITCH_CLEANUP_TIMEOUT, async {
handle::Handle::mihomo().await.close_all_connections().await
})
.await
{
Ok(Ok(())) => {}
Ok(Err(err)) => {
logging!(
warn,
Type::Cmd,
"Failed to close connections after profile switch ({}): {}",
profile_id,
err
);
}
Err(_) => {
logging!(
warn,
Type::Cmd,
"Closing connections after profile switch ({}) timed out after {:?}",
profile_id,
SWITCH_CLEANUP_TIMEOUT
);
}
}
if let Some(file) = file_path {
let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String {
format!("Failed to resolve profiles directory: {}", err).into()
})?;
let path = profiles_dir.join(&file);
let contents = fs::read_to_string(&path).map_err(|err| -> String {
format!("Failed to read profile file {}: {}", path.display(), err).into()
})?;
serde_yaml::from_str::<serde_yaml::Value>(&contents).map_err(|err| -> String {
format!("Profile YAML parse failed for {}: {}", path.display(), err).into()
})?;
}
Ok(())
}
/// Render a panic payload as human-readable text.
///
/// Panics raised via `panic!` carry either a `&str` or an owned `String`;
/// any other payload type is reported as "unknown panic".
fn describe_panic_payload(payload: &(dyn Any + Send)) -> String {
    match payload.downcast_ref::<&str>() {
        Some(message) => (*message).to_string(),
        None => match payload.downcast_ref::<std::string::String>() {
            Some(message) => message.clone(),
            None => "unknown panic".into(),
        },
    }
}