clash-proxy/src-tauri/src/core/notification.rs

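//! Frontend notification system: queues backend events on a dedicated worker
//! thread and emits them to the Tauri webview, tracking emit failures and
//! switching to an emergency mode that drops informational notices.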

use super::handle::Handle;
use crate::{
    constants::{retry, timing},
    logging,
    utils::logging::Type,
};
use parking_lot::RwLock;
use smartstring::alias::String;
use std::{
    sync::{
        atomic::{AtomicU64, Ordering},
        mpsc,
    },
    thread,
    time::Instant,
};
use tauri::{Emitter, WebviewWindow};
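
/// Events pushed from the backend to the frontend webview.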
#[derive(Debug, Clone)]
pub enum FrontendEvent {
    RefreshClash,
    RefreshVerge,
    NoticeMessage { status: String, message: String },
    ProfileChanged { current_profile_id: String },
    TimerUpdated { profile_index: String },
    ProfileUpdateStarted { uid: String },
    ProfileUpdateCompleted { uid: String },
}
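
/// Counters for emitted events and emit failures, plus the time of the last failure.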
#[derive(Debug, Default)]
struct EventStats {
    total_sent: AtomicU64,
    total_errors: AtomicU64,
    last_error_time: RwLock<Option<Instant>>,
}
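
/// Status/message pair describing an error notice surfaced to the UI.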
#[derive(Debug, Clone)]
pub struct ErrorMessage {
    pub status: String,
    pub message: String,
}
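
/// Queues `FrontendEvent`s and emits them to the webview from a background
/// worker thread, tracking failures and an emergency mode.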
#[derive(Debug)]
pub struct NotificationSystem {
    sender: Option<mpsc::Sender<FrontendEvent>>,
    #[allow(clippy::type_complexity)]
    worker_handle: Option<thread::JoinHandle<()>>,
    pub(super) is_running: bool,
    stats: EventStats,
    emergency_mode: RwLock<bool>,
}

impl Default for NotificationSystem {
    fn default() -> Self {
        Self::new()
    }
}

impl NotificationSystem {
    pub fn new() -> Self {
        Self {
            sender: None,
            worker_handle: None,
            is_running: false,
            stats: EventStats::default(),
            emergency_mode: RwLock::new(false),
        }
    }
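
    /// Spawn the background worker thread that forwards queued events to the webview.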
    pub fn start(&mut self) {
        if self.is_running {
            return;
        }
        let (tx, rx) = mpsc::channel();
        self.sender = Some(tx);
        self.is_running = true;
        let result = thread::Builder::new()
            .name("frontend-notifier".into())
            .spawn(move || Self::worker_loop(rx));
        match result {
            Ok(handle) => self.worker_handle = Some(handle),
            Err(e) => logging!(
                error,
                Type::System,
                "Failed to start notification worker: {}",
                e
            ),
        }
    }
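
    /// Worker-thread loop: drain queued events until the app exits or the channel closes.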
    fn worker_loop(rx: mpsc::Receiver<FrontendEvent>) {
        let handle = Handle::global();
        while !handle.is_exiting() {
            match rx.try_recv() {
                Ok(event) => Self::process_event(handle, event),
                Err(mpsc::TryRecvError::Disconnected) => break,
                // An empty queue is not a reason to exit: back off briefly and keep
                // polling (EVENT_EMIT_DELAY is reused here as the idle interval).
                Err(mpsc::TryRecvError::Empty) => thread::sleep(timing::EVENT_EMIT_DELAY),
            }
        }
    }

    // Clippy appears to report a false positive for parking_lot's RwLock here,
    // so the relevant lint is disabled.
    #[allow(clippy::significant_drop_tightening)]
    fn process_event(handle: &Handle, event: FrontendEvent) {
        let binding = handle.notification_system.read();
        let system = match binding.as_ref() {
            Some(s) => s,
            None => return,
        };
        if system.should_skip_event(&event) {
            return;
        }
        if let Some(window) = Handle::get_window() {
            system.emit_to_window(&window, event);
            thread::sleep(timing::EVENT_EMIT_DELAY);
        }
    }
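
    /// Returns true when the event should be dropped; in emergency mode,
    /// informational notices are skipped to reduce pressure on the webview.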
    fn should_skip_event(&self, event: &FrontendEvent) -> bool {
        let is_emergency = *self.emergency_mode.read();
        matches!(
            (is_emergency, event),
            (true, FrontendEvent::NoticeMessage { status, .. }) if status == "info"
        )
    }
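
    /// Serialize the event and emit it to the given window, recording success or failure stats.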
    fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) {
        let (event_name, payload) = self.serialize_event(event);
        let Ok(payload) = payload else {
            self.stats.total_errors.fetch_add(1, Ordering::Relaxed);
            return;
        };
        match window.emit(event_name, payload) {
            Ok(_) => {
                self.stats.total_sent.fetch_add(1, Ordering::Relaxed);
            }
            Err(e) => {
                logging!(warn, Type::Frontend, "Event emit failed: {}", e);
                self.handle_emit_error();
            }
        }
    }
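
    /// Map an event to its Tauri event name and JSON payload.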
    fn serialize_event(
        &self,
        event: FrontendEvent,
    ) -> (&'static str, Result<serde_json::Value, serde_json::Error>) {
        use serde_json::json;
        match event {
            FrontendEvent::RefreshClash => ("verge://refresh-clash-config", Ok(json!("yes"))),
            FrontendEvent::RefreshVerge => ("verge://refresh-verge-config", Ok(json!("yes"))),
            FrontendEvent::NoticeMessage { status, message } => (
                "verge://notice-message",
                serde_json::to_value((status, message)),
            ),
            FrontendEvent::ProfileChanged { current_profile_id } => {
                ("profile-changed", Ok(json!(current_profile_id)))
            }
            FrontendEvent::TimerUpdated { profile_index } => {
                ("verge://timer-updated", Ok(json!(profile_index)))
            }
            FrontendEvent::ProfileUpdateStarted { uid } => {
                ("profile-update-started", Ok(json!({ "uid": uid })))
            }
            FrontendEvent::ProfileUpdateCompleted { uid } => {
                ("profile-update-completed", Ok(json!({ "uid": uid })))
            }
        }
    }
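
    /// Record an emit failure and enter emergency mode once the error threshold is exceeded.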
    fn handle_emit_error(&self) {
        self.stats.total_errors.fetch_add(1, Ordering::Relaxed);
        *self.stats.last_error_time.write() = Some(Instant::now());
        let errors = self.stats.total_errors.load(Ordering::Relaxed);
        if errors > retry::EVENT_EMIT_THRESHOLD && !*self.emergency_mode.read() {
            logging!(
                warn,
                Type::Frontend,
                "Entering emergency mode after {} errors",
                errors
            );
            *self.emergency_mode.write() = true;
        }
    }
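
    /// Queue an event for the worker thread. Returns `false` if the event is
    /// skipped, the system has not been started, or the channel is closed.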
    pub fn send_event(&self, event: FrontendEvent) -> bool {
        if self.should_skip_event(&event) {
            return false;
        }
        if let Some(sender) = &self.sender {
            sender.send(event).is_ok()
        } else {
            false
        }
    }
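
    /// Stop the worker: close the channel by dropping the sender, then join the worker thread.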
    pub fn shutdown(&mut self) {
        self.is_running = false;
        if let Some(sender) = self.sender.take() {
            drop(sender);
        }
        if let Some(handle) = self.worker_handle.take() {
            let _ = handle.join();
        }
    }
}