// clash-proxy/src/components/proxy/provider-button.tsx

import { RefreshRounded, StorageOutlined } from "@mui/icons-material";
import {
Box,
Button,
Dialog,
DialogActions,
DialogContent,
DialogTitle,
Divider,
IconButton,
LinearProgress,
List,
ListItem,
ListItemText,
Typography,
alpha,
styled,
} from "@mui/material";
import { useLockFn } from "ahooks";
import dayjs from "dayjs";
import relativeTime from "dayjs/plugin/relativeTime";
import { useState } from "react";
import { useTranslation } from "react-i18next";
import { updateProxyProvider } from "tauri-plugin-mihomo-api";
import { useAppData } from "@/providers/app-data-context";
import { showNotice } from "@/services/noticeService";
import parseTraffic from "@/utils/parse-traffic";
// fromNow() requires the relativeTime plugin; extending here is a no-op if the
// app already registers it globally.
dayjs.extend(relativeTime);

// Styled component: small outlined badge for provider type / proxy count
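// Used below as e.g. <TypeBox component="span">{provider.vehicleType}</TypeBox>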
const TypeBox = styled(Box)<{ component?: React.ElementType }>(({ theme }) => ({
display: "inline-block",
border: "1px solid #ccc",
borderColor: alpha(theme.palette.secondary.main, 0.5),
color: alpha(theme.palette.secondary.main, 0.8),
borderRadius: 4,
fontSize: 10,
marginRight: "4px",
padding: "0 2px",
lineHeight: 1.25,
}));
// Format a Unix-seconds expiry timestamp as a date string
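// e.g. parseExpire(1735732800) -> "2025-01-01" (local date, in most timezones);
// a missing value -> "-"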
const parseExpire = (expire?: number) => {
if (!expire) return "-";
return dayjs(expire * 1000).format("YYYY-MM-DD");
};
export const ProviderButton = () => {
const { t } = useTranslation();
const [open, setOpen] = useState(false);
const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData();
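// Per-provider flag marking an update request as in flight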
const [updating, setUpdating] = useState<Record<string, boolean>>({});
// Whether any proxy providers exist
const hasProviders = Object.keys(proxyProviders || {}).length > 0;
// Update a single proxy provider; useLockFn drops re-entrant calls while one is in flight
const updateProvider = useLockFn(async (name: string) => {
try {
// Mark this provider as updating
setUpdating((prev) => ({ ...prev, [name]: true }));
await updateProxyProvider(name);
// Refresh data
await refreshProxy();
await refreshProxyProviders();
showNotice.success("providers.notices.updateSuccess", {
name,
});
} catch (err) {
showNotice.error("providers.notices.updateFailed", {
name,
message: String(err),
});
} finally {
// Clear the updating flag
setUpdating((prev) => ({ ...prev, [name]: false }));
}
});
// Update all proxy providers
const updateAllProviders = useLockFn(async () => {
try {
// Collect every provider name
const allProviders = Object.keys(proxyProviders || {});
if (allProviders.length === 0) {
showNotice.info("providers.notices.none");
return;
}
// Mark all providers as updating
const newUpdating = allProviders.reduce(
(acc, key) => {
acc[key] = true;
return acc;
},
{} as Record<string, boolean>,
);
setUpdating(newUpdating);
// Update providers sequentially, one at a time, to avoid overloading the backend
for (const name of allProviders) {
try {
await updateProxyProvider(name);
// Clear the flag as each update finishes
setUpdating((prev) => ({ ...prev, [name]: false }));
} catch (err) {
console.error(`Failed to update ${name}`, err);
// Continue with the next provider instead of aborting the whole run
}
}
// Refresh data
await refreshProxy();
await refreshProxyProviders();
showNotice.success("providers.notices.allUpdated");
} catch (err) {
showNotice.error("providers.notices.genericError", {
message: String(err),
});
} finally {
// Clear all updating flags
setUpdating({});
}
});
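// A bounded-concurrency alternative (sketch, not wired in): inside
// updateAllProviders, update in small batches instead of strictly one-by-one,
// trading a bit more backend load for a faster overall refresh:
//   const BATCH = 3;
//   for (let i = 0; i < allProviders.length; i += BATCH) {
//     await Promise.allSettled(
//       allProviders.slice(i, i + BATCH).map((n) => updateProxyProvider(n)),
//     );
//   }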
const handleClose = () => {
setOpen(false);
};
if (!hasProviders) return null;
return (
<>
<Button
variant="outlined"
size="small"
startIcon={<StorageOutlined />}
onClick={() => setOpen(true)}
sx={{ mr: 1 }}
>
{t("proxies.page.provider.title")}
</Button>
<Dialog open={open} onClose={handleClose} maxWidth="sm" fullWidth>
<DialogTitle>
<Box
display="flex"
justifyContent="space-between"
alignItems="center"
>
<Typography variant="h6">
{t("proxies.page.provider.title")}
</Typography>
<Box>
<Button
variant="contained"
size="small"
onClick={updateAllProviders}
aria-label={t("proxies.page.provider.actions.updateAll")}
>
{t("proxies.page.provider.actions.updateAll")}
</Button>
</Box>
</Box>
</DialogTitle>
<DialogContent>
<List sx={{ py: 0, minHeight: 250 }}>
{Object.entries(proxyProviders || {})
.sort(([a], [b]) => a.localeCompare(b))
.map(([key, provider]) => {
const time = dayjs(provider.updatedAt);
const isUpdating = updating[key];
// Subscription info (Upload/Download/Total in bytes, Expire in epoch seconds)
const sub = provider.subscriptionInfo;
const hasSubInfo = !!sub;
const upload = sub?.Upload || 0;
const download = sub?.Download || 0;
const total = sub?.Total || 0;
const expire = sub?.Expire || 0;
// Traffic usage percentage; the +1 keeps tiny usage visible, capped at 100
const progress =
total > 0
? Math.min(
Math.round(((download + upload) * 100) / total) + 1,
100,
)
: 0;
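// e.g. 30 GiB used of a 100 GiB total: round(30 * 100 / 100) + 1 = 31 (%)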
return (
<ListItem
key={key}
sx={[
{
p: 0,
mb: "8px",
borderRadius: 2,
overflow: "hidden",
transition: "all 0.2s",
},
({ palette: { mode, primary } }) => {
const bgcolor =
mode === "light" ? "#ffffff" : "#24252f";
const hoverColor =
mode === "light"
? alpha(primary.main, 0.1)
: alpha(primary.main, 0.2);
return {
backgroundColor: bgcolor,
"&:hover": {
backgroundColor: hoverColor,
},
};
},
]}
>
<ListItemText
sx={{ px: 2, py: 1 }}
primary={
<Box
sx={{
display: "flex",
justifyContent: "space-between",
alignItems: "center",
}}
>
<Typography
variant="subtitle1"
component="div"
noWrap
title={key}
sx={{ display: "flex", alignItems: "center" }}
>
<span style={{ marginRight: "8px" }}>{key}</span>
<TypeBox component="span">
{provider.proxies.length}
</TypeBox>
<TypeBox component="span">
{provider.vehicleType}
</TypeBox>
</Typography>
<Typography
variant="body2"
color="text.secondary"
noWrap
>
<small>{t("Update At")}: </small>
{time.fromNow()}
</Typography>
</Box>
}
secondary={
<>
{/* Subscription info */}
{hasSubInfo && (
<>
<Box
sx={{
mb: 1,
display: "flex",
alignItems: "center",
justifyContent: "space-between",
}}
>
<span title={t("Used / Total") as string}>
{parseTraffic(upload + download)} /{" "}
{parseTraffic(total)}
</span>
<span title={t("Expire Time") as string}>
{parseExpire(expire)}
</span>
</Box>
{/* Usage progress bar */}
<LinearProgress
variant="determinate"
value={progress}
sx={{
height: 6,
borderRadius: 3,
opacity: total > 0 ? 1 : 0,
}}
/>
</>
)}
</>
}
/>
<Divider orientation="vertical" flexItem />
<Box
sx={{
width: 40,
display: "flex",
justifyContent: "center",
alignItems: "center",
}}
>
<IconButton
size="small"
color="primary"
onClick={() => {
updateProvider(key);
}}
disabled={isUpdating}
sx={{
animation: isUpdating
? "spin 1s linear infinite"
: "none",
"@keyframes spin": {
"0%": { transform: "rotate(0deg)" },
"100%": { transform: "rotate(360deg)" },
},
}}
title={t("proxies.page.provider.actions.update")}
aria-label={t("proxies.page.provider.actions.update")}
>
<RefreshRounded />
</IconButton>
</Box>
</ListItem>
);
})}
</List>
</DialogContent>
<DialogActions>
<Button onClick={handleClose} variant="outlined">
{t("Close")}
</Button>
</DialogActions>
</Dialog>
</>
);
};