From 45c68424f0e6982a6ba09f8a64a74978d00d7a16 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 20:38:17 +0800 Subject: [PATCH 01/70] chore(deps): update npm dependencies (#5218) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 12 +++++----- pnpm-lock.yaml | 64 +++++++++++++++++++++++++------------------------- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/package.json b/package.json index 297f3173..3672bb0d 100644 --- a/package.json +++ b/package.json @@ -45,12 +45,12 @@ "@mui/material": "^7.3.4", "@mui/x-data-grid": "^8.15.0", "@tauri-apps/api": "2.9.0", - "@tauri-apps/plugin-clipboard-manager": "^2.3.1", - "@tauri-apps/plugin-dialog": "^2.4.1", - "@tauri-apps/plugin-fs": "^2.4.3", - "@tauri-apps/plugin-http": "~2.5.3", - "@tauri-apps/plugin-process": "^2.3.0", - "@tauri-apps/plugin-shell": "2.3.2", + "@tauri-apps/plugin-clipboard-manager": "^2.3.2", + "@tauri-apps/plugin-dialog": "^2.4.2", + "@tauri-apps/plugin-fs": "^2.4.4", + "@tauri-apps/plugin-http": "~2.5.4", + "@tauri-apps/plugin-process": "^2.3.1", + "@tauri-apps/plugin-shell": "2.3.3", "@tauri-apps/plugin-updater": "2.9.0", "@types/json-schema": "^7.0.15", "ahooks": "^3.9.6", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dd960284..66476e1e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -42,23 +42,23 @@ importers: specifier: 2.9.0 version: 2.9.0 '@tauri-apps/plugin-clipboard-manager': + specifier: ^2.3.2 + version: 2.3.2 + '@tauri-apps/plugin-dialog': + specifier: ^2.4.2 + version: 2.4.2 + '@tauri-apps/plugin-fs': + specifier: ^2.4.4 + version: 2.4.4 + '@tauri-apps/plugin-http': + specifier: ~2.5.4 + version: 2.5.4 + '@tauri-apps/plugin-process': specifier: ^2.3.1 version: 2.3.1 - '@tauri-apps/plugin-dialog': - specifier: ^2.4.1 - version: 2.4.1 - '@tauri-apps/plugin-fs': - specifier: ^2.4.3 - version: 2.4.3 - '@tauri-apps/plugin-http': - specifier: ~2.5.3 - 
version: 2.5.3 - '@tauri-apps/plugin-process': - specifier: ^2.3.0 - version: 2.3.0 '@tauri-apps/plugin-shell': - specifier: 2.3.2 - version: 2.3.2 + specifier: 2.3.3 + version: 2.3.3 '@tauri-apps/plugin-updater': specifier: 2.9.0 version: 2.9.0 @@ -1695,23 +1695,23 @@ packages: engines: {node: '>= 10'} hasBin: true - '@tauri-apps/plugin-clipboard-manager@2.3.1': - resolution: {integrity: sha512-lQMaUSFs5my8oEHuvQOLJoPHfxRJNn0gwXyTsyMhtQZBD4sYLiDux8p+EsagY6vv5SkSYWVViJYvlcPIjJ+Dog==} + '@tauri-apps/plugin-clipboard-manager@2.3.2': + resolution: {integrity: sha512-CUlb5Hqi2oZbcZf4VUyUH53XWPPdtpw43EUpCza5HWZJwxEoDowFzNUDt1tRUXA8Uq+XPn17Ysfptip33sG4eQ==} - '@tauri-apps/plugin-dialog@2.4.1': - resolution: {integrity: sha512-2eAueoxstrUnAf5cZWT9A/jzh4mTdUu646Q8zEX0a3RQmht7fQhdhxWfgQH4/of8iy1etDLKzokXbF2CxdBFHg==} + '@tauri-apps/plugin-dialog@2.4.2': + resolution: {integrity: sha512-lNIn5CZuw8WZOn8zHzmFmDSzg5zfohWoa3mdULP0YFh/VogVdMVWZPcWSHlydsiJhRQYaTNSYKN7RmZKE2lCYQ==} - '@tauri-apps/plugin-fs@2.4.3': - resolution: {integrity: sha512-/ZVHrwf/FTMSReWgMfiraeJjBcIr4QrDQC5BalvSSLXgtEiELPjQkRaXox7zG7z5nB04m/TwTLqfeeBROg0LLQ==} + '@tauri-apps/plugin-fs@2.4.4': + resolution: {integrity: sha512-MTorXxIRmOnOPT1jZ3w96vjSuScER38ryXY88vl5F0uiKdnvTKKTtaEjTEo8uPbl4e3gnUtfsDVwC7h77GQLvQ==} - '@tauri-apps/plugin-http@2.5.3': - resolution: {integrity: sha512-YiizgUWd9jQBPGX2x3k1l6qdBiDYOo3FjqMEjkeJ0I8IRIkp6OQ8ff6fRcaiUt0sl8+h3r4983+6O/m+//PMVQ==} + '@tauri-apps/plugin-http@2.5.4': + resolution: {integrity: sha512-/i4U/9za3mrytTgfRn5RHneKubZE/dwRmshYwyMvNRlkWjvu1m4Ma72kcbVJMZFGXpkbl+qLyWMGrihtWB76Zg==} - '@tauri-apps/plugin-process@2.3.0': - resolution: {integrity: sha512-0DNj6u+9csODiV4seSxxRbnLpeGYdojlcctCuLOCgpH9X3+ckVZIEj6H7tRQ7zqWr7kSTEWnrxtAdBb0FbtrmQ==} + '@tauri-apps/plugin-process@2.3.1': + resolution: {integrity: sha512-nCa4fGVaDL/B9ai03VyPOjfAHRHSBz5v6F/ObsB73r/dA3MHHhZtldaDMIc0V/pnUw9ehzr2iEG+XkSEyC0JJA==} - '@tauri-apps/plugin-shell@2.3.2': - resolution: {integrity: 
sha512-pop78bu3T25UVxL6kn/dFc+LZQhHB9WHCUoLIrXPagO4hlEGtdOKVEnIzQr4E9X8COrBAKcR/G/rNWuim8eEOg==} + '@tauri-apps/plugin-shell@2.3.3': + resolution: {integrity: sha512-Xod+pRcFxmOWFWEnqH5yZcA7qwAMuaaDkMR1Sply+F8VfBj++CGnj2xf5UoialmjZ2Cvd8qrvSCbU+7GgNVsKQ==} '@tauri-apps/plugin-updater@2.9.0': resolution: {integrity: sha512-j++sgY8XpeDvzImTrzWA08OqqGqgkNyxczLD7FjNJJx/uXxMZFz5nDcfkyoI/rCjYuj2101Tci/r/HFmOmoxCg==} @@ -5840,27 +5840,27 @@ snapshots: '@tauri-apps/cli-win32-ia32-msvc': 2.9.1 '@tauri-apps/cli-win32-x64-msvc': 2.9.1 - '@tauri-apps/plugin-clipboard-manager@2.3.1': + '@tauri-apps/plugin-clipboard-manager@2.3.2': dependencies: '@tauri-apps/api': 2.9.0 - '@tauri-apps/plugin-dialog@2.4.1': + '@tauri-apps/plugin-dialog@2.4.2': dependencies: '@tauri-apps/api': 2.9.0 - '@tauri-apps/plugin-fs@2.4.3': + '@tauri-apps/plugin-fs@2.4.4': dependencies: '@tauri-apps/api': 2.9.0 - '@tauri-apps/plugin-http@2.5.3': + '@tauri-apps/plugin-http@2.5.4': dependencies: '@tauri-apps/api': 2.9.0 - '@tauri-apps/plugin-process@2.3.0': + '@tauri-apps/plugin-process@2.3.1': dependencies: '@tauri-apps/api': 2.9.0 - '@tauri-apps/plugin-shell@2.3.2': + '@tauri-apps/plugin-shell@2.3.3': dependencies: '@tauri-apps/api': 2.9.0 From 5a29508407f3827f50fe105533211807cd563fbe Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 20:39:00 +0800 Subject: [PATCH 02/70] chore(deps): update cargo dependencies (#5217) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- src-tauri/Cargo.lock | 44 ++++++++++++++++++++++---------------------- src-tauri/Cargo.toml | 22 +++++++++++----------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 9b55a1a6..186e7ff6 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -7310,9 +7310,9 @@ dependencies = [ [[package]] name = "tauri-plugin-autostart" -version = "2.5.0" +version = "2.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "062cdcd483d5e3148c9a64dabf8c574e239e2aa1193cf208d95cf89a676f87a5" +checksum = "459383cebc193cdd03d1ba4acc40f2c408a7abce419d64bdcd2d745bc2886f70" dependencies = [ "auto-launch", "serde", @@ -7324,9 +7324,9 @@ dependencies = [ [[package]] name = "tauri-plugin-clipboard-manager" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97386ff464c30f491847e56355e9f3bd7ce82726c8c51c4ca93dc6bdb7993751" +checksum = "206dc20af4ed210748ba945c2774e60fd0acd52b9a73a028402caf809e9b6ecf" dependencies = [ "arboard", "log", @@ -7339,9 +7339,9 @@ dependencies = [ [[package]] name = "tauri-plugin-deep-link" -version = "2.4.4" +version = "2.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd913b8b96627ec87a847ee4fe101427d95100f6c565768c2361c47b70d02bff" +checksum = "6e82759f7c7d51de3cbde51c04b3f2332de52436ed84541182cd8944b04e9e73" dependencies = [ "dunce", "plist", @@ -7387,9 +7387,9 @@ dependencies = [ [[package]] name = "tauri-plugin-dialog" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34cadb63450580599193ebe3d69ce292888f1b56c26ea63563ff302f8fdf1f7" +checksum = "313f8138692ddc4a2127c4c9607d616a46f5c042e77b3722450866da0aad2f19" dependencies = [ "log", "raw-window-handle", @@ -7405,9 +7405,9 @@ dependencies = [ [[package]] name = "tauri-plugin-fs" -version = "2.4.3" +version = "2.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2864ee9907a87907ad710b5eab081a34c3c812af961d154976dab87f1fe39d12" +checksum = "47df422695255ecbe7bac7012440eddaeefd026656171eac9559f5243d3230d9" dependencies = [ "anyhow", "dunce", @@ -7427,9 +7427,9 @@ dependencies = [ [[package]] name = "tauri-plugin-global-shortcut" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6df9f0f7bf2fe768b85fee4951c2505a35b72c44df1f6403e74e110bc13c5f58" +checksum = "424af23c7e88d05e4a1a6fc2c7be077912f8c76bd7900fd50aa2b7cbf5a2c405" dependencies = [ "global-hotkey", "log", @@ -7442,9 +7442,9 @@ dependencies = [ [[package]] name = "tauri-plugin-http" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c51b7e91dd890ba1951c17ad35fd78eebb4da4fdd55347898faef546794e20f" +checksum = "c00685aceab12643cf024f712ab0448ba8fcadf86f2391d49d2e5aa732aacc70" dependencies = [ "bytes", "cookie_store", @@ -7491,9 +7491,9 @@ dependencies = [ [[package]] name = "tauri-plugin-notification" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ec42df990633cbe5148ae502b06421ab2851b890f1655965c7413fb4eb11f73" +checksum = "01fc2c5ff41105bd1f7242d8201fdf3efd70749b82fa013a17f2126357d194cc" dependencies = [ "log", "notify-rust", @@ -7510,9 +7510,9 @@ dependencies = [ [[package]] name = "tauri-plugin-process" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7461c622a5ea00eb9cd9f7a08dbd3bf79484499fd5c21aa2964677f64ca651ab" +checksum = "d55511a7bf6cd70c8767b02c97bf8134fa434daf3926cfc1be0a0f94132d165a" dependencies = [ "tauri", "tauri-plugin", @@ -7520,9 +7520,9 @@ dependencies = [ [[package]] name = "tauri-plugin-shell" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63110fea291eb6d9eb953fcb0455178257774161317ce3b87c082e81ef6776c6" +checksum = "c374b6db45f2a8a304f0273a15080d98c70cde86178855fc24653ba657a1144c" dependencies = [ "encoding_rs", "log", @@ -7573,9 +7573,9 @@ dependencies = [ [[package]] name = "tauri-plugin-window-state" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5f6fe3291bfa609c7e0b0ee3bedac294d94c7018934086ce782c1d0f2a468e" +checksum = 
"73736611e14142408d15353e21e3cca2f12a3cfb523ad0ce85999b6d2ef1a704" dependencies = [ "bitflags 2.10.0", "log", diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index f5c0e0be..4ac7cbd7 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -51,13 +51,13 @@ tauri = { version = "2.9.1", features = [ "image-png", ] } network-interface = { version = "2.0.3", features = ["serde"] } -tauri-plugin-shell = "2.3.2" -tauri-plugin-dialog = "2.4.1" -tauri-plugin-fs = "2.4.3" -tauri-plugin-process = "2.3.0" -tauri-plugin-clipboard-manager = "2.3.1" -tauri-plugin-deep-link = "2.4.4" -tauri-plugin-window-state = "2.4.0" +tauri-plugin-shell = "2.3.3" +tauri-plugin-dialog = "2.4.2" +tauri-plugin-fs = "2.4.4" +tauri-plugin-process = "2.3.1" +tauri-plugin-clipboard-manager = "2.3.2" +tauri-plugin-deep-link = "2.4.5" +tauri-plugin-window-state = "2.4.1" zip = "6.0.0" reqwest_dav = "0.2.2" aes-gcm = { version = "0.10.3", features = ["std"] } @@ -72,7 +72,7 @@ sha2 = "0.10.9" hex = "0.4.3" scopeguard = "1.2.0" dashmap = "6.1.0" -tauri-plugin-notification = "2.3.2" +tauri-plugin-notification = "2.3.3" tokio-stream = "0.1.17" isahc = { version = "1.7.2", default-features = false, features = [ "text-decoding", @@ -80,7 +80,7 @@ isahc = { version = "1.7.2", default-features = false, features = [ ] } backoff = { version = "0.4.0", features = ["tokio"] } compact_str = { version = "0.9.0", features = ["serde"] } -tauri-plugin-http = "2.5.3" +tauri-plugin-http = "2.5.4" flexi_logger = "0.31.7" console-subscriber = { version = "0.4.1", optional = true } tauri-plugin-devtools = { version = "2.0.1" } @@ -126,8 +126,8 @@ users = "0.11.0" signal-hook = "0.3.18" [target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies] -tauri-plugin-autostart = "2.5.0" -tauri-plugin-global-shortcut = "2.3.0" +tauri-plugin-autostart = "2.5.1" +tauri-plugin-global-shortcut = "2.3.1" tauri-plugin-updater = "2.9.0" [features] From 6df1e137f3a37cfffb24c6f3d2a11865b62fef7e Mon Sep 17 
00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 20:40:13 +0800 Subject: [PATCH 03/70] chore(deps): update dependency vitest to ^4.0.4 (#5221) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 86 +++++++++++++++++++++++++------------------------- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/package.json b/package.json index 3672bb0d..d8b0b192 100644 --- a/package.json +++ b/package.json @@ -119,7 +119,7 @@ "vite": "^7.1.12", "vite-plugin-monaco-editor": "^1.1.0", "vite-plugin-svgr": "^4.5.0", - "vitest": "^4.0.3" + "vitest": "^4.0.4" }, "lint-staged": { "*.{ts,tsx,js,jsx}": [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 66476e1e..87d4e856 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -259,8 +259,8 @@ importers: specifier: ^4.5.0 version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) vitest: - specifier: ^4.0.3 - version: 4.0.3(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + specifier: ^4.0.4 + version: 4.0.4(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) packages: @@ -1971,11 +1971,11 @@ packages: peerDependencies: vite: ^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 - '@vitest/expect@4.0.3': - resolution: {integrity: sha512-v3eSDx/bF25pzar6aEJrrdTXJduEBU3uSGXHslIdGIpJVP8tQQHV6x1ZfzbFQ/bLIomLSbR/2ZCfnaEGkWkiVQ==} + '@vitest/expect@4.0.4': + resolution: {integrity: sha512-0ioMscWJtfpyH7+P82sGpAi3Si30OVV73jD+tEqXm5+rIx9LgnfdaOn45uaFkKOncABi/PHL00Yn0oW/wK4cXw==} - '@vitest/mocker@4.0.3': - resolution: {integrity: sha512-evZcRspIPbbiJEe748zI2BRu94ThCBE+RkjCpVF8yoVYuTV7hMe+4wLF/7K86r8GwJHSmAPnPbZhpXWWrg1qbA==} + '@vitest/mocker@4.0.4': + resolution: {integrity: 
sha512-UTtKgpjWj+pvn3lUM55nSg34098obGhSHH+KlJcXesky8b5wCUgg7s60epxrS6yAG8slZ9W8T9jGWg4PisMf5Q==} peerDependencies: msw: ^2.4.9 vite: ^6.0.0 || ^7.0.0-0 @@ -1985,20 +1985,20 @@ packages: vite: optional: true - '@vitest/pretty-format@4.0.3': - resolution: {integrity: sha512-N7gly/DRXzxa9w9sbDXwD9QNFYP2hw90LLLGDobPNwiWgyW95GMxsCt29/COIKKh3P7XJICR38PSDePenMBtsw==} + '@vitest/pretty-format@4.0.4': + resolution: {integrity: sha512-lHI2rbyrLVSd1TiHGJYyEtbOBo2SDndIsN3qY4o4xe2pBxoJLD6IICghNCvD7P+BFin6jeyHXiUICXqgl6vEaQ==} - '@vitest/runner@4.0.3': - resolution: {integrity: sha512-1/aK6fPM0lYXWyGKwop2Gbvz1plyTps/HDbIIJXYtJtspHjpXIeB3If07eWpVH4HW7Rmd3Rl+IS/+zEAXrRtXA==} + '@vitest/runner@4.0.4': + resolution: {integrity: sha512-99EDqiCkncCmvIZj3qJXBZbyoQ35ghOwVWNnQ5nj0Hnsv4Qm40HmrMJrceewjLVvsxV/JSU4qyx2CGcfMBmXJw==} - '@vitest/snapshot@4.0.3': - resolution: {integrity: sha512-amnYmvZ5MTjNCP1HZmdeczAPLRD6iOm9+2nMRUGxbe/6sQ0Ymur0NnR9LIrWS8JA3wKE71X25D6ya/3LN9YytA==} + '@vitest/snapshot@4.0.4': + resolution: {integrity: sha512-XICqf5Gi4648FGoBIeRgnHWSNDp+7R5tpclGosFaUUFzY6SfcpsfHNMnC7oDu/iOLBxYfxVzaQpylEvpgii3zw==} - '@vitest/spy@4.0.3': - resolution: {integrity: sha512-82vVL8Cqz7rbXaNUl35V2G7xeNMAjBdNOVaHbrzznT9BmiCiPOzhf0FhU3eP41nP1bLDm/5wWKZqkG4nyU95DQ==} + '@vitest/spy@4.0.4': + resolution: {integrity: sha512-G9L13AFyYECo40QG7E07EdYnZZYCKMTSp83p9W8Vwed0IyCG1GnpDLxObkx8uOGPXfDpdeVf24P1Yka8/q1s9g==} - '@vitest/utils@4.0.3': - resolution: {integrity: sha512-qV6KJkq8W3piW6MDIbGOmn1xhvcW4DuA07alqaQ+vdx7YA49J85pnwnxigZVQFQw3tWnQNRKWwhz5wbP6iv/GQ==} + '@vitest/utils@4.0.4': + resolution: {integrity: sha512-4bJLmSvZLyVbNsYFRpPYdJViG9jZyRvMZ35IF4ymXbRZoS+ycYghmwTGiscTXduUg2lgKK7POWIyXJNute1hjw==} acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} @@ -4148,18 +4148,18 @@ packages: yaml: optional: true - vitest@4.0.3: - resolution: {integrity: 
sha512-IUSop8jgaT7w0g1yOM/35qVtKjr/8Va4PrjzH1OUb0YH4c3OXB2lCZDkMAB6glA8T5w8S164oJGsbcmAecr4sA==} + vitest@4.0.4: + resolution: {integrity: sha512-hV31h0/bGbtmDQc0KqaxsTO1v4ZQeF8ojDFuy4sZhFadwAqqvJA0LDw68QUocctI5EDpFMql/jVWKuPYHIf2Ew==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' '@types/debug': ^4.1.12 '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.3 - '@vitest/browser-preview': 4.0.3 - '@vitest/browser-webdriverio': 4.0.3 - '@vitest/ui': 4.0.3 + '@vitest/browser-playwright': 4.0.4 + '@vitest/browser-preview': 4.0.4 + '@vitest/browser-webdriverio': 4.0.4 + '@vitest/ui': 4.0.4 happy-dom: '*' jsdom: '*' peerDependenciesMeta: @@ -6144,43 +6144,43 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/expect@4.0.3': + '@vitest/expect@4.0.4': dependencies: '@standard-schema/spec': 1.0.0 '@types/chai': 5.2.2 - '@vitest/spy': 4.0.3 - '@vitest/utils': 4.0.3 + '@vitest/spy': 4.0.4 + '@vitest/utils': 4.0.4 chai: 6.2.0 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.3(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitest/mocker@4.0.4(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: - '@vitest/spy': 4.0.3 + '@vitest/spy': 4.0.4 estree-walker: 3.0.3 magic-string: 0.30.19 optionalDependencies: vite: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) - '@vitest/pretty-format@4.0.3': + '@vitest/pretty-format@4.0.4': dependencies: tinyrainbow: 3.0.3 - '@vitest/runner@4.0.3': + '@vitest/runner@4.0.4': dependencies: - '@vitest/utils': 4.0.3 + '@vitest/utils': 4.0.4 pathe: 2.0.3 - '@vitest/snapshot@4.0.3': + '@vitest/snapshot@4.0.4': dependencies: - '@vitest/pretty-format': 4.0.3 + '@vitest/pretty-format': 4.0.4 magic-string: 0.30.19 pathe: 2.0.3 - '@vitest/spy@4.0.3': {} + '@vitest/spy@4.0.4': {} - '@vitest/utils@4.0.3': + '@vitest/utils@4.0.4': dependencies: - 
'@vitest/pretty-format': 4.0.3 + '@vitest/pretty-format': 4.0.4 tinyrainbow: 3.0.3 acorn-jsx@5.3.2(acorn@8.15.0): @@ -8830,15 +8830,15 @@ snapshots: terser: 5.44.0 yaml: 2.8.1 - vitest@4.0.3(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vitest@4.0.4(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): dependencies: - '@vitest/expect': 4.0.3 - '@vitest/mocker': 4.0.3(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) - '@vitest/pretty-format': 4.0.3 - '@vitest/runner': 4.0.3 - '@vitest/snapshot': 4.0.3 - '@vitest/spy': 4.0.3 - '@vitest/utils': 4.0.3 + '@vitest/expect': 4.0.4 + '@vitest/mocker': 4.0.4(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitest/pretty-format': 4.0.4 + '@vitest/runner': 4.0.4 + '@vitest/snapshot': 4.0.4 + '@vitest/spy': 4.0.4 + '@vitest/utils': 4.0.4 debug: 4.4.3 es-module-lexer: 1.7.0 expect-type: 1.2.2 From c736796380902376b643d2f03582779d61120ad4 Mon Sep 17 00:00:00 2001 From: Tunglies Date: Mon, 27 Oct 2025 20:55:51 +0800 Subject: [PATCH 04/70] feat(clippy): cognitive-complexity rule (#5215) * feat(config): enhance configuration initialization and validation process * refactor(profile): streamline profile update logic and enhance error handling * refactor(config): simplify profile item checks and streamline update flag processing * refactor(disney_plus): add cognitive complexity allowance for check_disney_plus function * refactor(enhance): restructure configuration and profile item handling for improved clarity and maintainability * refactor(tray): add cognitive complexity allowance for create_tray_menu function * refactor(config): add cognitive complexity allowance for patch_config function * refactor(profiles): simplify item removal logic by introducing take_item_file_by_uid helper function * refactor(profile): add new validation logic for profile configuration syntax * 
refactor(profiles): improve formatting and readability of take_item_file_by_uid function * refactor(cargo): change cognitive complexity level from warn to deny * refactor(cargo): ensure cognitive complexity is denied in Cargo.toml * refactor(i18n): clean up imports and improve code readability refactor(proxy): simplify system proxy toggle logic refactor(service): remove unnecessary `as_str()` conversion in error handling refactor(tray): modularize tray menu creation for better maintainability * refactor(tray): update menu item text handling to use references for improved performance --- src-tauri/.clippy.toml | 3 +- src-tauri/Cargo.toml | 1 + .../cmd/media_unlock_checker/disney_plus.rs | 1 + src-tauri/src/cmd/profile.rs | 513 +++++++++-------- src-tauri/src/cmd/service.rs | 2 +- src-tauri/src/config/config.rs | 50 +- src-tauri/src/config/profiles.rs | 87 +-- src-tauri/src/config/verge.rs | 1 + src-tauri/src/core/tray/mod.rs | 410 ++++++++------ src-tauri/src/enhance/mod.rs | 534 +++++++++++------- src-tauri/src/feat/config.rs | 275 ++++----- src-tauri/src/feat/profile.rs | 221 ++++---- src-tauri/src/feat/proxy.rs | 15 +- src-tauri/src/utils/i18n.rs | 1 + 14 files changed, 1130 insertions(+), 984 deletions(-) diff --git a/src-tauri/.clippy.toml b/src-tauri/.clippy.toml index 0e7ffd07..a1db40ca 100644 --- a/src-tauri/.clippy.toml +++ b/src-tauri/.clippy.toml @@ -1 +1,2 @@ -avoid-breaking-exported-api = true \ No newline at end of file +avoid-breaking-exported-api = true +cognitive-complexity-threshold = 25 \ No newline at end of file diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 4ac7cbd7..04302917 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -232,3 +232,4 @@ needless_raw_string_hashes = "deny" # Too many in existing code #restriction = { level = "allow", priority = -1 } or_fun_call = "deny" +cognitive_complexity = "deny" diff --git a/src-tauri/src/cmd/media_unlock_checker/disney_plus.rs 
b/src-tauri/src/cmd/media_unlock_checker/disney_plus.rs index d13133f2..dc18978e 100644 --- a/src-tauri/src/cmd/media_unlock_checker/disney_plus.rs +++ b/src-tauri/src/cmd/media_unlock_checker/disney_plus.rs @@ -6,6 +6,7 @@ use crate::{logging, utils::logging::Type}; use super::UnlockItem; use super::utils::{country_code_to_emoji, get_local_date_string}; +#[allow(clippy::cognitive_complexity)] pub(super) async fn check_disney_plus(client: &Client) -> UnlockItem { let device_api_url = "https://disney.api.edge.bamgrid.com/devices"; let auth_header = diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 766cea73..15177936 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -216,6 +216,257 @@ pub async fn delete_profile(index: String) -> CmdResult { Ok(()) } +/// 验证新配置文件的语法 +async fn validate_new_profile(new_profile: &String) -> Result<(), ()> { + logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); + + // 获取目标配置文件路径 + let config_file_result = { + let profiles_config = Config::profiles().await; + let profiles_data = profiles_config.latest_ref(); + match profiles_data.get_item(new_profile) { + Ok(item) => { + if let Some(file) = &item.file { + let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); + path.ok() + } else { + None + } + } + Err(e) => { + logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); + None + } + } + }; + + // 如果获取到文件路径,检查YAML语法 + if let Some(file_path) = config_file_result { + if !file_path.exists() { + logging!( + error, + Type::Cmd, + "目标配置文件不存在: {}", + file_path.display() + ); + handle::Handle::notice_message( + "config_validate::file_not_found", + format!("{}", file_path.display()), + ); + return Err(()); + } + + // 超时保护 + let file_read_result = tokio::time::timeout( + Duration::from_secs(5), + tokio::fs::read_to_string(&file_path), + ) + .await; + + match file_read_result { + Ok(Ok(content)) => { + let yaml_parse_result = AsyncHandler::spawn_blocking(move || { + 
serde_yaml_ng::from_str::(&content) + }) + .await; + + match yaml_parse_result { + Ok(Ok(_)) => { + logging!(info, Type::Cmd, "目标配置文件语法正确"); + Ok(()) + } + Ok(Err(err)) => { + let error_msg = format!(" {err}"); + logging!( + error, + Type::Cmd, + "目标配置文件存在YAML语法错误:{}", + error_msg + ); + handle::Handle::notice_message( + "config_validate::yaml_syntax_error", + error_msg.clone(), + ); + Err(()) + } + Err(join_err) => { + let error_msg = format!("YAML解析任务失败: {join_err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::yaml_parse_error", + error_msg.clone(), + ); + Err(()) + } + } + } + Ok(Err(err)) => { + let error_msg = format!("无法读取目标配置文件: {err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_error", + error_msg.clone(), + ); + Err(()) + } + Err(_) => { + let error_msg = "读取配置文件超时(5秒)".to_string(); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_timeout", + error_msg.clone(), + ); + Err(()) + } + } + } else { + Ok(()) + } +} + +/// 执行配置更新并处理结果 +async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> { + logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); + let restore_profiles = IProfiles { + current: Some(prev_profile), + items: None, + }; + Config::profiles() + .await + .draft_mut() + .patch_config(restore_profiles) + .stringify_err()?; + Config::profiles().await.apply(); + crate::process::AsyncHandler::spawn(|| async move { + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); + } + }); + logging!(info, Type::Cmd, "成功恢复到之前的配置"); + Ok(()) +} + +async fn handle_success(current_sequence: u64, current_value: Option) -> CmdResult { + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", 
+ current_sequence, + latest_sequence + ); + Config::profiles().await.discard(); + return Ok(false); + } + + logging!( + info, + Type::Cmd, + "配置更新成功,序列号: {}", + current_sequence + ); + Config::profiles().await.apply(); + handle::Handle::refresh_clash(); + + if let Err(e) = Tray::global().update_tooltip().await { + log::warn!(target: "app", "异步更新托盘提示失败: {e}"); + } + + if let Err(e) = Tray::global().update_menu().await { + log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); + } + + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存配置文件失败: {e}"); + } + + if let Some(current) = ¤t_value { + logging!( + info, + Type::Cmd, + "向前端发送配置变更事件: {}, 序列号: {}", + current, + current_sequence + ); + handle::Handle::notify_profile_changed(current.clone()); + } + + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(true) +} + +async fn handle_validation_failure( + error_msg: String, + current_profile: Option, +) -> CmdResult { + logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::error", error_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_update_error(e: E, current_sequence: u64) -> CmdResult { + logging!( + warn, + Type::Cmd, + "更新过程发生错误: {}, 序列号: {}", + e, + current_sequence + ); + Config::profiles().await.discard(); + handle::Handle::notice_message("config_validate::boot_error", e.to_string()); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_timeout(current_profile: Option, current_sequence: u64) -> CmdResult { + let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; + logging!( + error, + Type::Cmd, + "{}, 序列号: {}", + timeout_msg, + current_sequence + ); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + 
restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::timeout", timeout_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn perform_config_update( + current_sequence: u64, + current_value: Option, + current_profile: Option, +) -> CmdResult { + logging!( + info, + Type::Cmd, + "开始内核配置更新,序列号: {}", + current_sequence + ); + let update_result = tokio::time::timeout( + Duration::from_secs(30), + CoreManager::global().update_config(), + ) + .await; + + match update_result { + Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await, + Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await, + Ok(Err(e)) => handle_update_error(e, current_sequence).await, + Err(_) => handle_timeout(current_profile, current_sequence).await, + } +} + /// 修改profiles的配置 #[tauri::command] pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { @@ -256,108 +507,10 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { // 如果要切换配置,先检查目标配置文件是否有语法错误 if let Some(new_profile) = profiles.current.as_ref() && current_profile.as_ref() != Some(new_profile) + && validate_new_profile(new_profile).await.is_err() { - logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); - - // 获取目标配置文件路径 - let config_file_result = { - let profiles_config = Config::profiles().await; - let profiles_data = profiles_config.latest_ref(); - match profiles_data.get_item(new_profile) { - Ok(item) => { - if let Some(file) = &item.file { - let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); - path.ok() - } else { - None - } - } - Err(e) => { - logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); - None - } - } - }; - - // 如果获取到文件路径,检查YAML语法 - if let Some(file_path) = config_file_result { - if !file_path.exists() { - logging!( - error, - Type::Cmd, - "目标配置文件不存在: {}", - file_path.display() - ); - handle::Handle::notice_message( - 
"config_validate::file_not_found", - format!("{}", file_path.display()), - ); - return Ok(false); - } - - // 超时保护 - let file_read_result = tokio::time::timeout( - Duration::from_secs(5), - tokio::fs::read_to_string(&file_path), - ) - .await; - - match file_read_result { - Ok(Ok(content)) => { - let yaml_parse_result = AsyncHandler::spawn_blocking(move || { - serde_yaml_ng::from_str::(&content) - }) - .await; - - match yaml_parse_result { - Ok(Ok(_)) => { - logging!(info, Type::Cmd, "目标配置文件语法正确"); - } - Ok(Err(err)) => { - let error_msg = format!(" {err}"); - logging!( - error, - Type::Cmd, - "目标配置文件存在YAML语法错误:{}", - error_msg - ); - handle::Handle::notice_message( - "config_validate::yaml_syntax_error", - error_msg.clone(), - ); - return Ok(false); - } - Err(join_err) => { - let error_msg = format!("YAML解析任务失败: {join_err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::yaml_parse_error", - error_msg.clone(), - ); - return Ok(false); - } - } - } - Ok(Err(err)) => { - let error_msg = format!("无法读取目标配置文件: {err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_error", - error_msg.clone(), - ); - return Ok(false); - } - Err(_) => { - let error_msg = "读取配置文件超时(5秒)".to_string(); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_timeout", - error_msg.clone(), - ); - return Ok(false); - } - } - } + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + return Ok(false); } // 检查请求有效性 @@ -399,163 +552,7 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { return Ok(false); } - // 为配置更新添加超时保护 - logging!( - info, - Type::Cmd, - "开始内核配置更新,序列号: {}", - current_sequence - ); - let update_result = tokio::time::timeout( - Duration::from_secs(30), // 30秒超时 - CoreManager::global().update_config(), - ) - .await; - - // 更新配置并进行验证 - match update_result { - Ok(Ok((true, _))) => { - // 
内核操作完成后再次检查请求有效性 - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", - current_sequence, - latest_sequence - ); - Config::profiles().await.discard(); - return Ok(false); - } - - logging!( - info, - Type::Cmd, - "配置更新成功,序列号: {}", - current_sequence - ); - Config::profiles().await.apply(); - handle::Handle::refresh_clash(); - - // 强制刷新代理缓存,确保profile切换后立即获取最新节点数据 - // crate::process::AsyncHandler::spawn(|| async move { - // if let Err(e) = super::proxy::force_refresh_proxies().await { - // log::warn!(target: "app", "强制刷新代理缓存失败: {e}"); - // } - // }); - - if let Err(e) = Tray::global().update_tooltip().await { - log::warn!(target: "app", "异步更新托盘提示失败: {e}"); - } - - if let Err(e) = Tray::global().update_menu().await { - log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); - } - - // 保存配置文件 - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存配置文件失败: {e}"); - } - - // 立即通知前端配置变更 - if let Some(current) = ¤t_value { - logging!( - info, - Type::Cmd, - "向前端发送配置变更事件: {}, 序列号: {}", - current, - current_sequence - ); - handle::Handle::notify_profile_changed(current.clone()); - } - - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(true) - } - Ok(Ok((false, error_msg))) => { - logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); - Config::profiles().await.discard(); - // 如果验证失败,恢复到之前的配置 - if let Some(prev_profile) = current_profile { - logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); - let restore_profiles = IProfiles { - current: Some(prev_profile), - items: None, - }; - // 静默恢复,不触发验证 - Config::profiles() - .await - .draft_mut() - .patch_config(restore_profiles) - .stringify_err()?; - Config::profiles().await.apply(); - - crate::process::AsyncHandler::spawn(|| async move { - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); - } - }); - - 
logging!(info, Type::Cmd, "成功恢复到之前的配置"); - } - - // 发送验证错误通知 - handle::Handle::notice_message("config_validate::error", error_msg.to_string()); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) - } - Ok(Err(e)) => { - logging!( - warn, - Type::Cmd, - "更新过程发生错误: {}, 序列号: {}", - e, - current_sequence - ); - Config::profiles().await.discard(); - handle::Handle::notice_message("config_validate::boot_error", e.to_string()); - - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) - } - Err(_) => { - // 超时处理 - let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; - logging!( - error, - Type::Cmd, - "{}, 序列号: {}", - timeout_msg, - current_sequence - ); - Config::profiles().await.discard(); - - if let Some(prev_profile) = current_profile { - logging!( - info, - Type::Cmd, - "超时后尝试恢复到之前的配置: {}, 序列号: {}", - prev_profile, - current_sequence - ); - let restore_profiles = IProfiles { - current: Some(prev_profile), - items: None, - }; - Config::profiles() - .await - .draft_mut() - .patch_config(restore_profiles) - .stringify_err()?; - Config::profiles().await.apply(); - } - - handle::Handle::notice_message("config_validate::timeout", timeout_msg); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) - } - } + perform_config_update(current_sequence, current_value, current_profile).await } /// 根据profile name修改profiles diff --git a/src-tauri/src/cmd/service.rs b/src-tauri/src/cmd/service.rs index 5231ea90..c2f557de 100644 --- a/src-tauri/src/cmd/service.rs +++ b/src-tauri/src/cmd/service.rs @@ -12,7 +12,7 @@ async fn execute_service_operation_sync(status: ServiceStatus, op_type: &str) -> .await { let emsg = format!("{} Service failed: {}", op_type, e); - return Err(t(emsg.as_str()).await.into()); + return Err(t(emsg.as_str()).await); } Ok(()) } diff --git a/src-tauri/src/config/config.rs b/src-tauri/src/config/config.rs index ced80e1d..dfbfed7c 100644 --- a/src-tauri/src/config/config.rs +++ b/src-tauri/src/config/config.rs @@ 
-53,24 +53,33 @@ impl Config { /// 初始化订阅 pub async fn init_config() -> Result<()> { - if Self::profiles() - .await - .latest_ref() - .get_item(&"Merge".into()) - .is_err() - { + Self::ensure_default_profile_items().await?; + + let validation_result = Self::generate_and_validate().await?; + + if let Some((msg_type, msg_content)) = validation_result { + sleep(timing::STARTUP_ERROR_DELAY).await; + handle::Handle::notice_message(msg_type, msg_content); + } + + Ok(()) + } + + // Ensure "Merge" and "Script" profile items exist, adding them if missing. + async fn ensure_default_profile_items() -> Result<()> { + let profiles = Self::profiles().await; + if profiles.latest_ref().get_item(&"Merge".into()).is_err() { let merge_item = PrfItem::from_merge(Some("Merge".into()))?; profiles_append_item_safe(merge_item.clone()).await?; } - if Self::profiles() - .await - .latest_ref() - .get_item(&"Script".into()) - .is_err() - { + if profiles.latest_ref().get_item(&"Script".into()).is_err() { let script_item = PrfItem::from_script(Some("Script".into()))?; profiles_append_item_safe(script_item.clone()).await?; } + Ok(()) + } + + async fn generate_and_validate() -> Result> { // 生成运行时配置 if let Err(err) = Self::generate().await { logging!(error, Type::Config, "生成运行时配置失败: {}", err); @@ -81,7 +90,7 @@ impl Config { // 生成运行时配置文件并验证 let config_result = Self::generate_file(ConfigType::Run).await; - let validation_result = if config_result.is_ok() { + if config_result.is_ok() { // 验证配置文件 logging!(info, Type::Config, "开始验证配置"); @@ -97,12 +106,12 @@ impl Config { CoreManager::global() .use_default_config("config_validate::boot_error", &error_msg) .await?; - Some(("config_validate::boot_error", error_msg)) + Ok(Some(("config_validate::boot_error", error_msg))) } else { logging!(info, Type::Config, "配置验证成功"); // 前端没有必要知道验证成功的消息,也没有事件驱动 // Some(("config_validate::success", String::new())) - None + Ok(None) } } Err(err) => { @@ -110,7 +119,7 @@ impl Config { CoreManager::global() 
.use_default_config("config_validate::process_terminated", "") .await?; - Some(("config_validate::process_terminated", String::new())) + Ok(Some(("config_validate::process_terminated", String::new()))) } } } else { @@ -118,15 +127,8 @@ impl Config { CoreManager::global() .use_default_config("config_validate::error", "") .await?; - Some(("config_validate::error", String::new())) - }; - - if let Some((msg_type, msg_content)) = validation_result { - sleep(timing::STARTUP_ERROR_DELAY).await; - handle::Handle::notice_message(msg_type, msg_content); + Ok(Some(("config_validate::error", String::new()))) } - - Ok(()) } pub async fn generate_file(typ: ConfigType) -> Result { diff --git a/src-tauri/src/config/profiles.rs b/src-tauri/src/config/profiles.rs index 776ee21a..1601fb66 100644 --- a/src-tauri/src/config/profiles.rs +++ b/src-tauri/src/config/profiles.rs @@ -37,6 +37,18 @@ macro_rules! patch { } impl IProfiles { + // Helper to find and remove an item by uid from the items vec, returning its file name (if any). 
+ fn take_item_file_by_uid( + items: &mut Vec, + target_uid: Option, + ) -> Option { + for (i, _) in items.iter().enumerate() { + if items[i].uid == target_uid { + return items.remove(i).file; + } + } + None + } pub async fn new() -> Self { match dirs::profiles_path() { Ok(path) => match help::read_yaml::(&path).await { @@ -277,98 +289,41 @@ impl IProfiles { let proxies_uid = item.option.as_ref().and_then(|e| e.proxies.clone()); let groups_uid = item.option.as_ref().and_then(|e| e.groups.clone()); let mut items = self.items.take().unwrap_or_default(); - let mut index = None; - let mut merge_index = None; - let mut script_index = None; - let mut rules_index = None; - let mut proxies_index = None; - let mut groups_index = None; - // get the index - for (i, _) in items.iter().enumerate() { - if items[i].uid == Some(uid.clone()) { - index = Some(i); - break; - } - } - if let Some(index) = index - && let Some(file) = items.remove(index).file - { + // remove the main item (if exists) and delete its file + if let Some(file) = Self::take_item_file_by_uid(&mut items, Some(uid.clone())) { let _ = dirs::app_profiles_dir()? .join(file.as_str()) .remove_if_exists() .await; } - // get the merge index - for (i, _) in items.iter().enumerate() { - if items[i].uid == merge_uid { - merge_index = Some(i); - break; - } - } - if let Some(index) = merge_index - && let Some(file) = items.remove(index).file - { + + // remove related extension items (merge, script, rules, proxies, groups) + if let Some(file) = Self::take_item_file_by_uid(&mut items, merge_uid.clone()) { let _ = dirs::app_profiles_dir()? 
.join(file.as_str()) .remove_if_exists() .await; } - // get the script index - for (i, _) in items.iter().enumerate() { - if items[i].uid == script_uid { - script_index = Some(i); - break; - } - } - if let Some(index) = script_index - && let Some(file) = items.remove(index).file - { + if let Some(file) = Self::take_item_file_by_uid(&mut items, script_uid.clone()) { let _ = dirs::app_profiles_dir()? .join(file.as_str()) .remove_if_exists() .await; } - // get the rules index - for (i, _) in items.iter().enumerate() { - if items[i].uid == rules_uid { - rules_index = Some(i); - break; - } - } - if let Some(index) = rules_index - && let Some(file) = items.remove(index).file - { + if let Some(file) = Self::take_item_file_by_uid(&mut items, rules_uid.clone()) { let _ = dirs::app_profiles_dir()? .join(file.as_str()) .remove_if_exists() .await; } - // get the proxies index - for (i, _) in items.iter().enumerate() { - if items[i].uid == proxies_uid { - proxies_index = Some(i); - break; - } - } - if let Some(index) = proxies_index - && let Some(file) = items.remove(index).file - { + if let Some(file) = Self::take_item_file_by_uid(&mut items, proxies_uid.clone()) { let _ = dirs::app_profiles_dir()? .join(file.as_str()) .remove_if_exists() .await; } - // get the groups index - for (i, _) in items.iter().enumerate() { - if items[i].uid == groups_uid { - groups_index = Some(i); - break; - } - } - if let Some(index) = groups_index - && let Some(file) = items.remove(index).file - { + if let Some(file) = Self::take_item_file_by_uid(&mut items, groups_uid.clone()) { let _ = dirs::app_profiles_dir()? 
.join(file.as_str()) .remove_if_exists() diff --git a/src-tauri/src/config/verge.rs b/src-tauri/src/config/verge.rs index 8533231e..3207b57c 100644 --- a/src-tauri/src/config/verge.rs +++ b/src-tauri/src/config/verge.rs @@ -438,6 +438,7 @@ impl IVerge { /// patch verge config /// only save to file + #[allow(clippy::cognitive_complexity)] pub fn patch_config(&mut self, patch: IVerge) { macro_rules! patch { ($key: tt) => { diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 2295c811..84f934a1 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -1,8 +1,10 @@ use once_cell::sync::OnceCell; use tauri::Emitter; use tauri::tray::TrayIconBuilder; +use tauri_plugin_mihomo::models::Proxies; #[cfg(target_os = "macos")] pub mod speed_rate; +use crate::config::PrfSelected; use crate::module::lightweight; use crate::process::AsyncHandler; use crate::utils::window_manager::WindowManager; @@ -34,6 +36,56 @@ use tauri::{ // TODO: 是否需要将可变菜单抽离存储起来,后续直接更新对应菜单实例,无需重新创建菜单(待考虑) +type ProxyMenuItem = (Option>, Vec>>); + +struct MenuTexts { + dashboard: String, + rule_mode: String, + global_mode: String, + direct_mode: String, + profiles: String, + proxies: String, + system_proxy: String, + tun_mode: String, + close_all_connections: String, + lightweight_mode: String, + copy_env: String, + conf_dir: String, + core_dir: String, + logs_dir: String, + open_dir: String, + restart_clash: String, + restart_app: String, + verge_version: String, + more: String, + exit: String, +} + +async fn fetch_menu_texts() -> MenuTexts { + MenuTexts { + dashboard: t("Dashboard").await, + rule_mode: t("Rule Mode").await, + global_mode: t("Global Mode").await, + direct_mode: t("Direct Mode").await, + profiles: t("Profiles").await, + proxies: t("Proxies").await, + system_proxy: t("System Proxy").await, + tun_mode: t("TUN Mode").await, + close_all_connections: t("Close All Connections").await, + lightweight_mode: t("LightWeight Mode").await, + 
copy_env: t("Copy Env").await, + conf_dir: t("Conf Dir").await, + core_dir: t("Core Dir").await, + logs_dir: t("Logs Dir").await, + open_dir: t("Open Dir").await, + restart_clash: t("Restart Clash Core").await, + restart_app: t("Restart App").await, + verge_version: t("Verge Version").await, + more: t("More").await, + exit: t("Exit").await, + } +} + #[derive(Clone)] struct TrayState {} @@ -606,70 +658,8 @@ impl Tray { } } -async fn create_tray_menu( - app_handle: &AppHandle, - mode: Option<&str>, - system_proxy_enabled: bool, - tun_mode_enabled: bool, - profile_uid_and_name: Vec<(String, String)>, - is_lightweight_mode: bool, -) -> Result> { - let mode = mode.unwrap_or(""); - - // 获取当前配置文件的选中代理组信息 - let current_profile_selected = { - let profiles_config = Config::profiles().await; - let profiles_ref = profiles_config.latest_ref(); - profiles_ref - .get_current() - .and_then(|uid| profiles_ref.get_item(&uid).ok()) - .and_then(|profile| profile.selected.clone()) - .unwrap_or_default() - }; - - let proxy_nodes_data = handle::Handle::mihomo().await.get_proxies().await; - - let runtime_proxy_groups_order = cmd::get_runtime_config() - .await - .map_err(|e| { - logging!( - error, - Type::Cmd, - "Failed to fetch runtime proxy groups for tray menu: {e}" - ); - }) - .ok() - .flatten() - .map(|config| { - config - .get("proxy-groups") - .and_then(|groups| groups.as_sequence()) - .map(|groups| { - groups - .iter() - .filter_map(|group| group.get("name")) - .filter_map(|name| name.as_str()) - .map(|name| name.into()) - .collect::>() - }) - .unwrap_or_default() - }); - - let proxy_group_order_map = runtime_proxy_groups_order.as_ref().map(|group_names| { - group_names - .iter() - .enumerate() - .map(|(index, name)| (name.clone(), index)) - .collect::>() - }); - - let verge_settings = Config::verge().await.latest_ref().clone(); - let show_proxy_groups_inline = verge_settings.tray_inline_proxy_groups.unwrap_or(false); - - let version = env!("CARGO_PKG_VERSION"); - - let hotkeys = 
verge_settings - .hotkeys +fn create_hotkeys(hotkeys: &Option>) -> HashMap { + hotkeys .as_ref() .map(|h| { h.iter() @@ -689,35 +679,45 @@ async fn create_tray_menu( }) .collect::>() }) - .unwrap_or_default(); + .unwrap_or_default() +} - let profile_menu_items: Vec> = { - let futures = profile_uid_and_name - .iter() - .map(|(profile_uid, profile_name)| { - let app_handle = app_handle.clone(); - let profile_uid = profile_uid.clone(); - let profile_name = profile_name.clone(); - async move { - let is_current_profile = Config::profiles() - .await - .data_mut() - .is_current_profile_index(profile_uid.clone()); - CheckMenuItem::with_id( - &app_handle, - format!("profiles_{profile_uid}"), - t(&profile_name).await, - true, - is_current_profile, - None::<&str>, - ) - } - }); - let results = join_all(futures).await; - results.into_iter().collect::, _>>()? - }; +async fn create_profile_menu_item( + app_handle: &AppHandle, + profile_uid_and_name: Vec<(String, String)>, +) -> Result>> { + let futures = profile_uid_and_name + .iter() + .map(|(profile_uid, profile_name)| { + let app_handle = app_handle.clone(); + let profile_uid = profile_uid.clone(); + let profile_name = profile_name.clone(); + async move { + let is_current_profile = Config::profiles() + .await + .latest_ref() + .is_current_profile_index(profile_uid.clone()); + CheckMenuItem::with_id( + &app_handle, + format!("profiles_{profile_uid}"), + t(&profile_name).await, + true, + is_current_profile, + None::<&str>, + ) + } + }); + let results = join_all(futures).await; + Ok(results.into_iter().collect::, _>>()?) 
+} - // 代理组子菜单 +fn create_subcreate_proxy_menu_item( + app_handle: &AppHandle, + proxy_mode: &str, + current_profile_selected: &[PrfSelected], + proxy_group_order_map: Option>, + proxy_nodes_data: Result, +) -> Result>> { let proxy_submenus: Vec> = { let mut submenus: Vec<(String, usize, Submenu)> = Vec::new(); @@ -725,7 +725,7 @@ async fn create_tray_menu( if let Ok(proxy_nodes_data) = proxy_nodes_data { for (group_name, group_data) in proxy_nodes_data.proxies.iter() { // Filter groups based on mode - let should_show = match mode { + let should_show = match proxy_mode { "global" => group_name == "GLOBAL", _ => group_name != "GLOBAL", } && @@ -781,7 +781,7 @@ async fn create_tray_menu( } // Determine if group is active - let is_group_active = match mode { + let is_group_active = match proxy_mode { "global" => group_name == "GLOBAL" && !now_proxy.is_empty(), "direct" => false, _ => { @@ -837,28 +837,117 @@ async fn create_tray_menu( .map(|(_, _, submenu)| submenu) .collect() }; + Ok(proxy_submenus) +} + +fn create_proxy_menu_item( + app_handle: &AppHandle, + show_proxy_groups_inline: bool, + proxy_submenus: Vec>, + proxies_text: &String, +) -> Result { + // 创建代理主菜单 + let (proxies_submenu, inline_proxy_items) = if show_proxy_groups_inline { + ( + None, + proxy_submenus + .into_iter() + .map(|submenu| Box::new(submenu) as Box>) + .collect(), + ) + } else if !proxy_submenus.is_empty() { + let proxy_submenu_refs: Vec<&dyn IsMenuItem> = proxy_submenus + .iter() + .map(|submenu| submenu as &dyn IsMenuItem) + .collect(); + + ( + Some(Submenu::with_id_and_items( + app_handle, + "proxies", + proxies_text, + true, + &proxy_submenu_refs, + )?), + Vec::new(), + ) + } else { + (None, Vec::new()) + }; + Ok((proxies_submenu, inline_proxy_items)) +} + +async fn create_tray_menu( + app_handle: &AppHandle, + mode: Option<&str>, + system_proxy_enabled: bool, + tun_mode_enabled: bool, + profile_uid_and_name: Vec<(String, String)>, + is_lightweight_mode: bool, +) -> Result> { + let 
current_proxy_mode = mode.unwrap_or(""); + + // 获取当前配置文件的选中代理组信息 + let current_profile_selected = { + let profiles_config = Config::profiles().await; + let profiles_ref = profiles_config.latest_ref(); + profiles_ref + .get_current() + .and_then(|uid| profiles_ref.get_item(&uid).ok()) + .and_then(|profile| profile.selected.clone()) + .unwrap_or_default() + }; + + let proxy_nodes_data = handle::Handle::mihomo().await.get_proxies().await; + + let runtime_proxy_groups_order = cmd::get_runtime_config() + .await + .map_err(|e| { + logging!( + error, + Type::Cmd, + "Failed to fetch runtime proxy groups for tray menu: {e}" + ); + }) + .ok() + .flatten() + .map(|config| { + config + .get("proxy-groups") + .and_then(|groups| groups.as_sequence()) + .map(|groups| { + groups + .iter() + .filter_map(|group| group.get("name")) + .filter_map(|name| name.as_str()) + .map(|name| name.into()) + .collect::>() + }) + .unwrap_or_default() + }); + + let proxy_group_order_map: Option< + HashMap, usize>, + > = runtime_proxy_groups_order.as_ref().map(|group_names| { + group_names + .iter() + .enumerate() + .map(|(index, name)| (name.clone(), index)) + .collect::>() + }); + + let verge_settings = Config::verge().await.latest_ref().clone(); + let show_proxy_groups_inline = verge_settings.tray_inline_proxy_groups.unwrap_or(false); + + let version = env!("CARGO_PKG_VERSION"); + + let hotkeys = create_hotkeys(&verge_settings.hotkeys); + + let profile_menu_items: Vec> = + create_profile_menu_item(app_handle, profile_uid_and_name).await?; // Pre-fetch all localized strings - let dashboard_text = t("Dashboard").await; - let rule_mode_text = t("Rule Mode").await; - let global_mode_text = t("Global Mode").await; - let direct_mode_text = t("Direct Mode").await; - let profiles_text = t("Profiles").await; - let proxies_text = t("Proxies").await; - let system_proxy_text = t("System Proxy").await; - let tun_mode_text = t("TUN Mode").await; - let close_all_connections_text = t("Close All 
Connections").await; - let lightweight_mode_text = t("LightWeight Mode").await; - let copy_env_text = t("Copy Env").await; - let conf_dir_text = t("Conf Dir").await; - let core_dir_text = t("Core Dir").await; - let logs_dir_text = t("Logs Dir").await; - let open_dir_text = t("Open Dir").await; - let restart_clash_text = t("Restart Clash Core").await; - let restart_app_text = t("Restart App").await; - let verge_version_text = t("Verge Version").await; - let more_text = t("More").await; - let exit_text = t("Exit").await; + let texts = &fetch_menu_texts().await; // Convert to references only when needed let profile_menu_items_refs: Vec<&dyn IsMenuItem> = profile_menu_items @@ -869,7 +958,7 @@ async fn create_tray_menu( let open_window = &MenuItem::with_id( app_handle, "open_window", - dashboard_text, + &texts.dashboard, true, hotkeys.get("open_or_close_dashboard").map(|s| s.as_str()), )?; @@ -877,72 +966,57 @@ async fn create_tray_menu( let rule_mode = &CheckMenuItem::with_id( app_handle, "rule_mode", - rule_mode_text, + &texts.rule_mode, true, - mode == "rule", + current_proxy_mode == "rule", hotkeys.get("clash_mode_rule").map(|s| s.as_str()), )?; let global_mode = &CheckMenuItem::with_id( app_handle, "global_mode", - global_mode_text, + &texts.global_mode, true, - mode == "global", + current_proxy_mode == "global", hotkeys.get("clash_mode_global").map(|s| s.as_str()), )?; let direct_mode = &CheckMenuItem::with_id( app_handle, "direct_mode", - direct_mode_text, + &texts.direct_mode, true, - mode == "direct", + current_proxy_mode == "direct", hotkeys.get("clash_mode_direct").map(|s| s.as_str()), )?; let profiles = &Submenu::with_id_and_items( app_handle, "profiles", - profiles_text, + &texts.profiles, true, &profile_menu_items_refs, )?; - // 创建代理主菜单 - let (proxies_submenu, inline_proxy_items): (Option>, Vec<&dyn IsMenuItem>) = - if show_proxy_groups_inline { - ( - None, - proxy_submenus - .iter() - .map(|submenu| submenu as &dyn IsMenuItem) - .collect(), - ) - } else 
if !proxy_submenus.is_empty() { - let proxy_submenu_refs: Vec<&dyn IsMenuItem> = proxy_submenus - .iter() - .map(|submenu| submenu as &dyn IsMenuItem) - .collect(); + let proxy_sub_menus = create_subcreate_proxy_menu_item( + app_handle, + current_proxy_mode, + ¤t_profile_selected, + proxy_group_order_map, + proxy_nodes_data.map_err(anyhow::Error::from), + )?; - ( - Some(Submenu::with_id_and_items( - app_handle, - "proxies", - proxies_text, - true, - &proxy_submenu_refs, - )?), - Vec::new(), - ) - } else { - (None, Vec::new()) - }; + let (proxies_menu, inline_proxy_items) = create_proxy_menu_item( + app_handle, + show_proxy_groups_inline, + proxy_sub_menus, + &texts.proxies, + )?; let system_proxy = &CheckMenuItem::with_id( app_handle, "system_proxy", - system_proxy_text, + &texts.system_proxy, true, system_proxy_enabled, hotkeys.get("toggle_system_proxy").map(|s| s.as_str()), @@ -951,7 +1025,7 @@ async fn create_tray_menu( let tun_mode = &CheckMenuItem::with_id( app_handle, "tun_mode", - tun_mode_text, + &texts.tun_mode, true, tun_mode_enabled, hotkeys.get("toggle_tun_mode").map(|s| s.as_str()), @@ -960,7 +1034,7 @@ async fn create_tray_menu( let close_all_connections = &MenuItem::with_id( app_handle, "close_all_connections", - close_all_connections_text, + &texts.close_all_connections, true, None::<&str>, )?; @@ -968,18 +1042,18 @@ async fn create_tray_menu( let lighteweight_mode = &CheckMenuItem::with_id( app_handle, "entry_lightweight_mode", - lightweight_mode_text, + &texts.lightweight_mode, true, is_lightweight_mode, hotkeys.get("entry_lightweight_mode").map(|s| s.as_str()), )?; - let copy_env = &MenuItem::with_id(app_handle, "copy_env", copy_env_text, true, None::<&str>)?; + let copy_env = &MenuItem::with_id(app_handle, "copy_env", &texts.copy_env, true, None::<&str>)?; let open_app_dir = &MenuItem::with_id( app_handle, "open_app_dir", - conf_dir_text, + &texts.conf_dir, true, None::<&str>, )?; @@ -987,7 +1061,7 @@ async fn create_tray_menu( let open_core_dir 
= &MenuItem::with_id( app_handle, "open_core_dir", - core_dir_text, + &texts.core_dir, true, None::<&str>, )?; @@ -995,7 +1069,7 @@ async fn create_tray_menu( let open_logs_dir = &MenuItem::with_id( app_handle, "open_logs_dir", - logs_dir_text, + &texts.logs_dir, true, None::<&str>, )?; @@ -1003,7 +1077,7 @@ async fn create_tray_menu( let open_dir = &Submenu::with_id_and_items( app_handle, "open_dir", - open_dir_text, + &texts.open_dir, true, &[open_app_dir, open_core_dir, open_logs_dir], )?; @@ -1011,7 +1085,7 @@ async fn create_tray_menu( let restart_clash = &MenuItem::with_id( app_handle, "restart_clash", - restart_clash_text, + &texts.restart_clash, true, None::<&str>, )?; @@ -1019,7 +1093,7 @@ async fn create_tray_menu( let restart_app = &MenuItem::with_id( app_handle, "restart_app", - restart_app_text, + &texts.restart_app, true, None::<&str>, )?; @@ -1027,7 +1101,7 @@ async fn create_tray_menu( let app_version = &MenuItem::with_id( app_handle, "app_version", - format!("{} {version}", verge_version_text), + format!("{} {version}", &texts.verge_version), true, None::<&str>, )?; @@ -1035,7 +1109,7 @@ async fn create_tray_menu( let more = &Submenu::with_id_and_items( app_handle, "more", - more_text, + &texts.more, true, &[ close_all_connections, @@ -1045,7 +1119,13 @@ async fn create_tray_menu( ], )?; - let quit = &MenuItem::with_id(app_handle, "quit", exit_text, true, Some("CmdOrControl+Q"))?; + let quit = &MenuItem::with_id( + app_handle, + "quit", + &texts.exit, + true, + Some("CmdOrControl+Q"), + )?; let separator = &PredefinedMenuItem::separator(app_handle)?; @@ -1063,9 +1143,9 @@ async fn create_tray_menu( // 如果有代理节点,添加代理节点菜单 if show_proxy_groups_inline { if !inline_proxy_items.is_empty() { - menu_items.extend_from_slice(&inline_proxy_items); + menu_items.extend(inline_proxy_items.iter().map(|item| item.as_ref())); } - } else if let Some(ref proxies_menu) = proxies_submenu { + } else if let Some(ref proxies_menu) = proxies_menu { 
menu_items.push(proxies_menu); } diff --git a/src-tauri/src/enhance/mod.rs b/src-tauri/src/enhance/mod.rs index 704f199f..fc8089c0 100644 --- a/src-tauri/src/enhance/mod.rs +++ b/src-tauri/src/enhance/mod.rs @@ -12,11 +12,35 @@ use smartstring::alias::String; use std::collections::{HashMap, HashSet}; type ResultLog = Vec<(String, String)>; +#[derive(Debug)] +struct ConfigValues { + clash_config: Mapping, + clash_core: Option, + enable_tun: bool, + enable_builtin: bool, + socks_enabled: bool, + http_enabled: bool, + enable_dns_settings: bool, + #[cfg(not(target_os = "windows"))] + redir_enabled: bool, + #[cfg(target_os = "linux")] + tproxy_enabled: bool, +} -/// Enhance mode -/// 返回最终订阅、该订阅包含的键、和script执行的结果 -pub async fn enhance() -> (Mapping, Vec, HashMap) { - // config.yaml 的订阅 +#[derive(Debug)] +struct ProfileItems { + config: Mapping, + merge_item: ChainItem, + script_item: ChainItem, + rules_item: ChainItem, + proxies_item: ChainItem, + groups_item: ChainItem, + global_merge: ChainItem, + global_script: ChainItem, + profile_name: String, +} + +async fn get_config_values() -> ConfigValues { let clash_config = { Config::clash().await.latest_ref().0.clone() }; let (clash_core, enable_tun, enable_builtin, socks_enabled, http_enabled, enable_dns_settings) = { @@ -31,12 +55,14 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { verge.enable_dns_settings.unwrap_or(false), ) }; + #[cfg(not(target_os = "windows"))] let redir_enabled = { let verge = Config::verge().await; let verge = verge.latest_ref(); verge.verge_redir_enabled.unwrap_or(false) }; + #[cfg(target_os = "linux")] let tproxy_enabled = { let verge = Config::verge().await; @@ -44,9 +70,189 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { verge.verge_tproxy_enabled.unwrap_or(false) }; + ConfigValues { + clash_config, + clash_core, + enable_tun, + enable_builtin, + socks_enabled, + http_enabled, + enable_dns_settings, + #[cfg(not(target_os = "windows"))] + redir_enabled, + #[cfg(target_os = "linux")] + 
tproxy_enabled, + } +} + +async fn collect_profile_items() -> ProfileItems { // 从profiles里拿东西 - 先收集需要的数据,然后释放锁 let ( - mut config, + current, + merge_uid, + script_uid, + rules_uid, + proxies_uid, + groups_uid, + _current_profile_uid, + name, + ) = { + let current = { + let profiles = Config::profiles().await; + let profiles_clone = profiles.latest_ref().clone(); + profiles_clone.current_mapping().await.unwrap_or_default() + }; + + let profiles = Config::profiles().await; + let profiles_ref = profiles.latest_ref(); + + let merge_uid = profiles_ref.current_merge().unwrap_or_default(); + let script_uid = profiles_ref.current_script().unwrap_or_default(); + let rules_uid = profiles_ref.current_rules().unwrap_or_default(); + let proxies_uid = profiles_ref.current_proxies().unwrap_or_default(); + let groups_uid = profiles_ref.current_groups().unwrap_or_default(); + let current_profile_uid = profiles_ref.get_current().unwrap_or_default(); + + let name = profiles_ref + .get_item(¤t_profile_uid) + .ok() + .and_then(|item| item.name.clone()) + .unwrap_or_default(); + + ( + current, + merge_uid, + script_uid, + rules_uid, + proxies_uid, + groups_uid, + current_profile_uid, + name, + ) + }; + + // 现在获取具体的items,此时profiles锁已经释放 + let merge_item = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&merge_uid).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "".into(), + data: ChainType::Merge(Mapping::new()), + }); + + let script_item = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&script_uid).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "".into(), + data: ChainType::Script(tmpl::ITEM_SCRIPT.into()), + }); + + let rules_item = { + let item = { + let 
profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&rules_uid).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "".into(), + data: ChainType::Rules(SeqMap::default()), + }); + + let proxies_item = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&proxies_uid).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "".into(), + data: ChainType::Proxies(SeqMap::default()), + }); + + let groups_item = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&groups_uid).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "".into(), + data: ChainType::Groups(SeqMap::default()), + }); + + let global_merge = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&"Merge".into()).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "Merge".into(), + data: ChainType::Merge(Mapping::new()), + }); + + let global_script = { + let item = { + let profiles = Config::profiles().await; + let profiles = profiles.latest_ref(); + profiles.get_item(&"Script".into()).ok().cloned() + }; + if let Some(item) = item { + >::from_async(&item).await + } else { + None + } + } + .unwrap_or_else(|| ChainItem { + uid: "Script".into(), + data: ChainType::Script(tmpl::ITEM_SCRIPT.into()), + }); + + ProfileItems { + config: current, merge_item, script_item, rules_item, @@ -54,192 +260,19 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { groups_item, global_merge, global_script, - profile_name, - ) = 
{ - // 收集所有需要的数据,然后释放profiles锁 - let ( - current, - merge_uid, - script_uid, - rules_uid, - proxies_uid, - groups_uid, - _current_profile_uid, - name, - ) = { - // 分离async调用和数据获取,避免借用检查问题 - let current = { - let profiles = Config::profiles().await; - let profiles_clone = profiles.latest_ref().clone(); - profiles_clone.current_mapping().await.unwrap_or_default() - }; + profile_name: name, + } +} - // 重新获取锁进行其他操作 - let profiles = Config::profiles().await; - let profiles_ref = profiles.latest_ref(); +fn process_global_items( + mut config: Mapping, + global_merge: ChainItem, + global_script: ChainItem, + profile_name: String, +) -> (Mapping, Vec, HashMap) { + let mut result_map = HashMap::new(); + let mut exists_keys = use_keys(&config); - let merge_uid = profiles_ref.current_merge().unwrap_or_default(); - let script_uid = profiles_ref.current_script().unwrap_or_default(); - let rules_uid = profiles_ref.current_rules().unwrap_or_default(); - let proxies_uid = profiles_ref.current_proxies().unwrap_or_default(); - let groups_uid = profiles_ref.current_groups().unwrap_or_default(); - let current_profile_uid = profiles_ref.get_current().unwrap_or_default(); - - let name = profiles_ref - .get_item(¤t_profile_uid) - .ok() - .and_then(|item| item.name.clone()) - .unwrap_or_default(); - - ( - current, - merge_uid, - script_uid, - rules_uid, - proxies_uid, - groups_uid, - current_profile_uid, - name, - ) - }; - - // 现在获取具体的items,此时profiles锁已经释放 - let merge = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&merge_uid).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "".into(), - data: ChainType::Merge(Mapping::new()), - }); - - let script = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&script_uid).ok().cloned() - }; - if let Some(item) = item 
{ - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "".into(), - data: ChainType::Script(tmpl::ITEM_SCRIPT.into()), - }); - - let rules = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&rules_uid).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "".into(), - data: ChainType::Rules(SeqMap::default()), - }); - - let proxies = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&proxies_uid).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "".into(), - data: ChainType::Proxies(SeqMap::default()), - }); - - let groups = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&groups_uid).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "".into(), - data: ChainType::Groups(SeqMap::default()), - }); - - let global_merge = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&"Merge".into()).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "Merge".into(), - data: ChainType::Merge(Mapping::new()), - }); - - let global_script = { - let item = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - profiles.get_item(&"Script".into()).ok().cloned() - }; - if let Some(item) = item { - >::from_async(&item).await - } else { - None - } - } - .unwrap_or_else(|| ChainItem { - uid: "Script".into(), - data: ChainType::Script(tmpl::ITEM_SCRIPT.into()), - }); - - ( - current, 
- merge, - script, - rules, - proxies, - groups, - global_merge, - global_script, - name, - ) - }; - - let mut result_map = HashMap::new(); // 保存脚本日志 - let mut exists_keys = use_keys(&config); // 保存出现过的keys - - // 全局Merge和Script if let ChainType::Merge(merge) = global_merge.data { exists_keys.extend(use_keys(&merge)); config = use_merge(merge, config.to_owned()); @@ -247,7 +280,6 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { if let ChainType::Script(script) = global_script.data { let mut logs = vec![]; - match use_script(script, config.to_owned(), profile_name.to_owned()) { Ok((res_config, res_logs)) => { exists_keys.extend(use_keys(&res_config)); @@ -256,11 +288,24 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { } Err(err) => logs.push(("exception".into(), err.to_string().into())), } - result_map.insert(global_script.uid, logs); } - // 订阅关联的Merge、Script、Rules、Proxies、Groups + (config, exists_keys, result_map) +} + +#[allow(clippy::too_many_arguments)] +fn process_profile_items( + mut config: Mapping, + mut exists_keys: Vec, + mut result_map: HashMap, + rules_item: ChainItem, + proxies_item: ChainItem, + groups_item: ChainItem, + merge_item: ChainItem, + script_item: ChainItem, + profile_name: String, +) -> (Mapping, Vec, HashMap) { if let ChainType::Rules(rules) = rules_item.data { config = use_seq(rules, config.to_owned(), "rules"); } @@ -280,7 +325,6 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { if let ChainType::Script(script) = script_item.data { let mut logs = vec![]; - match use_script(script, config.to_owned(), profile_name) { Ok((res_config, res_logs)) => { exists_keys.extend(use_keys(&res_config)); @@ -289,11 +333,20 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { } Err(err) => logs.push(("exception".into(), err.to_string().into())), } - result_map.insert(script_item.uid, logs); } - // 合并默认的config + (config, exists_keys, result_map) +} + +async fn merge_default_config( + mut config: Mapping, + clash_config: Mapping, + 
socks_enabled: bool, + http_enabled: bool, + #[cfg(not(target_os = "windows"))] redir_enabled: bool, + #[cfg(target_os = "linux")] tproxy_enabled: bool, +) -> Mapping { for (key, value) in clash_config.into_iter() { if key.as_str() == Some("tun") { let mut tun = config.get_mut("tun").map_or_else(Mapping::new, |val| { @@ -353,7 +406,14 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { } } - // 内建脚本最后跑 + config +} + +fn apply_builtin_scripts( + mut config: Mapping, + clash_core: Option, + enable_builtin: bool, +) -> Mapping { if enable_builtin { ChainItem::builtin() .into_iter() @@ -374,10 +434,10 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { }); } - config = use_tun(config, enable_tun); - config = use_sort(config); + config +} - // 应用独立的DNS配置(如果启用) +fn apply_dns_settings(mut config: Mapping, enable_dns_settings: bool) -> Mapping { if enable_dns_settings { use crate::utils::dirs; use std::fs; @@ -389,7 +449,6 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { && let Ok(dns_yaml) = fs::read_to_string(&dns_path) && let Ok(dns_config) = serde_yaml_ng::from_str::(&dns_yaml) { - // 处理hosts配置 if let Some(hosts_value) = dns_config.get("hosts") && hosts_value.is_mapping() { @@ -410,9 +469,82 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { } } + config +} + +/// Enhance mode +/// 返回最终订阅、该订阅包含的键、和script执行的结果 +pub async fn enhance() -> (Mapping, Vec, HashMap) { + // gather config values + let cfg_vals = get_config_values().await; + let ConfigValues { + clash_config, + clash_core, + enable_tun, + enable_builtin, + socks_enabled, + http_enabled, + enable_dns_settings, + #[cfg(not(target_os = "windows"))] + redir_enabled, + #[cfg(target_os = "linux")] + tproxy_enabled, + } = cfg_vals; + + // collect profile items + let profile = collect_profile_items().await; + let config = profile.config; + let merge_item = profile.merge_item; + let script_item = profile.script_item; + let rules_item = profile.rules_item; + let proxies_item = profile.proxies_item; + let 
groups_item = profile.groups_item; + let global_merge = profile.global_merge; + let global_script = profile.global_script; + let profile_name = profile.profile_name; + + // process globals + let (config, exists_keys, result_map) = + process_global_items(config, global_merge, global_script, profile_name.clone()); + + // process profile-specific items + let (config, exists_keys, result_map) = process_profile_items( + config, + exists_keys, + result_map, + rules_item, + proxies_item, + groups_item, + merge_item, + script_item, + profile_name, + ); + + // merge default clash config + let config = merge_default_config( + config, + clash_config, + socks_enabled, + http_enabled, + #[cfg(not(target_os = "windows"))] + redir_enabled, + #[cfg(target_os = "linux")] + tproxy_enabled, + ) + .await; + + // builtin scripts + let mut config = apply_builtin_scripts(config, clash_core, enable_builtin); + + config = use_tun(config, enable_tun); + config = use_sort(config); + + // dns settings + config = apply_dns_settings(config, enable_dns_settings); + let mut exists_set = HashSet::new(); exists_set.extend(exists_keys); - exists_keys = exists_set.into_iter().collect(); + let exists_keys: Vec = exists_set.into_iter().collect(); (config, exists_keys, result_map) } diff --git a/src-tauri/src/feat/config.rs b/src-tauri/src/feat/config.rs index 506f5fa6..797bd601 100644 --- a/src-tauri/src/feat/config.rs +++ b/src-tauri/src/feat/config.rs @@ -63,23 +63,19 @@ enum UpdateFlags { LighteWeight = 1 << 10, } -/// Patch Verge configuration -pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> { - Config::verge() - .await - .draft_mut() - .patch_config(patch.clone()); +fn determine_update_flags(patch: &IVerge) -> i32 { + let mut update_flags: i32 = UpdateFlags::None as i32; let tun_mode = patch.enable_tun_mode; let auto_launch = patch.enable_auto_launch; let system_proxy = patch.enable_system_proxy; let pac = patch.proxy_auto_config; - let pac_content = 
patch.pac_file_content; - let proxy_bypass = patch.system_proxy_bypass; - let language = patch.language; + let pac_content = &patch.pac_file_content; + let proxy_bypass = &patch.system_proxy_bypass; + let language = &patch.language; let mixed_port = patch.verge_mixed_port; #[cfg(target_os = "macos")] - let tray_icon = patch.tray_icon; + let tray_icon = &patch.tray_icon; #[cfg(not(target_os = "macos"))] let tray_icon: Option = None; let common_tray_icon = patch.common_tray_icon; @@ -100,145 +96,152 @@ pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> { let enable_tray_speed = patch.enable_tray_speed; let enable_tray_icon = patch.enable_tray_icon; let enable_global_hotkey = patch.enable_global_hotkey; - let tray_event = patch.tray_event; + let tray_event = &patch.tray_event; let home_cards = patch.home_cards.clone(); let enable_auto_light_weight = patch.enable_auto_light_weight_mode; let enable_external_controller = patch.enable_external_controller; - let res: std::result::Result<(), anyhow::Error> = { - // Initialize with no flags set - let mut update_flags: i32 = UpdateFlags::None as i32; - if tun_mode.is_some() { - update_flags |= UpdateFlags::ClashConfig as i32; - update_flags |= UpdateFlags::SystrayMenu as i32; - update_flags |= UpdateFlags::SystrayTooltip as i32; - update_flags |= UpdateFlags::SystrayIcon as i32; - } - if enable_global_hotkey.is_some() || home_cards.is_some() { - update_flags |= UpdateFlags::VergeConfig as i32; - } - #[cfg(not(target_os = "windows"))] - if redir_enabled.is_some() || redir_port.is_some() { - update_flags |= UpdateFlags::RestartCore as i32; - } - #[cfg(target_os = "linux")] - if tproxy_enabled.is_some() || tproxy_port.is_some() { - update_flags |= UpdateFlags::RestartCore as i32; - } - if socks_enabled.is_some() - || http_enabled.is_some() - || socks_port.is_some() - || http_port.is_some() - || mixed_port.is_some() - { - update_flags |= UpdateFlags::RestartCore as i32; - } - if auto_launch.is_some() { - 
update_flags |= UpdateFlags::Launch as i32; - } + if tun_mode.is_some() { + update_flags |= UpdateFlags::ClashConfig as i32; + update_flags |= UpdateFlags::SystrayMenu as i32; + update_flags |= UpdateFlags::SystrayTooltip as i32; + update_flags |= UpdateFlags::SystrayIcon as i32; + } + if enable_global_hotkey.is_some() || home_cards.is_some() { + update_flags |= UpdateFlags::VergeConfig as i32; + } + #[cfg(not(target_os = "windows"))] + if redir_enabled.is_some() || redir_port.is_some() { + update_flags |= UpdateFlags::RestartCore as i32; + } + #[cfg(target_os = "linux")] + if tproxy_enabled.is_some() || tproxy_port.is_some() { + update_flags |= UpdateFlags::RestartCore as i32; + } + if socks_enabled.is_some() + || http_enabled.is_some() + || socks_port.is_some() + || http_port.is_some() + || mixed_port.is_some() + { + update_flags |= UpdateFlags::RestartCore as i32; + } + if auto_launch.is_some() { + update_flags |= UpdateFlags::Launch as i32; + } - if system_proxy.is_some() { - update_flags |= UpdateFlags::SysProxy as i32; - update_flags |= UpdateFlags::SystrayMenu as i32; - update_flags |= UpdateFlags::SystrayTooltip as i32; - update_flags |= UpdateFlags::SystrayIcon as i32; - } + if system_proxy.is_some() { + update_flags |= UpdateFlags::SysProxy as i32; + update_flags |= UpdateFlags::SystrayMenu as i32; + update_flags |= UpdateFlags::SystrayTooltip as i32; + update_flags |= UpdateFlags::SystrayIcon as i32; + } - if proxy_bypass.is_some() || pac_content.is_some() || pac.is_some() { - update_flags |= UpdateFlags::SysProxy as i32; - } + if proxy_bypass.is_some() || pac_content.is_some() || pac.is_some() { + update_flags |= UpdateFlags::SysProxy as i32; + } - if language.is_some() { - update_flags |= UpdateFlags::SystrayMenu as i32; - } - if common_tray_icon.is_some() - || sysproxy_tray_icon.is_some() - || tun_tray_icon.is_some() - || tray_icon.is_some() - || enable_tray_speed.is_some() - || enable_tray_icon.is_some() - { - update_flags |= UpdateFlags::SystrayIcon 
as i32; - } + if language.is_some() { + update_flags |= UpdateFlags::SystrayMenu as i32; + } + if common_tray_icon.is_some() + || sysproxy_tray_icon.is_some() + || tun_tray_icon.is_some() + || tray_icon.is_some() + || enable_tray_speed.is_some() + || enable_tray_icon.is_some() + { + update_flags |= UpdateFlags::SystrayIcon as i32; + } - if patch.hotkeys.is_some() { - update_flags |= UpdateFlags::Hotkey as i32; - update_flags |= UpdateFlags::SystrayMenu as i32; - } + if patch.hotkeys.is_some() { + update_flags |= UpdateFlags::Hotkey as i32; + update_flags |= UpdateFlags::SystrayMenu as i32; + } - if tray_event.is_some() { - update_flags |= UpdateFlags::SystrayClickBehavior as i32; - } + if tray_event.is_some() { + update_flags |= UpdateFlags::SystrayClickBehavior as i32; + } - if enable_auto_light_weight.is_some() { - update_flags |= UpdateFlags::LighteWeight as i32; - } + if enable_auto_light_weight.is_some() { + update_flags |= UpdateFlags::LighteWeight as i32; + } - // 处理 external-controller 的开关 - if enable_external_controller.is_some() { - update_flags |= UpdateFlags::RestartCore as i32; - } + if enable_external_controller.is_some() { + update_flags |= UpdateFlags::RestartCore as i32; + } - // Process updates based on flags - if (update_flags & (UpdateFlags::RestartCore as i32)) != 0 { - Config::generate().await?; - CoreManager::global().restart_core().await?; - } - if (update_flags & (UpdateFlags::ClashConfig as i32)) != 0 { - CoreManager::global().update_config().await?; - handle::Handle::refresh_clash(); - } - if (update_flags & (UpdateFlags::VergeConfig as i32)) != 0 { - Config::verge().await.draft_mut().enable_global_hotkey = enable_global_hotkey; - handle::Handle::refresh_verge(); - } - if (update_flags & (UpdateFlags::Launch as i32)) != 0 { - sysopt::Sysopt::global().update_launch().await?; - } - if (update_flags & (UpdateFlags::SysProxy as i32)) != 0 { - sysopt::Sysopt::global().update_sysproxy().await?; - } - if (update_flags & (UpdateFlags::Hotkey as 
i32)) != 0 - && let Some(hotkeys) = patch.hotkeys - { - hotkey::Hotkey::global().update(hotkeys).await?; - } - if (update_flags & (UpdateFlags::SystrayMenu as i32)) != 0 { - tray::Tray::global().update_menu().await?; - } - if (update_flags & (UpdateFlags::SystrayIcon as i32)) != 0 { - tray::Tray::global().update_icon().await?; - } - if (update_flags & (UpdateFlags::SystrayTooltip as i32)) != 0 { - tray::Tray::global().update_tooltip().await?; - } - if (update_flags & (UpdateFlags::SystrayClickBehavior as i32)) != 0 { - tray::Tray::global().update_click_behavior().await?; - } - if (update_flags & (UpdateFlags::LighteWeight as i32)) != 0 { - if enable_auto_light_weight.unwrap_or(false) { - lightweight::enable_auto_light_weight_mode().await; - } else { - lightweight::disable_auto_light_weight_mode(); - } - } + update_flags +} - >::Ok(()) - }; - match res { - Ok(()) => { - Config::verge().await.apply(); - if !not_save_file { - // 分离数据获取和异步调用 - let verge_data = Config::verge().await.data_mut().clone(); - verge_data.save_file().await?; - } - - Ok(()) - } - Err(err) => { - Config::verge().await.discard(); - Err(err) +async fn process_terminated_flags(update_flags: i32, patch: &IVerge) -> Result<()> { + // Process updates based on flags + if (update_flags & (UpdateFlags::RestartCore as i32)) != 0 { + Config::generate().await?; + CoreManager::global().restart_core().await?; + } + if (update_flags & (UpdateFlags::ClashConfig as i32)) != 0 { + CoreManager::global().update_config().await?; + handle::Handle::refresh_clash(); + } + if (update_flags & (UpdateFlags::VergeConfig as i32)) != 0 { + Config::verge().await.draft_mut().enable_global_hotkey = patch.enable_global_hotkey; + handle::Handle::refresh_verge(); + } + if (update_flags & (UpdateFlags::Launch as i32)) != 0 { + sysopt::Sysopt::global().update_launch().await?; + } + if (update_flags & (UpdateFlags::SysProxy as i32)) != 0 { + sysopt::Sysopt::global().update_sysproxy().await?; + } + if (update_flags & 
(UpdateFlags::Hotkey as i32)) != 0 + && let Some(hotkeys) = &patch.hotkeys + { + hotkey::Hotkey::global().update(hotkeys.to_owned()).await?; + } + if (update_flags & (UpdateFlags::SystrayMenu as i32)) != 0 { + tray::Tray::global().update_menu().await?; + } + if (update_flags & (UpdateFlags::SystrayIcon as i32)) != 0 { + tray::Tray::global().update_icon().await?; + } + if (update_flags & (UpdateFlags::SystrayTooltip as i32)) != 0 { + tray::Tray::global().update_tooltip().await?; + } + if (update_flags & (UpdateFlags::SystrayClickBehavior as i32)) != 0 { + tray::Tray::global().update_click_behavior().await?; + } + if (update_flags & (UpdateFlags::LighteWeight as i32)) != 0 { + if patch.enable_auto_light_weight_mode.unwrap_or(false) { + lightweight::enable_auto_light_weight_mode().await; + } else { + lightweight::disable_auto_light_weight_mode(); } } + Ok(()) +} + +pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> { + Config::verge() + .await + .draft_mut() + .patch_config(patch.clone()); + + let update_flags = determine_update_flags(&patch); + let process_flag_result: std::result::Result<(), anyhow::Error> = { + process_terminated_flags(update_flags, &patch).await?; + Ok(()) + }; + + if let Err(err) = process_flag_result { + Config::verge().await.discard(); + return Err(err); + } + Config::verge().await.apply(); + if !not_save_file { + // 分离数据获取和异步调用 + let verge_data = Config::verge().await.data_mut().clone(); + verge_data.save_file().await?; + } + Ok(()) } diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 47e8d402..948ba3af 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -23,144 +23,125 @@ pub async fn toggle_proxy_profile(profile_index: String) { } } -/// Update a profile -/// If updating current profile, activate it -/// auto_refresh: 是否自动更新配置和刷新前端 +async fn should_update_profile(uid: String) -> Result)>> { + let profiles = Config::profiles().await; + let profiles = 
profiles.latest_ref(); + let item = profiles.get_item(&uid)?; + let is_remote = item.itype.as_ref().is_some_and(|s| s == "remote"); + + if !is_remote { + log::info!(target: "app", "[订阅更新] {uid} 不是远程订阅,跳过更新"); + Ok(None) + } else if item.url.is_none() { + log::warn!(target: "app", "[订阅更新] {uid} 缺少URL,无法更新"); + bail!("failed to get the profile item url"); + } else if !item + .option + .as_ref() + .and_then(|o| o.allow_auto_update) + .unwrap_or(true) + { + log::info!(target: "app", "[订阅更新] {} 禁止自动更新,跳过更新", uid); + Ok(None) + } else { + log::info!(target: "app", + "[订阅更新] {} 是远程订阅,URL: {}", + uid, + item.url.clone().ok_or_else(|| anyhow::anyhow!("Profile URL is None"))? + ); + Ok(Some(( + item.url + .clone() + .ok_or_else(|| anyhow::anyhow!("Profile URL is None"))?, + item.option.clone(), + ))) + } +} + +async fn perform_profile_update( + uid: String, + url: String, + opt: Option, + option: Option, +) -> Result { + log::info!(target: "app", "[订阅更新] 开始下载新的订阅内容"); + let merged_opt = PrfOption::merge(opt.clone(), option.clone()); + + match PrfItem::from_url(&url, None, None, merged_opt.clone()).await { + Ok(item) => { + log::info!(target: "app", "[订阅更新] 更新订阅配置成功"); + let profiles = Config::profiles().await; + profiles_draft_update_item_safe(uid.clone(), item).await?; + let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); + log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); + Ok(is_current) + } + Err(err) => { + log::warn!(target: "app", "[订阅更新] 正常更新失败: {err},尝试使用Clash代理更新"); + handle::Handle::notice_message("update_retry_with_clash", uid.clone()); + + let original_with_proxy = merged_opt.as_ref().and_then(|o| o.with_proxy); + let original_self_proxy = merged_opt.as_ref().and_then(|o| o.self_proxy); + + let mut fallback_opt = merged_opt.unwrap_or_default(); + fallback_opt.with_proxy = Some(false); + fallback_opt.self_proxy = Some(true); + + match PrfItem::from_url(&url, None, None, Some(fallback_opt)).await { + Ok(mut item) => { + 
log::info!(target: "app", "[订阅更新] 使用Clash代理更新成功"); + + if let Some(option) = item.option.as_mut() { + option.with_proxy = original_with_proxy; + option.self_proxy = original_self_proxy; + } + + let profiles = Config::profiles().await; + profiles_draft_update_item_safe(uid.clone(), item.clone()).await?; + + let profile_name = item.name.clone().unwrap_or_else(|| uid.clone()); + handle::Handle::notice_message("update_with_clash_proxy", profile_name); + + let is_current = Some(uid.clone()) == profiles.data_ref().get_current(); + log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); + Ok(is_current) + } + Err(retry_err) => { + log::error!(target: "app", "[订阅更新] 使用Clash代理更新仍然失败: {retry_err}"); + handle::Handle::notice_message( + "update_failed_even_with_clash", + format!("{retry_err}"), + ); + Err(retry_err) + } + } + } + } +} + pub async fn update_profile( uid: String, option: Option, auto_refresh: Option, ) -> Result<()> { logging!(info, Type::Config, "[订阅更新] 开始更新订阅 {}", uid); - let auto_refresh = auto_refresh.unwrap_or(true); // 默认为true,保持兼容性 + let auto_refresh = auto_refresh.unwrap_or(true); - let url_opt = { - let profiles = Config::profiles().await; - let profiles = profiles.latest_ref(); - let item = profiles.get_item(&uid)?; - let is_remote = item.itype.as_ref().is_some_and(|s| s == "remote"); + let url_opt = should_update_profile(uid.clone()).await?; - if !is_remote { - log::info!(target: "app", "[订阅更新] {uid} 不是远程订阅,跳过更新"); - None // 非远程订阅直接更新 - } else if item.url.is_none() { - log::warn!(target: "app", "[订阅更新] {uid} 缺少URL,无法更新"); - bail!("failed to get the profile item url"); - } else if !item - .option - .as_ref() - .and_then(|o| o.allow_auto_update) - .unwrap_or(true) - { - log::info!(target: "app", "[订阅更新] {} 禁止自动更新,跳过更新", uid); - None - } else { - log::info!(target: "app", - "[订阅更新] {} 是远程订阅,URL: {}", - uid, - item.url.clone().ok_or_else(|| anyhow::anyhow!("Profile URL is None"))? 
- ); - Some(( - item.url - .clone() - .ok_or_else(|| anyhow::anyhow!("Profile URL is None"))?, - item.option.clone(), - )) - } - }; - - let should_update = match url_opt { + let should_refresh = match url_opt { Some((url, opt)) => { - log::info!(target: "app", "[订阅更新] 开始下载新的订阅内容"); - let merged_opt = PrfOption::merge(opt.clone(), option.clone()); - - // 尝试使用正常设置更新 - match PrfItem::from_url(&url, None, None, merged_opt.clone()).await { - Ok(item) => { - log::info!(target: "app", "[订阅更新] 更新订阅配置成功"); - let profiles = Config::profiles().await; - - // 使用Send-safe helper函数 - let result = profiles_draft_update_item_safe(uid.clone(), item).await; - result?; - - let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); - log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); - is_current && auto_refresh - } - Err(err) => { - // 首次更新失败,尝试使用Clash代理 - log::warn!(target: "app", "[订阅更新] 正常更新失败: {err},尝试使用Clash代理更新"); - - // 发送通知 - handle::Handle::notice_message("update_retry_with_clash", uid.clone()); - - // 保存原始代理设置 - let original_with_proxy = merged_opt.as_ref().and_then(|o| o.with_proxy); - let original_self_proxy = merged_opt.as_ref().and_then(|o| o.self_proxy); - - // 创建使用Clash代理的选项 - let mut fallback_opt = merged_opt.unwrap_or_default(); - fallback_opt.with_proxy = Some(false); - fallback_opt.self_proxy = Some(true); - - // 使用Clash代理重试 - match PrfItem::from_url(&url, None, None, Some(fallback_opt)).await { - Ok(mut item) => { - log::info!(target: "app", "[订阅更新] 使用Clash代理更新成功"); - - // 恢复原始代理设置到item - if let Some(option) = item.option.as_mut() { - option.with_proxy = original_with_proxy; - option.self_proxy = original_self_proxy; - } - - // 更新到配置 - let profiles = Config::profiles().await; - - // 使用 Send-safe 方法进行数据操作 - profiles_draft_update_item_safe(uid.clone(), item.clone()).await?; - - // 获取配置名称用于通知 - let profile_name = item.name.clone().unwrap_or_else(|| uid.clone()); - - // 发送通知告知用户自动更新使用了回退机制 - 
handle::Handle::notice_message("update_with_clash_proxy", profile_name); - - let is_current = Some(uid.clone()) == profiles.data_ref().get_current(); - log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); - is_current && auto_refresh - } - Err(retry_err) => { - log::error!(target: "app", "[订阅更新] 使用Clash代理更新仍然失败: {retry_err}"); - handle::Handle::notice_message( - "update_failed_even_with_clash", - format!("{retry_err}"), - ); - return Err(retry_err); - } - } - } - } + perform_profile_update(uid.clone(), url, opt, option).await? && auto_refresh } None => auto_refresh, }; - if should_update { + if should_refresh { logging!(info, Type::Config, "[订阅更新] 更新内核配置"); match CoreManager::global().update_config().await { Ok(_) => { logging!(info, Type::Config, "[订阅更新] 更新成功"); handle::Handle::refresh_clash(); - // if let Err(err) = cmd::proxy::force_refresh_proxies().await { - // logging!( - // error, - // Type::Config, - // true, - // "[订阅更新] 代理组刷新失败: {}", - // err - // ); - // } } Err(err) => { logging!(error, Type::Config, "[订阅更新] 更新失败: {}", err); diff --git a/src-tauri/src/feat/proxy.rs b/src-tauri/src/feat/proxy.rs index ef063026..9178bd2d 100644 --- a/src-tauri/src/feat/proxy.rs +++ b/src-tauri/src/feat/proxy.rs @@ -7,18 +7,9 @@ use tauri_plugin_clipboard_manager::ClipboardExt; /// Toggle system proxy on/off pub async fn toggle_system_proxy() { - // 获取当前系统代理状态 - let enable = { - let verge = Config::verge().await; - - verge.latest_ref().enable_system_proxy.unwrap_or(false) - }; - // 获取自动关闭连接设置 - let auto_close_connection = { - let verge = Config::verge().await; - - verge.latest_ref().auto_close_connection.unwrap_or(false) - }; + let verge = Config::verge().await; + let enable = verge.latest_ref().enable_system_proxy.unwrap_or(false); + let auto_close_connection = verge.latest_ref().auto_close_connection.unwrap_or(false); // 如果当前系统代理即将关闭,且自动关闭连接设置为true,则关闭所有连接 if enable diff --git a/src-tauri/src/utils/i18n.rs b/src-tauri/src/utils/i18n.rs index 3ddcb5da..1d616392 
100644 --- a/src-tauri/src/utils/i18n.rs +++ b/src-tauri/src/utils/i18n.rs @@ -1,6 +1,7 @@ use crate::{config::Config, utils::dirs}; use once_cell::sync::Lazy; use serde_json::Value; +use smartstring::alias::String; use std::{fs, path::PathBuf, sync::RwLock}; use sys_locale; From 2ee8d164fd09c59a3e1469311583749db71be1f4 Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Mon, 27 Oct 2025 21:39:45 +0800 Subject: [PATCH 05/70] chore(i18n): upload zhtw.json Co-authored-by: LiMoon --- src/locales/zhtw.json | 188 +++++++++++++++++++++++++++++++++++------- 1 file changed, 159 insertions(+), 29 deletions(-) diff --git a/src/locales/zhtw.json b/src/locales/zhtw.json index 491272b1..1d4f0941 100644 --- a/src/locales/zhtw.json +++ b/src/locales/zhtw.json @@ -24,6 +24,7 @@ "Label-Logs": "日 誌", "Label-Unlock": "解 鎖", "Label-Settings": "設 定", + "Proxies": "代理", "Proxy Groups": "代理組", "Proxy Chain Mode": "鏈式代理模式", "Connect": "連線", @@ -37,8 +38,16 @@ "rule": "規則", "global": "全域", "direct": "直連", - "script": "指令碼", "Chain Proxy": "🔗 鏈式代理", + "Chain Proxy Config": "鏈式代理設定", + "Proxy Rules": "代理規則", + "Select Rules": "选择规则", + "Click nodes in order to add to proxy chain": "依序點擊節點新增到鏈式代理中", + "No proxy chain configured": "暫無鏈式代理設定", + "Proxy Order": "代理順序", + "timeout": "逾時", + "Clear All": "全部清除", + "script": "指令碼", "locate": "目前節點", "Delay check": "延遲測試", "Sort by default": "預設排序", @@ -144,6 +153,8 @@ "Group Name Already Exists": "代理組名稱已存在", "Extend Config": "擴充覆寫設定", "Extend Script": "擴充指令碼", + "Global Merge": "全域擴充覆寫設定", + "Global Script": "全域擴充指令碼", "Type": "類型", "Name": "名稱", "Descriptions": "描述", @@ -152,6 +163,7 @@ "Choose File": "選擇檔案", "Use System Proxy": "使用系統代理更新", "Use Clash Proxy": "使用內核代理更新", + "Accept Invalid Certs (Danger)": "允許無效憑證(危險)", "Refresh": "重整", "Home": "首頁", "Select": "使用", @@ -159,13 +171,18 @@ "Edit File": "編輯檔案", "Open File": "開啟檔案", "Update": "更新", + "Update via proxy": "更新(代理)", + "Update(Proxy)": "更新(代理)", "Confirm deletion": "確認刪除", "This operation is 
not reversible": "此操作無法復原", "Script Console": "指令碼控制台輸出", + "To Top": "前往頂端", + "To End": "前往末端", "Connections": "連線", "Table View": "表格檢視", "List View": "列表檢視", "Close All": "關閉全部", + "Close All Connections": "關閉全部連線", "Upload": "上傳", "Download": "下載", "Download Speed": "下載速度", @@ -199,29 +216,47 @@ "Test URL": "測試網址", "Settings": "設定", "System Setting": "系統設定", - "Tun Mode": "虛擬網卡模式", - "TUN requires Service Mode or Admin Mode": "虛擬網卡模式需要服務模式或管理員模式", + "Tun Mode": "虛擬網路介面卡模式", + "TUN requires Service Mode or Admin Mode": "虛擬網路介面卡模式需要服務模式或管理員模式", "Install Service": "安裝服務", + "Install Service failed": "安裝服務失敗", + "Uninstall Service": "解除安裝服務", + "Restart Core failed": "重新啟動內核失敗", "Reset to Default": "重設為預設值", "Tun Mode Info": "TUN(虛擬網路介面卡)模式接管系統所有流量,啟用時無需開啟系統代理", + "TUN requires Service Mode or Admin Mode": "虛擬網路介面卡模式需要安裝服務模式或以系統管理員身分執行", + "TUN Mode automatically disabled due to service unavailable": "由於服務不可使用,虛擬網路介面卡模式已自動停用", + "Failed to disable TUN Mode automatically": "自動停用虛擬網路介面卡模式失敗", "System Proxy Enabled": "系統代理已啟用,您的應用程式將透過代理存取網路", "System Proxy Disabled": "系統代理已關閉,建議大多數使用者開啟此選項", - "TUN Mode Enabled": "虛擬網卡模式已啟用,應用程式將透過虛擬網路介面卡存取網路", - "TUN Mode Disabled": "虛擬網卡模式已關閉,適用於特殊應用程式", - "TUN Mode Service Required": "虛擬網卡模式需要服務模式,請先安裝服務", - "TUN Mode Intercept Info": "虛擬網卡模式可以接管所有應用程式流量,適用於不遵循系統代理設定的特殊應用程式", - "Stack": "虛擬網卡模式堆疊", - "Device": "TUN 網路介面卡名稱", + "TUN Mode Enabled": "虛擬網路介面卡模式已啟用,應用程式將透過虛擬網路介面卡存取網路", + "TUN Mode Disabled": "虛擬網路介面卡模式已關閉,適用於特殊應用程式", + "TUN Mode Service Required": "虛擬網路介面卡模式需要服務模式,請先安裝服務", + "TUN Mode Intercept Info": "虛擬網路介面卡模式可以接管所有應用程式流量,適用於不遵循系統代理設定的特殊應用程式", + "Core communication error": "內核通信錯誤", + "Rule Mode Description": "基於預設規則智慧判斷流量走向,提供更靈活的代理策略", + "Global Mode Description": "所有流量均透過代理伺服器,適用於需要全域科學上網的場合", + "Direct Mode Description": "所有流量不透過代理伺服器,但經過Clash內核轉發連線目標伺服器,適用於需要透過內核進行分流的特定場合", + "Stack": "虛擬網路介面卡模式堆疊", + "System and Mixed Can Only be Used in Service Mode": "System 和 Mixed 只能在服務模式下使用", + "Device": "虛擬網路介面卡名稱", 
"Auto Route": "自動設定全域路由", "Strict Route": "嚴格路由", "Auto Detect Interface": "自動偵測流量輸出介面", "DNS Hijack": "DNS 綁架", "MTU": "最大傳輸單位", "Service Mode": "服務模式", + "Service Mode Info": "啟用虛擬網路介面卡模式前請先安裝服務模式,該服務啟動的內核處理程序可以獲得安裝虛擬網路介面卡(TUN 模式)的權限", + "Current State": "当前状态", + "pending": "等待中", + "installed": "已安装", + "uninstall": "未安装", "active": "作用中", "unknown": "未知", + "Information: Please make sure that the Clash Verge Service is installed and enabled": "提示資訊:請確認 Clash Verge Service 已安裝並啟用", "Install": "安裝", "Uninstall": "解除安裝", + "Disable Service Mode": "停用服務模式", "System Proxy": "系統代理", "System Proxy Info": "修改作業系統的代理設定,如果開啟失敗,可手動修改作業系統的代理設定", "System Proxy Setting": "系統代理設定", @@ -237,6 +272,7 @@ "Proxy Guard Info": "開啟以防止其他軟體修改作業系統的代理設定", "Guard Duration": "代理守護間隔", "Always use Default Bypass": "始終使用預設繞過", + "Use Bypass Check": "啟用代理繞過檢查", "Proxy Bypass": "代理繞過設定:", "Bypass": "目前繞過:", "Use PAC Mode": "使用 PAC 模式", @@ -246,6 +282,10 @@ "Administrator mode may not support auto launch": "管理員模式可能不支援開機自啟", "Silent Start": "靜默啟動", "Silent Start Info": "程序啟動時以後台模式執行,不顯示程序面板", + "Hover Jump Navigator": "懸浮跳轉導航", + "Hover Jump Navigator Info": "滑鼠懸停在字母上時自動捲動到對應代理組", + "Hover Jump Navigator Delay": "懸浮跳轉導航延遲", + "Hover Jump Navigator Delay Info": "滑鼠懸停後觸發自動跳轉前等待的毫秒數", "TG Channel": "Telegram 頻道", "Manual": "使用手冊", "Github Repo": "GitHub 專案位址", @@ -259,13 +299,17 @@ "Unified Delay Info": "開啟統一延遲時,會進行兩次延遲測試,以消除連線握手等帶來的不同類型節點的延遲差異", "Log Level": "日誌等級", "Log Level Info": "僅對日誌目錄 Service 資料夾下的內核日誌檔案生效", + "Port Config": "連接埠設定", "Random Port": "隨機連接埠", "Mixed Port": "混合代理連接埠", "Socks Port": "SOCKS 代理連接埠", "Http Port": "HTTP(S) 代理連接埠", "Redir Port": "Redir 透明代理連接埠", - "Tproxy Port": "TPROXY 透明代理連接埠", + "TPROXY Port": "TPROXY 透明代理連接埠", + "Port settings saved": "連結埠設定已儲存", + "Failed to save port settings": "連結埠設定儲存失敗", "External": "外部控制", + "Enable External Controller": "啟用外部控制器", "External Controller": "外部控制器監聽位址", "Core Secret": "API 存取金鑰", "Recommended": "建議設定", @@ -277,7 +321,9 @@ 
"Restart": "重啟內核", "Release Version": "正式版", "Alpha Version": "預覽版", + "Please Enable Service Mode": "請先安裝並啟用服務模式", "Please enter your root password": "請輸入您的 root 密碼", + "Grant": "授權", "Open UWP tool": "UWP 工具", "Open UWP tool Info": "Windows 8 開始限制 UWP 應用程式(如Microsoft Store)直接存取本機主機的網路服務,使用此工具可繞過該限制", "Update GeoData": "更新 GeoData", @@ -312,17 +358,13 @@ "Memory Usage": "內核佔用", "Memory Cleanup": "點擊清理記憶體", "Proxy Group Icon": "代理組圖示", - "Hover Jump Navigator": "懸浮跳轉導航", - "Hover Jump Navigator Info": "滑鼠懸停在字母上時自動捲動到對應代理組", - "Hover Jump Navigator Delay": "懸浮跳轉導航延遲", - "Hover Jump Navigator Delay Info": "滑鼠懸停後觸發自動跳轉前等待的毫秒數", "Nav Icon": "導覽列圖示", "Monochrome": "單色圖示", "Colorful": "彩色圖示", "Tray Icon": "系統匣圖示", "Common Tray Icon": "一般系統匣圖示", "System Proxy Tray Icon": "系統代理系統匣圖示", - "Tun Tray Icon": "虛擬網卡模式系統匣圖示", + "Tun Tray Icon": "虛擬網路介面卡模式系統匣圖示", "Miscellaneous": "雜項設定", "App Log Level": "應用程式日誌等級", "Auto Close Connections": "自動關閉連線", @@ -347,7 +389,7 @@ "clash_mode_global": "全域模式", "clash_mode_direct": "直連模式", "toggle_system_proxy": "開啟/關閉系統代理", - "toggle_tun_mode": "開啟/關閉 虛擬網卡模式", + "toggle_tun_mode": "開啟/關閉 虛擬網路介面卡模式", "entry_lightweight_mode": "進入輕量模式", "Backup Setting": "備份設定", "Backup Setting Info": "支援本機或 WebDAV 方式備份配置檔案", @@ -362,6 +404,7 @@ "Break Change Update Error": "此版本為重大更新,不支援應用程式內更新,請解除安裝後手動下載安裝", "Open Dev Tools": "開發人員工具", "Export Diagnostic Info": "匯出診斷資訊", + "Export Diagnostic Info For Issue Reporting": "匯出診斷資訊用於問題回報", "Exit": "離開", "Verge Version": "Verge 版本", "ReadOnly": "唯讀", @@ -374,26 +417,37 @@ "Profile Imported Successfully": "匯入設定檔成功", "Profile Switched": "訂閱已切換", "Profile Reactivated": "訂閱已啟用", + "Profile switch interrupted by new selection": "配置切換被新的選擇中斷", "Only YAML Files Supported": "僅支援 YAML 檔案", "Settings Applied": "設定已套用", "Stopping Core...": "內核停止中...", "Restarting Core...": "內核重啟中...", "Installing Service...": "安裝服務中...", - "Uninstall Service": "解除安裝服務", + "Uninstalling Service...": "服務解除安裝中...", "Service Installed 
Successfully": "已成功安裝服務", - "Service is ready and core restarted": "服務已就緒,內核已重啟", - "Core restarted. Service is now available.": "內核已重啟,服務已就緒", + "Service Uninstalled Successfully": "已成功解除安裝服務", + "Waiting for service to be ready...": "等待服務就緒...", + "Service not ready, retrying attempt {count}/{total}...": "服務未就緒,正在重試 {{count}}/{{total}} 次...", + "Failed to check service status, retrying attempt {count}/{total}...": "檢查服務狀態失敗,正在重試 {{count}}/{{total}} 次...", + "Service did not become ready after attempts. Proceeding with core restart.": "服務在嘗試後仍未就緒,正在重新啟動內核。", "Service was ready, but core restart might have issues or service became unavailable. Please check.": "服務已就緒,但內核重啟可能存在問題或服務已不可用。請檢查。", "Service installation or core restart encountered issues. Service might not be available. Please check system logs.": "服務安裝或內核重啟遇到問題。服務可能不可用。請檢查系統日誌。", - "Uninstalling Service...": "服務解除安裝中...", - "Waiting for service to be ready...": "等待服務就緒...", - "Service Uninstalled Successfully": "已成功解除安裝服務", + "Attempting to restart core as a fallback...": "嘗試重新啟動內核作為備援方案...", + "Fallback core restart also failed: {message}": "被園內核重新啟動也失敗了:{{message}}", + "Service is ready and core restarted": "服務已就緒,內核已重啟", + "Core restarted. 
Service is now available.": "內核已重啟,服務已就緒",
   "Proxy Daemon Duration Cannot be Less than 1 Second": "代理守護間隔時間不得低於 1 秒",
   "Invalid Bypass Format": "無效的代理繞過格式",
+  "Clash Port Modified": "Clash 連結埠已修改",
+  "Port Conflict": "連結埠衝突",
+  "Restart Application to Apply Modifications": "重新啟動 Verge 以套用修改",
+  "External Controller Address Modified": "外部控制器監聽位址已修改",
+  "Permissions Granted Successfully for _clash Core": "{{core}} 內核授權成功",
   "Core Version Updated": "內核版本已更新",
   "Clash Core Restarted": "已重啟 Clash 內核",
   "GeoData Updated": "已更新 GeoData",
   "Currently on the Latest Version": "目前已是最新版本",
+  "Already Using Latest Core": "已是最新內核版本",
   "Import Subscription Successful": "匯入訂閱成功",
   "WebDAV Server URL": "WebDAV 伺服器位址 http(s)://",
   "Username": "使用者名稱",
@@ -411,6 +465,7 @@
   "Invalid WebDAV URL": "無效的 WebDAV 伺服器位址格式",
   "Username Required": "使用者名稱不能為空",
   "Password Required": "密碼不能為空",
+  "Failed to Fetch Backups": "取得備份檔案失敗",
   "WebDAV Config Saved": "WebDAV 配置儲存成功",
   "WebDAV Config Save Failed": "儲存 WebDAV 配置失敗: {{error}}",
   "Backup Created": "備份建立成功",
@@ -424,14 +479,33 @@
   "Export Backup": "匯出備份",
   "Restore Backup": "還原備份",
   "Backup Time": "備份時間",
+  "Confirm to delete this backup file?": "確認刪除此備份檔案嗎?",
+  "Confirm to restore this backup file?": "確認還原此備份檔案嗎?",
   "Restore Success, App will restart in 1s": "還原成功,應用程式將在 1 秒後重啟",
   "Failed to fetch backup files": "取得備份檔案失敗",
-  "Profile": "配置檔案",
+  "Profile": "配置",
+  "Help": "幫助",
+  "About": "關於",
+  "Theme": "主題",
+  "Main Window": "主視窗",
+  "Group Icon": "群組圖示",
+  "Menu Icon": "選單圖示",
+  "PAC File": "PAC 檔案",
   "Web UI": "網頁介面",
+  "Hotkeys": "快捷鍵",
+  "Verge Mixed Port": "Verge 混合連結埠",
+  "Verge Socks Port": "Verge SOCKS 連結埠",
+  "Verge Redir Port": "Verge 重新導向連結埠",
+  "Verge Tproxy Port": "Verge 透明代理連結埠",
+  "Verge Port": "Verge 連結埠",
+  "Verge HTTP Enabled": "Verge HTTP 已啟用",
+  "WebDAV URL": "WebDAV 位址",
+  "WebDAV Username": "WebDAV 使用者名稱",
+  "WebDAV Password": "WebDAV 密碼",
   "Dashboard": "儀表板",
   "Restart App": "重啟應用程式",
   "Restart Clash Core": "重啟 Clash 內核",
-  "TUN Mode": 
"虛擬網卡模式", + "TUN Mode": "虛擬網路介面卡模式", "Copy Env": "複製環境變數", "Conf Dir": "配置目錄", "Core Dir": "內核目錄", @@ -471,6 +545,10 @@ "Merge File Mapping Error": "覆寫檔案映射錯誤,變更已撤銷", "Merge File Key Error": "覆寫檔案鍵錯誤,變更已撤銷", "Merge File Error": "覆寫檔案錯誤,變更已撤銷", + "Validate YAML File": "驗證YAML檔案", + "Validate Merge File": "驗證覆寫檔案", + "Validation Success": "驗證成功", + "Validation Failed": "驗證失敗", "Service Administrator Prompt": "Clash Verge 需要管理員權限安裝系統服務", "DNS Settings": "DNS 設定", "DNS settings saved": "DNS 設定已儲存", @@ -517,11 +595,14 @@ "Hosts Settings": "Hosts 設定", "Hosts": "Hosts", "Custom domain to IP or domain mapping": "自訂網域到 IP 或網域的映射,用逗號分隔", + "Enable Alpha Channel": "啟用 Alpha 頻道", + "Alpha versions may contain experimental features and bugs": "Alpha 版本可能內含實驗性功能和已知問題,常有不穩定的情況發生。", "Home Settings": "首頁設定", "Profile Card": "訂閱卡", "Current Proxy Card": "目前代理卡", "Network Settings Card": "網路設定卡", "Proxy Mode Card": "代理模式卡", + "Clash Mode Card": "Clash模式卡", "Traffic Stats Card": "流量統計卡", "Clash Info Cards": "Clash 資訊卡", "System Info Cards": "系統資訊卡", @@ -538,6 +619,7 @@ "Running Mode": "執行模式", "Sidecar Mode": "使用者模式", "Administrator Mode": "管理員模式", + "Administrator + Service Mode": "系統管理員 + 服務模式", "Last Check Update": "最後檢查更新", "Click to import subscription": "點擊匯入訂閱", "Last Update failed": "上次更新失敗", @@ -577,11 +659,59 @@ "Completed": "檢測完成", "Disallowed ISP": "不允許的網際網路服務供應商", "Originals Only": "僅限原創", + "No (IP Banned By Disney+)": "不支援(IP被Disney+禁止)", "Unsupported Country/Region": "不支援的國家/地區", + "Failed (Network Connection)": "測試失敗(網路連線問題)", + "DashboardToggledTitle": "儀錶板已切換", + "DashboardToggledBody": "已透過快速鍵切換儀錶板顯示狀態", + "ClashModeChangedTitle": "Clash模式切換", + "ClashModeChangedBody": "已切換為 {mode} 模式", + "SystemProxyToggledTitle": "系統代理切換", + "SystemProxyToggledBody": "已透過快速鍵切換系統代理狀態", + "TunModeToggledTitle": "TUN模式切換", + "TunModeToggledBody": "已透過快速鍵切換TUN模式", + "LightweightModeEnteredTitle": "輕量模式", + "LightweightModeEnteredBody": "已透過快速鍵進入輕量模式", + "AppQuitTitle": "應用程式退出", + 
"AppQuitBody": "已透過快速鍵退出應用程式", + "AppHiddenTitle": "應用程式隱藏", + "AppHiddenBody": "已透過快速鍵隱藏應用程式視窗", + "Invalid Profile URL": "無效的訂閱網址,請輸入以 http:// 或 https:// 開頭的位址", + "Saved Successfully": "儲存成功", + "External Cors": "外部跨來源資源共享", + "Enable one-click CORS for external API. Click to toggle CORS": "設定內核跨來源存取,點擊切換跨來源資源共享是否啟用", + "External Cors Settings": "外部跨來源資源共享設定", + "External Cors Configuration": "外部跨來源資源共享設定", + "Allow private network access": "允許專用網路存取", + "Allowed Origins": "允許的來源", + "Please enter a valid url": "請輸入有效的網址", + "Add": "新增", + "Always included origins: {{urls}}": "始終包含來源:{{urls}}", + "Invalid regular expression": "無效的正規表示式", + "Copy Version": "複製Verge版本號", + "Version copied to clipboard": "Verge版本已複製到剪貼簿", + "Controller address cannot be empty": "控制器位址不能為空", + "Secret cannot be empty": "存取金鑰不能為空", + "Configuration saved successfully": "設定儲存完成", + "Failed to save configuration": "設定儲存失敗", "Controller address copied to clipboard": "API 連接埠已複製到剪貼簿", - "Secret copied to clipboard": "API 密鑰已複製到剪貼簿", - "Copy to clipboard": "點擊我複製到剪貼簿", - "Port Config": "連接埠設定", - "Configuration saved successfully": "配置儲存完成", - "Enable one-click random API port and key. 
Click to randomize the port and key": "開啟一鍵隨機 API 連接埠和密鑰,點擊即可隨機化連接埠和密鑰" + "Secret copied to clipboard": "API 金鑰已複製到剪貼簿", + "Saving...": "儲存中...", + "Proxy node already exists in chain": "該節點已在鏈式代理表中", + "Detection timeout or failed": "檢測逾時或失敗", + "Batch Operations": "批次操作", + "Delete Selected Profiles": "刪除選取訂閱", + "Deselect All": "取消選取", + "Done": "完成", + "items": "項目", + "Select All": "全選", + "Selected": "已選取", + "Selected profiles deleted successfully": "選取的訂閱已成功刪除", + "Prefer System Titlebar": "優先使用系統標題欄", + "App Log Max Size": "應用程式日誌最大大小", + "App Log Max Count": "應用程式日誌最大數量", + "Allow Auto Update": "允許自動更新", + "Menu reorder mode": "選單排序模式", + "Unlock menu order": "解鎖選單排序", + "Lock menu order": "鎖定選單排序" } From 87168b6ce0d1a0766dd21d3e15ffd0033555d123 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Mon, 27 Oct 2025 22:07:23 +0800 Subject: [PATCH 06/70] perf(tray): improve menu handling localization support refactor(tray): replace string literals with MenuIds for menu event handling --- src-tauri/src/core/tray/menu_def.rs | 47 ++++++++++ src-tauri/src/core/tray/mod.rs | 137 ++++++++++------------------ src-tauri/src/utils/i18n.rs | 18 ++-- 3 files changed, 107 insertions(+), 95 deletions(-) create mode 100644 src-tauri/src/core/tray/menu_def.rs diff --git a/src-tauri/src/core/tray/menu_def.rs b/src-tauri/src/core/tray/menu_def.rs new file mode 100644 index 00000000..636665ac --- /dev/null +++ b/src-tauri/src/core/tray/menu_def.rs @@ -0,0 +1,47 @@ +use crate::utils::i18n::t; +use smartstring::alias::String; + +macro_rules! define_menu { + ($($field:ident => $const_name:ident, $id:expr, $text:expr),+ $(,)?) 
=> { + #[derive(Debug)] + pub struct MenuTexts { + $(pub $field: String,)+ + } + + pub struct MenuIds; + + impl MenuTexts { + pub async fn new() -> Self { + let ($($field,)+) = futures::join!($(t($text),)+); + Self { $($field,)+ } + } + } + + impl MenuIds { + $(pub const $const_name: &'static str = $id;)+ + } + }; +} + +define_menu! { + dashboard => DASHBOARD, "tray_dashboard", "Dashboard", + rule_mode => RULE_MODE, "tray_rule_mode", "Rule Mode", + global_mode => GLOBAL_MODE, "tray_global_mode", "Global Mode", + direct_mode => DIRECT_MODE, "tray_direct_mode", "Direct Mode", + profiles => PROFILES, "tray_profiles", "Profiles", + proxies => PROXIES, "tray_proxies", "Proxies", + system_proxy => SYSTEM_PROXY, "tray_system_proxy", "System Proxy", + tun_mode => TUN_MODE, "tray_tun_mode", "TUN Mode", + close_all_connections => CLOSE_ALL_CONNECTIONS, "tray_close_all_connections", "Close All Connections", + lightweight_mode => LIGHTWEIGHT_MODE, "tray_lightweight_mode", "LightWeight Mode", + copy_env => COPY_ENV, "tray_copy_env", "Copy Env", + conf_dir => CONF_DIR, "tray_conf_dir", "Conf Dir", + core_dir => CORE_DIR, "tray_core_dir", "Core Dir", + logs_dir => LOGS_DIR, "tray_logs_dir", "Logs Dir", + open_dir => OPEN_DIR, "tray_open_dir", "Open Dir", + restart_clash => RESTART_CLASH, "tray_restart_clash", "Restart Clash Core", + restart_app => RESTART_APP, "tray_restart_app", "Restart App", + verge_version => VERGE_VERSION, "tray_verge_version", "Verge Version", + more => MORE, "tray_more", "More", + exit => EXIT, "tray_exit", "Exit", +} diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 84f934a1..64534031 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -33,59 +33,13 @@ use tauri::{ menu::{CheckMenuItem, IsMenuItem, MenuEvent, MenuItem, PredefinedMenuItem, Submenu}, tray::{MouseButton, MouseButtonState, TrayIconEvent}, }; +mod menu_def; +use menu_def::{MenuIds, MenuTexts}; // TODO: 
是否需要将可变菜单抽离存储起来,后续直接更新对应菜单实例,无需重新创建菜单(待考虑) type ProxyMenuItem = (Option>, Vec>>); -struct MenuTexts { - dashboard: String, - rule_mode: String, - global_mode: String, - direct_mode: String, - profiles: String, - proxies: String, - system_proxy: String, - tun_mode: String, - close_all_connections: String, - lightweight_mode: String, - copy_env: String, - conf_dir: String, - core_dir: String, - logs_dir: String, - open_dir: String, - restart_clash: String, - restart_app: String, - verge_version: String, - more: String, - exit: String, -} - -async fn fetch_menu_texts() -> MenuTexts { - MenuTexts { - dashboard: t("Dashboard").await, - rule_mode: t("Rule Mode").await, - global_mode: t("Global Mode").await, - direct_mode: t("Direct Mode").await, - profiles: t("Profiles").await, - proxies: t("Proxies").await, - system_proxy: t("System Proxy").await, - tun_mode: t("TUN Mode").await, - close_all_connections: t("Close All Connections").await, - lightweight_mode: t("LightWeight Mode").await, - copy_env: t("Copy Env").await, - conf_dir: t("Conf Dir").await, - core_dir: t("Core Dir").await, - logs_dir: t("Logs Dir").await, - open_dir: t("Open Dir").await, - restart_clash: t("Restart Clash Core").await, - restart_app: t("Restart App").await, - verge_version: t("Verge Version").await, - more: t("More").await, - exit: t("Exit").await, - } -} - #[derive(Clone)] struct TrayState {} @@ -864,7 +818,7 @@ fn create_proxy_menu_item( ( Some(Submenu::with_id_and_items( app_handle, - "proxies", + MenuIds::PROXIES, proxies_text, true, &proxy_submenu_refs, @@ -947,8 +901,7 @@ async fn create_tray_menu( create_profile_menu_item(app_handle, profile_uid_and_name).await?; // Pre-fetch all localized strings - let texts = &fetch_menu_texts().await; - + let texts = &MenuTexts::new().await; // Convert to references only when needed let profile_menu_items_refs: Vec<&dyn IsMenuItem> = profile_menu_items .iter() @@ -957,7 +910,7 @@ async fn create_tray_menu( let open_window = &MenuItem::with_id( 
app_handle, - "open_window", + MenuIds::DASHBOARD, &texts.dashboard, true, hotkeys.get("open_or_close_dashboard").map(|s| s.as_str()), @@ -965,7 +918,7 @@ async fn create_tray_menu( let rule_mode = &CheckMenuItem::with_id( app_handle, - "rule_mode", + MenuIds::RULE_MODE, &texts.rule_mode, true, current_proxy_mode == "rule", @@ -974,7 +927,7 @@ async fn create_tray_menu( let global_mode = &CheckMenuItem::with_id( app_handle, - "global_mode", + MenuIds::GLOBAL_MODE, &texts.global_mode, true, current_proxy_mode == "global", @@ -983,7 +936,7 @@ async fn create_tray_menu( let direct_mode = &CheckMenuItem::with_id( app_handle, - "direct_mode", + MenuIds::DIRECT_MODE, &texts.direct_mode, true, current_proxy_mode == "direct", @@ -992,7 +945,7 @@ async fn create_tray_menu( let profiles = &Submenu::with_id_and_items( app_handle, - "profiles", + MenuIds::PROFILES, &texts.profiles, true, &profile_menu_items_refs, @@ -1015,7 +968,7 @@ async fn create_tray_menu( let system_proxy = &CheckMenuItem::with_id( app_handle, - "system_proxy", + MenuIds::SYSTEM_PROXY, &texts.system_proxy, true, system_proxy_enabled, @@ -1024,7 +977,7 @@ async fn create_tray_menu( let tun_mode = &CheckMenuItem::with_id( app_handle, - "tun_mode", + MenuIds::TUN_MODE, &texts.tun_mode, true, tun_mode_enabled, @@ -1033,26 +986,32 @@ async fn create_tray_menu( let close_all_connections = &MenuItem::with_id( app_handle, - "close_all_connections", + MenuIds::CLOSE_ALL_CONNECTIONS, &texts.close_all_connections, true, None::<&str>, )?; - let lighteweight_mode = &CheckMenuItem::with_id( + let lightweight_mode = &CheckMenuItem::with_id( app_handle, - "entry_lightweight_mode", + MenuIds::LIGHTWEIGHT_MODE, &texts.lightweight_mode, true, is_lightweight_mode, hotkeys.get("entry_lightweight_mode").map(|s| s.as_str()), )?; - let copy_env = &MenuItem::with_id(app_handle, "copy_env", &texts.copy_env, true, None::<&str>)?; + let copy_env = &MenuItem::with_id( + app_handle, + MenuIds::COPY_ENV, + &texts.copy_env, + true, + 
None::<&str>, + )?; let open_app_dir = &MenuItem::with_id( app_handle, - "open_app_dir", + MenuIds::CONF_DIR, &texts.conf_dir, true, None::<&str>, @@ -1060,7 +1019,7 @@ async fn create_tray_menu( let open_core_dir = &MenuItem::with_id( app_handle, - "open_core_dir", + MenuIds::CORE_DIR, &texts.core_dir, true, None::<&str>, @@ -1068,7 +1027,7 @@ async fn create_tray_menu( let open_logs_dir = &MenuItem::with_id( app_handle, - "open_logs_dir", + MenuIds::LOGS_DIR, &texts.logs_dir, true, None::<&str>, @@ -1076,7 +1035,7 @@ async fn create_tray_menu( let open_dir = &Submenu::with_id_and_items( app_handle, - "open_dir", + MenuIds::OPEN_DIR, &texts.open_dir, true, &[open_app_dir, open_core_dir, open_logs_dir], @@ -1084,7 +1043,7 @@ async fn create_tray_menu( let restart_clash = &MenuItem::with_id( app_handle, - "restart_clash", + MenuIds::RESTART_CLASH, &texts.restart_clash, true, None::<&str>, @@ -1092,7 +1051,7 @@ async fn create_tray_menu( let restart_app = &MenuItem::with_id( app_handle, - "restart_app", + MenuIds::RESTART_APP, &texts.restart_app, true, None::<&str>, @@ -1100,7 +1059,7 @@ async fn create_tray_menu( let app_version = &MenuItem::with_id( app_handle, - "app_version", + MenuIds::VERGE_VERSION, format!("{} {version}", &texts.verge_version), true, None::<&str>, @@ -1108,7 +1067,7 @@ async fn create_tray_menu( let more = &Submenu::with_id_and_items( app_handle, - "more", + MenuIds::MORE, &texts.more, true, &[ @@ -1121,7 +1080,7 @@ async fn create_tray_menu( let quit = &MenuItem::with_id( app_handle, - "quit", + MenuIds::EXIT, &texts.exit, true, Some("CmdOrControl+Q"), @@ -1154,7 +1113,7 @@ async fn create_tray_menu( system_proxy as &dyn IsMenuItem, tun_mode as &dyn IsMenuItem, separator, - lighteweight_mode as &dyn IsMenuItem, + lightweight_mode as &dyn IsMenuItem, copy_env as &dyn IsMenuItem, open_dir as &dyn IsMenuItem, more as &dyn IsMenuItem, @@ -1171,12 +1130,13 @@ async fn create_tray_menu( fn on_menu_event(_: &AppHandle, event: MenuEvent) { 
AsyncHandler::spawn(|| async move {
         match event.id.as_ref() {
-            mode @ ("rule_mode" | "global_mode" | "direct_mode") => {
-                let mode = &mode[0..mode.len() - 5]; // Removing the "_mode" suffix
+            mode @ (MenuIds::RULE_MODE | MenuIds::GLOBAL_MODE | MenuIds::DIRECT_MODE) => {
+                // Removing the "tray_" prefix and "_mode" suffix
+                let mode = &mode[5..mode.len() - 5];
                 logging!(info, Type::ProxyMode, "Switch Proxy Mode To: {}", mode);
                 feat::change_clash_mode(mode.into()).await;
             }
-            "open_window" => {
+            MenuIds::DASHBOARD => {
                 log::info!(target: "app", "托盘菜单点击: 打开窗口");

                 if !should_handle_tray_click() {
@@ -1186,40 +1146,41 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) {
                     WindowManager::show_main_window().await;
                 };
             }
-            "system_proxy" => {
+            MenuIds::SYSTEM_PROXY => {
                 feat::toggle_system_proxy().await;
             }
-            "tun_mode" => {
+            MenuIds::TUN_MODE => {
                 feat::toggle_tun_mode(None).await;
             }
-            "close_all_connections" => {
+            MenuIds::CLOSE_ALL_CONNECTIONS => {
                 if let Err(err) = handle::Handle::mihomo().await.close_all_connections().await {
                     log::error!(target: "app", "Failed to close all connections from tray: {err}");
                 }
             }
-            "copy_env" => feat::copy_clash_env().await,
-            "open_app_dir" => {
+            MenuIds::COPY_ENV => feat::copy_clash_env().await,
+            MenuIds::CONF_DIR => {
+                println!("Open directory submenu clicked");
                 let _ = cmd::open_app_dir().await;
             }
-            "open_core_dir" => {
+            MenuIds::CORE_DIR => {
                 let _ = cmd::open_core_dir().await;
             }
-            "open_logs_dir" => {
+            MenuIds::LOGS_DIR => {
                 let _ = cmd::open_logs_dir().await;
             }
-            "restart_clash" => feat::restart_clash_core().await,
-            "restart_app" => feat::restart_app().await,
-            "entry_lightweight_mode" => {
+            MenuIds::RESTART_CLASH => feat::restart_clash_core().await,
+            MenuIds::RESTART_APP => feat::restart_app().await,
+            MenuIds::LIGHTWEIGHT_MODE => {
                 if !should_handle_tray_click() {
                     return;
                 }
                 if !is_in_lightweight_mode() {
-                    lightweight::entry_lightweight_mode().await; // Await async function
+                    
lightweight::entry_lightweight_mode().await; } else { - lightweight::exit_lightweight_mode().await; // Await async function + lightweight::exit_lightweight_mode().await; } } - "quit" => { + MenuIds::EXIT => { feat::quit().await; } id if id.starts_with("profiles_") => { diff --git a/src-tauri/src/utils/i18n.rs b/src-tauri/src/utils/i18n.rs index 1d616392..8d2a03e6 100644 --- a/src-tauri/src/utils/i18n.rs +++ b/src-tauri/src/utils/i18n.rs @@ -34,6 +34,16 @@ pub fn get_supported_languages() -> Vec { languages } +pub async fn current_language() -> String { + Config::verge() + .await + .latest_ref() + .language + .as_deref() + .map(String::from) + .unwrap_or_else(get_system_language) +} + static TRANSLATIONS: Lazy> = Lazy::new(|| { let lang = get_system_language(); let json = load_lang_file(&lang).unwrap_or_else(|| Value::Object(Default::default())); @@ -57,13 +67,7 @@ fn get_system_language() -> String { } pub async fn t(key: &str) -> String { - let current_lang = Config::verge() - .await - .latest_ref() - .language - .as_deref() - .map(String::from) - .unwrap_or_else(get_system_language); + let current_lang = current_language().await; { if let Ok(cache) = TRANSLATIONS.read() From 713162ca3716959c098000ebc9a79c75f3742a3d Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Mon, 27 Oct 2025 23:17:29 +0800 Subject: [PATCH 07/70] perf(i18n): change TRANSLATIONS type to use Box for better memory management This reduce memory usage from 72 to 48 --- src-tauri/src/utils/i18n.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src-tauri/src/utils/i18n.rs b/src-tauri/src/utils/i18n.rs index 8d2a03e6..e7a52caa 100644 --- a/src-tauri/src/utils/i18n.rs +++ b/src-tauri/src/utils/i18n.rs @@ -44,10 +44,10 @@ pub async fn current_language() -> String { .unwrap_or_else(get_system_language) } -static TRANSLATIONS: Lazy> = Lazy::new(|| { +static TRANSLATIONS: Lazy)>> = Lazy::new(|| { let lang = get_system_language(); let 
json = load_lang_file(&lang).unwrap_or_else(|| Value::Object(Default::default())); - RwLock::new((lang, json)) + RwLock::new((lang, Box::new(json))) }); fn load_lang_file(lang: &str) -> Option { @@ -81,7 +81,7 @@ pub async fn t(key: &str) -> String { if let Some(new_json) = load_lang_file(¤t_lang) && let Ok(mut cache) = TRANSLATIONS.write() { - *cache = (current_lang.clone(), new_json); + *cache = (current_lang.clone(), Box::new(new_json)); if let Some(text) = cache.1.get(key).and_then(|val| val.as_str()) { return text.into(); @@ -92,7 +92,7 @@ pub async fn t(key: &str) -> String { && let Some(default_json) = load_lang_file(DEFAULT_LANGUAGE) && let Ok(mut cache) = TRANSLATIONS.write() { - *cache = (DEFAULT_LANGUAGE.into(), default_json); + *cache = (DEFAULT_LANGUAGE.into(), Box::new(default_json)); if let Some(text) = cache.1.get(key).and_then(|val| val.as_str()) { return text.into(); From a9eb512f20a942931ed67c8ec9bfbbf1e281306c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=9D=A4=E6=98=AF=E7=BA=B1=E9=9B=BE=E9=85=B1=E5=93=9F?= =?UTF-8?q?=EF=BD=9E?= <49941141+Dragon1573@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:00:01 +0800 Subject: [PATCH 08/70] docs(autobuild): update download links for release assets (#5224) - To match those in actual "Assets" section Signed-off-by: Dragon1573 <49941141+Dragon1573@users.noreply.github.com> --- .github/workflows/autobuild.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/autobuild.yml b/.github/workflows/autobuild.yml index f48788c5..f4dc74e4 100644 --- a/.github/workflows/autobuild.yml +++ b/.github/workflows/autobuild.yml @@ -90,20 +90,20 @@ jobs: ### Windows (不再支持Win7) #### 正常版本(推荐) - - [64位(常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup.exe) | [ARM64(不常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup.exe) + - [64位(常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup_windows.exe) | [ARM64(不常用)](${{ 
env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup.exe) #### 内置Webview2版(体积较大,仅在企业版系统或无法安装webview2时使用) - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64_fixed_webview2-setup.exe) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64_fixed_webview2-setup.exe) ### macOS - - [Apple M芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64.dmg) | [Intel芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64.dmg) + - [Apple M芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64_darwin.dmg) | [Intel芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64_darwin.dmg) ### Linux #### DEB包(Debian系) 使用 apt ./路径 安装 - - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64.deb) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64.deb) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhf.deb) + - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64_linux.deb) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64.deb) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhf.deb) #### RPM包(Redhat系) 使用 dnf ./路径 安装 - - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64.rpm) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64.rpm) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhfp.rpm) + - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.x86_64_linux.rpm) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.aarch64.rpm) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.armhfp.rpm) ### FAQ - [常见问题](https://clash-verge-rev.github.io/faq/windows.html) @@ -538,20 +538,20 @@ jobs: ### Windows (不再支持Win7) #### 正常版本(推荐) - - [64位(常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup.exe) | [ARM64(不常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup.exe) + - [64位(常用)](${{ 
env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup_windows.exe) | [ARM64(不常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup.exe) #### 内置Webview2版(体积较大,仅在企业版系统或无法安装webview2时使用) - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64_fixed_webview2-setup.exe) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64_fixed_webview2-setup.exe) ### macOS - - [Apple M芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64.dmg) | [Intel芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64.dmg) + - [Apple M芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64_darwin.dmg) | [Intel芯片](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64_darwin.dmg) ### Linux #### DEB包(Debian系) 使用 apt ./路径 安装 - - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64.deb) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64.deb) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhf.deb) + - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64_linux.deb) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64.deb) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhf.deb) #### RPM包(Redhat系) 使用 dnf ./路径 安装 - - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_amd64.rpm) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_aarch64.rpm) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_armhfp.rpm) + - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.x86_64_linux.rpm) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.aarch64.rpm) | [ARMv7](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}-1.armhfp.rpm) ### FAQ - [常见问题](https://clash-verge-rev.github.io/faq/windows.html) From f39436f1d0dd3b65a913d43e8c0fd8564d7ac207 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Tue, 28 Oct 2025 00:26:20 +0800 
Subject: [PATCH 09/70] refactor(i18n): optimize translation handling with Arc for better memory efficiency refactor(tray): change menu text storage to use Arc for improved performance refactor(service): utilize SmartString for error messages to enhance memory management --- src-tauri/src/cmd/service.rs | 3 +- src-tauri/src/core/tray/menu_def.rs | 4 +-- src-tauri/src/core/tray/mod.rs | 3 +- src-tauri/src/utils/i18n.rs | 49 ++++++++++++++++++----------- 4 files changed, 36 insertions(+), 23 deletions(-) diff --git a/src-tauri/src/cmd/service.rs b/src-tauri/src/cmd/service.rs index c2f557de..75d1a544 100644 --- a/src-tauri/src/cmd/service.rs +++ b/src-tauri/src/cmd/service.rs @@ -3,6 +3,7 @@ use crate::{ core::service::{self, SERVICE_MANAGER, ServiceStatus}, utils::i18n::t, }; +use smartstring::SmartString; async fn execute_service_operation_sync(status: ServiceStatus, op_type: &str) -> CmdResult { if let Err(e) = SERVICE_MANAGER @@ -12,7 +13,7 @@ async fn execute_service_operation_sync(status: ServiceStatus, op_type: &str) -> .await { let emsg = format!("{} Service failed: {}", op_type, e); - return Err(t(emsg.as_str()).await); + return Err(SmartString::from(&*t(emsg.as_str()).await)); } Ok(()) } diff --git a/src-tauri/src/core/tray/menu_def.rs b/src-tauri/src/core/tray/menu_def.rs index 636665ac..c2acbe25 100644 --- a/src-tauri/src/core/tray/menu_def.rs +++ b/src-tauri/src/core/tray/menu_def.rs @@ -1,11 +1,11 @@ use crate::utils::i18n::t; -use smartstring::alias::String; +use std::sync::Arc; macro_rules! define_menu { ($($field:ident => $const_name:ident, $id:expr, $text:expr),+ $(,)?) 
=> { #[derive(Debug)] pub struct MenuTexts { - $(pub $field: String,)+ + $(pub $field: Arc,)+ } pub struct MenuIds; diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 64534031..6b4eef42 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -23,6 +23,7 @@ use futures::future::join_all; use parking_lot::Mutex; use smartstring::alias::String; use std::collections::HashMap; +use std::sync::Arc; use std::{ fs, sync::atomic::{AtomicBool, Ordering}, @@ -798,7 +799,7 @@ fn create_proxy_menu_item( app_handle: &AppHandle, show_proxy_groups_inline: bool, proxy_submenus: Vec>, - proxies_text: &String, + proxies_text: &Arc, ) -> Result { // 创建代理主菜单 let (proxies_submenu, inline_proxy_items) = if show_proxy_groups_inline { diff --git a/src-tauri/src/utils/i18n.rs b/src-tauri/src/utils/i18n.rs index e7a52caa..83f6140e 100644 --- a/src-tauri/src/utils/i18n.rs +++ b/src-tauri/src/utils/i18n.rs @@ -1,12 +1,18 @@ use crate::{config::Config, utils::dirs}; use once_cell::sync::Lazy; -use serde_json::Value; use smartstring::alias::String; -use std::{fs, path::PathBuf, sync::RwLock}; +use std::{ + collections::HashMap, + fs, + path::PathBuf, + sync::{Arc, RwLock}, +}; use sys_locale; const DEFAULT_LANGUAGE: &str = "zh"; +type TranslationMap = (String, HashMap>); + fn get_locales_dir() -> Option { dirs::app_resources_dir() .map(|resource_path| resource_path.join("locales")) @@ -44,18 +50,23 @@ pub async fn current_language() -> String { .unwrap_or_else(get_system_language) } -static TRANSLATIONS: Lazy)>> = Lazy::new(|| { +static TRANSLATIONS: Lazy> = Lazy::new(|| { let lang = get_system_language(); - let json = load_lang_file(&lang).unwrap_or_else(|| Value::Object(Default::default())); - RwLock::new((lang, Box::new(json))) + let map = load_lang_file(&lang).unwrap_or_default(); + RwLock::new((lang, map)) }); -fn load_lang_file(lang: &str) -> Option { +fn load_lang_file(lang: &str) -> Option>> { let locales_dir = 
get_locales_dir()?; let file_path = locales_dir.join(format!("{lang}.json")); fs::read_to_string(file_path) .ok() - .and_then(|content| serde_json::from_str(&content).ok()) + .and_then(|content| serde_json::from_str::>(&content).ok()) + .map(|map| { + map.into_iter() + .map(|(k, v)| (k, Arc::from(v.as_str()))) + .collect() + }) } fn get_system_language() -> String { @@ -66,38 +77,38 @@ fn get_system_language() -> String { .unwrap_or_else(|| DEFAULT_LANGUAGE.into()) } -pub async fn t(key: &str) -> String { +pub async fn t(key: &str) -> Arc { let current_lang = current_language().await; { if let Ok(cache) = TRANSLATIONS.read() && cache.0 == current_lang - && let Some(text) = cache.1.get(key).and_then(|val| val.as_str()) + && let Some(text) = cache.1.get(key) { - return text.into(); + return Arc::clone(text); } } - if let Some(new_json) = load_lang_file(¤t_lang) + if let Some(new_map) = load_lang_file(¤t_lang) && let Ok(mut cache) = TRANSLATIONS.write() { - *cache = (current_lang.clone(), Box::new(new_json)); + *cache = (current_lang.clone(), new_map); - if let Some(text) = cache.1.get(key).and_then(|val| val.as_str()) { - return text.into(); + if let Some(text) = cache.1.get(key) { + return Arc::clone(text); } } if current_lang != DEFAULT_LANGUAGE - && let Some(default_json) = load_lang_file(DEFAULT_LANGUAGE) + && let Some(default_map) = load_lang_file(DEFAULT_LANGUAGE) && let Ok(mut cache) = TRANSLATIONS.write() { - *cache = (DEFAULT_LANGUAGE.into(), Box::new(default_json)); + *cache = (DEFAULT_LANGUAGE.into(), default_map); - if let Some(text) = cache.1.get(key).and_then(|val| val.as_str()) { - return text.into(); + if let Some(text) = cache.1.get(key) { + return Arc::clone(text); } } - key.into() + Arc::from(key) } From 0fcf168b085f59c87023097ef6cee3b3513d13e9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 10:47:36 +0800 Subject: [PATCH 10/70] chore(deps): update dependency axios to ^1.13.0 
(#5225) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/package.json b/package.json index d8b0b192..a5776d00 100644 --- a/package.json +++ b/package.json @@ -54,7 +54,7 @@ "@tauri-apps/plugin-updater": "2.9.0", "@types/json-schema": "^7.0.15", "ahooks": "^3.9.6", - "axios": "^1.12.2", + "axios": "^1.13.0", "dayjs": "1.11.18", "foxact": "^0.2.49", "i18next": "^25.6.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 87d4e856..5b91e577 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -69,8 +69,8 @@ importers: specifier: ^3.9.6 version: 3.9.6(react-dom@19.2.0(react@19.2.0))(react@19.2.0) axios: - specifier: ^1.12.2 - version: 1.12.2 + specifier: ^1.13.0 + version: 1.13.0 dayjs: specifier: 1.11.18 version: 1.11.18 @@ -2085,8 +2085,8 @@ packages: resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} engines: {node: '>= 0.4'} - axios@1.12.2: - resolution: {integrity: sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw==} + axios@1.13.0: + resolution: {integrity: sha512-zt40Pz4zcRXra9CVV31KeyofwiNvAbJ5B6YPz9pMJ+yOSLikvPT4Yi5LjfgjRa9CawVYBaD1JQzIVcIvBejKeA==} babel-plugin-macros@3.1.0: resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==} @@ -6297,7 +6297,7 @@ snapshots: possible-typed-array-names: 1.1.0 optional: true - axios@1.12.2: + axios@1.13.0: dependencies: follow-redirects: 1.15.9 form-data: 4.0.4 From 2af0af0837af8ebe39a62bdbf4b2572f9c8ab407 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Tue, 28 Oct 2025 14:29:47 +0800 Subject: [PATCH 11/70] refactor(tray): comment out enable_tray_icon references for future removal #5161 Since network speed display in Tray on menu has been removed --- 
src-tauri/src/config/verge.rs | 11 +++++------ src-tauri/src/feat/config.rs | 4 ++-- src/components/setting/mods/layout-viewer.tsx | 4 ++-- src/services/types.d.ts | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src-tauri/src/config/verge.rs b/src-tauri/src/config/verge.rs index 3207b57c..bfa45816 100644 --- a/src-tauri/src/config/verge.rs +++ b/src-tauri/src/config/verge.rs @@ -203,8 +203,7 @@ pub struct IVerge { pub enable_tray_speed: Option, - pub enable_tray_icon: Option, - + // pub enable_tray_icon: Option, /// show proxy groups directly on tray root menu pub tray_inline_proxy_groups: Option, @@ -419,7 +418,7 @@ impl IVerge { webdav_username: None, webdav_password: None, enable_tray_speed: Some(false), - enable_tray_icon: Some(true), + // enable_tray_icon: Some(true), tray_inline_proxy_groups: Some(false), enable_global_hotkey: Some(true), enable_auto_light_weight_mode: Some(false), @@ -515,7 +514,7 @@ impl IVerge { patch!(webdav_username); patch!(webdav_password); patch!(enable_tray_speed); - patch!(enable_tray_icon); + // patch!(enable_tray_icon); patch!(tray_inline_proxy_groups); patch!(enable_auto_light_weight_mode); patch!(auto_light_weight_minutes); @@ -609,7 +608,7 @@ pub struct IVergeResponse { pub webdav_username: Option, pub webdav_password: Option, pub enable_tray_speed: Option, - pub enable_tray_icon: Option, + // pub enable_tray_icon: Option, pub tray_inline_proxy_groups: Option, pub enable_auto_light_weight_mode: Option, pub auto_light_weight_minutes: Option, @@ -686,7 +685,7 @@ impl From for IVergeResponse { webdav_username: verge.webdav_username, webdav_password: verge.webdav_password, enable_tray_speed: verge.enable_tray_speed, - enable_tray_icon: verge.enable_tray_icon, + // enable_tray_icon: verge.enable_tray_icon, tray_inline_proxy_groups: verge.tray_inline_proxy_groups, enable_auto_light_weight_mode: verge.enable_auto_light_weight_mode, auto_light_weight_minutes: verge.auto_light_weight_minutes, diff --git 
a/src-tauri/src/feat/config.rs b/src-tauri/src/feat/config.rs index 797bd601..d2f90f13 100644 --- a/src-tauri/src/feat/config.rs +++ b/src-tauri/src/feat/config.rs @@ -94,7 +94,7 @@ fn determine_update_flags(patch: &IVerge) -> i32 { let http_enabled = patch.verge_http_enabled; let http_port = patch.verge_port; let enable_tray_speed = patch.enable_tray_speed; - let enable_tray_icon = patch.enable_tray_icon; + // let enable_tray_icon = patch.enable_tray_icon; let enable_global_hotkey = patch.enable_global_hotkey; let tray_event = &patch.tray_event; let home_cards = patch.home_cards.clone(); @@ -149,7 +149,7 @@ fn determine_update_flags(patch: &IVerge) -> i32 { || tun_tray_icon.is_some() || tray_icon.is_some() || enable_tray_speed.is_some() - || enable_tray_icon.is_some() + // || enable_tray_icon.is_some() { update_flags |= UpdateFlags::SystrayIcon as i32; } diff --git a/src/components/setting/mods/layout-viewer.tsx b/src/components/setting/mods/layout-viewer.tsx index 0c9d2c5a..d300822a 100644 --- a/src/components/setting/mods/layout-viewer.tsx +++ b/src/components/setting/mods/layout-viewer.tsx @@ -307,7 +307,7 @@ export const LayoutViewer = forwardRef((_, ref) => { )} */} - {OS === "macos" && ( + {/* {OS === "macos" && ( ((_, ref) => { - )} + )} */} Date: Tue, 28 Oct 2025 19:16:42 +0800 Subject: [PATCH 12/70] refactor(core): optimize RunningMode handling and improve state management --- src-tauri/src/core/manager/lifecycle.rs | 4 ++-- src-tauri/src/core/manager/mod.rs | 16 ++++++++++------ src-tauri/src/core/manager/state.rs | 19 ++++++++++--------- src-tauri/src/utils/logging.rs | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src-tauri/src/core/manager/lifecycle.rs b/src-tauri/src/core/manager/lifecycle.rs index 4dfbae88..90c2fcfd 100644 --- a/src-tauri/src/core/manager/lifecycle.rs +++ b/src-tauri/src/core/manager/lifecycle.rs @@ -14,7 +14,7 @@ impl CoreManager { pub async fn start_core(&self) -> Result<()> { self.prepare_startup().await?; 
- match self.get_running_mode() { + match *self.get_running_mode() { RunningMode::Service => self.start_core_by_service().await, RunningMode::NotRunning | RunningMode::Sidecar => self.start_core_by_sidecar().await, } @@ -23,7 +23,7 @@ impl CoreManager { pub async fn stop_core(&self) -> Result<()> { ClashLogger::global().clear_logs(); - match self.get_running_mode() { + match *self.get_running_mode() { RunningMode::Service => self.stop_core_by_service().await, RunningMode::Sidecar => self.stop_core_by_sidecar(), RunningMode::NotRunning => Ok(()), diff --git a/src-tauri/src/core/manager/mod.rs b/src-tauri/src/core/manager/mod.rs index 88d5cdb7..4ef122ae 100644 --- a/src-tauri/src/core/manager/mod.rs +++ b/src-tauri/src/core/manager/mod.rs @@ -11,7 +11,7 @@ use tokio::sync::Semaphore; use crate::process::CommandChildGuard; use crate::singleton_lazy; -#[derive(Debug, Clone, Copy, serde::Serialize, PartialEq, Eq)] +#[derive(Debug, serde::Serialize, PartialEq, Eq)] pub enum RunningMode { Service, Sidecar, @@ -37,14 +37,14 @@ pub struct CoreManager { #[derive(Debug)] struct State { - running_mode: RunningMode, + running_mode: Arc, child_sidecar: Option, } impl Default for State { fn default() -> Self { Self { - running_mode: RunningMode::NotRunning, + running_mode: Arc::new(RunningMode::NotRunning), child_sidecar: None, } } @@ -61,12 +61,16 @@ impl Default for CoreManager { } impl CoreManager { - pub fn get_running_mode(&self) -> RunningMode { - self.state.lock().running_mode + pub fn get_running_mode(&self) -> Arc { + Arc::clone(&self.state.lock().running_mode) } pub fn set_running_mode(&self, mode: RunningMode) { - self.state.lock().running_mode = mode; + self.state.lock().running_mode = Arc::new(mode); + } + + pub fn set_running_child_sidecar(&self, child: CommandChildGuard) { + self.state.lock().child_sidecar = Some(child); } pub async fn init(&self) -> Result<()> { diff --git a/src-tauri/src/core/manager/state.rs b/src-tauri/src/core/manager/state.rs index 
ff01a8c1..37a14a93 100644 --- a/src-tauri/src/core/manager/state.rs +++ b/src-tauri/src/core/manager/state.rs @@ -15,12 +15,13 @@ use anyhow::Result; use compact_str::CompactString; use flexi_logger::DeferredNow; use log::Level; +use scopeguard::defer; use std::collections::VecDeque; use tauri_plugin_shell::ShellExt; impl CoreManager { pub async fn get_clash_logs(&self) -> Result> { - match self.get_running_mode() { + match *self.get_running_mode() { RunningMode::Service => service::get_clash_logs_by_service().await, RunningMode::Sidecar => Ok(ClashLogger::global().get_logs().clone()), RunningMode::NotRunning => Ok(VecDeque::new()), @@ -49,11 +50,8 @@ impl CoreManager { let pid = child.pid(); logging!(trace, Type::Core, "Sidecar started with PID: {}", pid); - { - let mut state = self.state.lock(); - state.child_sidecar = Some(CommandChildGuard::new(child)); - state.running_mode = RunningMode::Sidecar; - } + self.set_running_child_sidecar(CommandChildGuard::new(child)); + self.set_running_mode(RunningMode::Sidecar); let shared_writer: SharedWriter = std::sync::Arc::new(tokio::sync::Mutex::new(sidecar_writer().await?)); @@ -93,14 +91,15 @@ impl CoreManager { pub(super) fn stop_core_by_sidecar(&self) -> Result<()> { logging!(info, Type::Core, "Stopping sidecar"); - + defer! { + self.set_running_mode(RunningMode::NotRunning); + } let mut state = self.state.lock(); if let Some(child) = state.child_sidecar.take() { let pid = child.pid(); drop(child); logging!(trace, Type::Core, "Sidecar stopped (PID: {:?})", pid); } - state.running_mode = RunningMode::NotRunning; Ok(()) } @@ -114,8 +113,10 @@ impl CoreManager { pub(super) async fn stop_core_by_service(&self) -> Result<()> { logging!(info, Type::Core, "Stopping service"); + defer! 
{ + self.set_running_mode(RunningMode::NotRunning); + } service::stop_core_by_service().await?; - self.set_running_mode(RunningMode::NotRunning); Ok(()) } } diff --git a/src-tauri/src/utils/logging.rs b/src-tauri/src/utils/logging.rs index f4c6d740..e623b099 100644 --- a/src-tauri/src/utils/logging.rs +++ b/src-tauri/src/utils/logging.rs @@ -11,7 +11,7 @@ use tokio::sync::{Mutex, MutexGuard}; pub type SharedWriter = Arc>; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq)] pub enum Type { Cmd, Core, From 9375674c912fc015e7c11435993140535233950b Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Tue, 28 Oct 2025 19:36:17 +0800 Subject: [PATCH 13/70] refactor(validate): simplify validation process management and remove unused code --- src-tauri/src/core/validate.rs | 51 +++++++++++++++------------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/src-tauri/src/core/validate.rs b/src-tauri/src/core/validate.rs index 777d6505..44673cc0 100644 --- a/src-tauri/src/core/validate.rs +++ b/src-tauri/src/core/validate.rs @@ -1,9 +1,9 @@ use anyhow::Result; +use scopeguard::defer; use smartstring::alias::String; use std::path::Path; -use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use tauri_plugin_shell::ShellExt; -use tokio::sync::Mutex; use crate::config::{Config, ConfigType}; use crate::core::handle; @@ -11,30 +11,27 @@ use crate::singleton_lazy; use crate::utils::dirs; use crate::{logging, utils::logging::Type}; -// pub enum ValidationResult { -// Valid, -// Invalid(String), -// } - -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum ValidationProcessStatus { - Ongoing, - Completed, -} - pub struct CoreConfigValidator { - // inner: Vec, - // result: ValidationResult, - process_status: Arc>, + is_processing: AtomicBool, } impl CoreConfigValidator { pub fn new() -> Self { - CoreConfigValidator { - process_status: 
Arc::new(Mutex::new(ValidationProcessStatus::Completed)), + Self { + is_processing: AtomicBool::new(false), } } + pub fn try_start(&self) -> bool { + !self.is_processing.swap(true, Ordering::AcqRel) + } + + pub fn finish(&self) { + self.is_processing.store(false, Ordering::Release) + } +} + +impl CoreConfigValidator { /// 检查文件是否为脚本文件 fn is_script_file

(path: P) -> Result where @@ -325,22 +322,18 @@ impl CoreConfigValidator { /// 验证运行时配置 pub async fn validate_config(&self) -> Result<(bool, String)> { - if *self.process_status.lock().await == ValidationProcessStatus::Ongoing { + if !self.try_start() { logging!(info, Type::Validate, "验证已在进行中,跳过新的验证请求"); return Ok((true, String::new())); } - *self.process_status.lock().await = ValidationProcessStatus::Ongoing; + defer! { + self.finish(); + } logging!(info, Type::Validate, "生成临时配置文件用于验证"); - let result = async { - let config_path = Config::generate_file(ConfigType::Check).await?; - let config_path = dirs::path_to_str(&config_path)?; - Self::validate_config_internal(config_path).await - } - .await; - - *self.process_status.lock().await = ValidationProcessStatus::Completed; - result + let config_path = Config::generate_file(ConfigType::Check).await?; + let config_path = dirs::path_to_str(&config_path)?; + Self::validate_config_internal(config_path).await } } From f41998284a0648981c19cab6f061f8bd8515f587 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Wed, 29 Oct 2025 02:41:07 +0800 Subject: [PATCH 14/70] fix(verge_patch): add tray_inline_proxy_groups handling to update flags and refresh tray --- src-tauri/src/feat/config.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src-tauri/src/feat/config.rs b/src-tauri/src/feat/config.rs index d2f90f13..4f1d190a 100644 --- a/src-tauri/src/feat/config.rs +++ b/src-tauri/src/feat/config.rs @@ -100,6 +100,7 @@ fn determine_update_flags(patch: &IVerge) -> i32 { let home_cards = patch.home_cards.clone(); let enable_auto_light_weight = patch.enable_auto_light_weight_mode; let enable_external_controller = patch.enable_external_controller; + let tray_inline_proxy_groups = patch.tray_inline_proxy_groups; if tun_mode.is_some() { update_flags |= UpdateFlags::ClashConfig as i32; @@ -171,6 +172,10 @@ fn determine_update_flags(patch: &IVerge) -> i32 { update_flags |= UpdateFlags::RestartCore 
as i32; } + if tray_inline_proxy_groups.is_some() { + update_flags |= UpdateFlags::SystrayMenu as i32; + } + update_flags } From e928089a7709455c8167c448b0920818709b8bb2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 29 Oct 2025 08:29:47 +0800 Subject: [PATCH 15/70] chore(deps): update npm dependencies (#5231) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 4 ++-- pnpm-lock.yaml | 58 +++++++++++++++++++++++++------------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/package.json b/package.json index a5776d00..ec480adb 100644 --- a/package.json +++ b/package.json @@ -54,7 +54,7 @@ "@tauri-apps/plugin-updater": "2.9.0", "@types/json-schema": "^7.0.15", "ahooks": "^3.9.6", - "axios": "^1.13.0", + "axios": "^1.13.1", "dayjs": "1.11.18", "foxact": "^0.2.49", "i18next": "^25.6.0", @@ -85,7 +85,7 @@ "@tauri-apps/cli": "2.9.1", "@types/js-yaml": "^4.0.9", "@types/lodash-es": "^4.17.12", - "@types/node": "^24.9.1", + "@types/node": "^24.9.2", "@types/react": "19.2.2", "@types/react-dom": "19.2.2", "@vitejs/plugin-legacy": "^7.2.1", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5b91e577..ecfa15fe 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -69,8 +69,8 @@ importers: specifier: ^3.9.6 version: 3.9.6(react-dom@19.2.0(react@19.2.0))(react@19.2.0) axios: - specifier: ^1.13.0 - version: 1.13.0 + specifier: ^1.13.1 + version: 1.13.1 dayjs: specifier: 1.11.18 version: 1.11.18 @@ -157,8 +157,8 @@ importers: specifier: ^4.17.12 version: 4.17.12 '@types/node': - specifier: ^24.9.1 - version: 24.9.1 + specifier: ^24.9.2 + version: 24.9.2 '@types/react': specifier: 19.2.2 version: 19.2.2 @@ -167,10 +167,10 @@ importers: version: 19.2.2(@types/react@19.2.2) '@vitejs/plugin-legacy': specifier: ^7.2.1 - version: 7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 
7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) '@vitejs/plugin-react': specifier: 5.1.0 - version: 5.1.0(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 5.1.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) adm-zip: specifier: ^0.5.16 version: 0.5.16 @@ -251,16 +251,16 @@ importers: version: 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) vite: specifier: ^7.1.12 - version: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + version: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) vite-plugin-monaco-editor: specifier: ^1.1.0 version: 1.1.0(monaco-editor@0.54.0) vite-plugin-svgr: specifier: ^4.5.0 - version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) vitest: specifier: ^4.0.4 - version: 4.0.4(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + version: 4.0.4(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) packages: @@ -1773,8 +1773,8 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} - '@types/node@24.9.1': - resolution: {integrity: sha512-QoiaXANRkSXK6p0Duvt56W208du4P9Uye9hWLWgGMDTEoKPhuenzNcC4vGUmrNkiOKTlIrBoyNQYNpSwfEZXSg==} + '@types/node@24.9.2': + resolution: {integrity: sha512-uWN8YqxXxqFMX2RqGOrumsKeti4LlmIMIyV0lgut4jx7KQBcBiW6vkDtIBvHnHIquwNfJhk8v2OtmO8zXWHfPA==} '@types/parse-json@4.0.2': resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} @@ -2085,8 +2085,8 @@ packages: resolution: 
{integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} engines: {node: '>= 0.4'} - axios@1.13.0: - resolution: {integrity: sha512-zt40Pz4zcRXra9CVV31KeyofwiNvAbJ5B6YPz9pMJ+yOSLikvPT4Yi5LjfgjRa9CawVYBaD1JQzIVcIvBejKeA==} + axios@1.13.1: + resolution: {integrity: sha512-hU4EGxxt+j7TQijx1oYdAjw4xuIp1wRQSsbMFwSthCWeBQur1eF+qJ5iQ5sN3Tw8YRzQNKb8jszgBdMDVqwJcw==} babel-plugin-macros@3.1.0: resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==} @@ -5935,7 +5935,7 @@ snapshots: '@types/ms@2.1.0': {} - '@types/node@24.9.1': + '@types/node@24.9.2': dependencies: undici-types: 7.16.0 @@ -6113,7 +6113,7 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vitejs/plugin-legacy@7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitejs/plugin-legacy@7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@babel/core': 7.28.4 '@babel/plugin-transform-dynamic-import': 7.27.1(@babel/core@7.28.4) @@ -6128,11 +6128,11 @@ snapshots: regenerator-runtime: 0.14.1 systemjs: 6.15.1 terser: 5.44.0 - vite: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - supports-color - '@vitejs/plugin-react@5.1.0(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitejs/plugin-react@5.1.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@babel/core': 7.28.4 '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.28.4) @@ -6140,7 +6140,7 @@ snapshots: '@rolldown/pluginutils': 1.0.0-beta.43 '@types/babel__core': 7.20.5 react-refresh: 0.18.0 - vite: 
7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - supports-color @@ -6153,13 +6153,13 @@ snapshots: chai: 6.2.0 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.4(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitest/mocker@4.0.4(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@vitest/spy': 4.0.4 estree-walker: 3.0.3 magic-string: 0.30.19 optionalDependencies: - vite: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) '@vitest/pretty-format@4.0.4': dependencies: @@ -6297,7 +6297,7 @@ snapshots: possible-typed-array-names: 1.1.0 optional: true - axios@1.13.0: + axios@1.13.1: dependencies: follow-redirects: 1.15.9 form-data: 4.0.4 @@ -8803,18 +8803,18 @@ snapshots: dependencies: monaco-editor: 0.54.0 - vite-plugin-svgr@4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)): + vite-plugin-svgr@4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)): dependencies: '@rollup/pluginutils': 5.2.0(rollup@4.46.2) '@svgr/core': 8.1.0(typescript@5.9.3) '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@5.9.3)) - vite: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - rollup - supports-color - typescript - vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): dependencies: esbuild: 0.25.4 fdir: 6.5.0(picomatch@4.0.3) @@ -8823,17 +8823,17 
@@ snapshots: rollup: 4.46.2 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 24.9.1 + '@types/node': 24.9.2 fsevents: 2.3.3 jiti: 2.6.1 sass: 1.93.2 terser: 5.44.0 yaml: 2.8.1 - vitest@4.0.4(@types/debug@4.1.12)(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vitest@4.0.4(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): dependencies: '@vitest/expect': 4.0.4 - '@vitest/mocker': 4.0.4(vite@7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitest/mocker': 4.0.4(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) '@vitest/pretty-format': 4.0.4 '@vitest/runner': 4.0.4 '@vitest/snapshot': 4.0.4 @@ -8850,11 +8850,11 @@ snapshots: tinyexec: 0.3.2 tinyglobby: 0.2.15 tinyrainbow: 3.0.3 - vite: 7.1.12(@types/node@24.9.1)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) why-is-node-running: 2.3.0 optionalDependencies: '@types/debug': 4.1.12 - '@types/node': 24.9.1 + '@types/node': 24.9.2 transitivePeerDependencies: - jiti - less From 2e9f6dd17476b19676f8b450122a85f5ce596729 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Wed, 29 Oct 2025 16:00:58 +0800 Subject: [PATCH 16/70] fix: prevent service duplicate start_core and early-return after stop_core; fix start failures Update clash_verge_service_ipc version to 2.0.18 --- src-tauri/Cargo.lock | 28 ++++++++++++++-------------- src-tauri/Cargo.toml | 5 +---- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 186e7ff6..afe34666 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -147,7 +147,7 @@ dependencies = [ "objc2-foundation 0.3.2", "parking_lot 0.12.5", "percent-encoding", - "windows-sys 0.60.2", + "windows-sys 0.59.0", "wl-clipboard-rs", "x11rb", ] @@ -1174,8 +1174,8 @@ 
dependencies = [ [[package]] name = "clash_verge_service_ipc" -version = "2.0.17" -source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#4cd8614fbe341fdf6a41b931ec4d500cbaa8dfde" +version = "2.0.18" +source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#381fee14ce5c69274c547b6b18819452d97fb2b2" dependencies = [ "anyhow", "compact_str", @@ -1924,7 +1924,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2180,7 +2180,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -3345,7 +3345,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.5.10", "system-configuration", "tokio", "tower-service", @@ -4425,7 +4425,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -4919,7 +4919,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d8fae84b431384b68627d0f9b3b1245fcf9f46f6c0e3dc902e9dce64edd1967" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.45.0", ] [[package]] @@ -5700,7 +5700,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.1", + "socket2 0.5.10", "thiserror 2.0.17", "tokio", "tracing", @@ -5737,9 +5737,9 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.5.10", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -6206,7 +6206,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -7709,7 +7709,7 @@ dependencies = [ "getrandom 
0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -9075,7 +9075,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.48.0", ] [[package]] diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 04302917..20c5e2b1 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -88,12 +88,9 @@ tauri-plugin-mihomo = { git = "https://github.com/clash-verge-rev/tauri-plugin-m clash_verge_logger = { git = "https://github.com/clash-verge-rev/clash-verge-logger" } async-trait = "0.1.89" smartstring = { version = "1.0.1", features = ["serde"] } -clash_verge_service_ipc = { version = "2.0.17", features = [ +clash_verge_service_ipc = { version = "2.0.18", features = [ "client", ], git = "https://github.com/clash-verge-rev/clash-verge-service-ipc" } -# clash_verge_service_ipc = { version = "2.0.17", features = [ -# "client", -# ], path = "../../clash-verge-service-ipc" } [target.'cfg(windows)'.dependencies] runas = "=1.2.0" From f4de4738f1c10088f50119359dd81f1d70318a58 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Wed, 29 Oct 2025 17:58:02 +0800 Subject: [PATCH 17/70] refactor(logger): replace ClashLogger with CLASH_LOGGER and update log handling; improve log retrieval and management --- .gitignore | 1 + src-tauri/Cargo.lock | 149 +++++++++++++++++------- src-tauri/Cargo.toml | 2 +- src-tauri/src/cmd/clash.rs | 4 +- src-tauri/src/core/logger.rs | 40 +------ src-tauri/src/core/manager/lifecycle.rs | 4 +- src-tauri/src/core/manager/state.rs | 13 +-- src-tauri/src/core/service.rs | 3 +- 8 files changed, 120 insertions(+), 96 deletions(-) diff --git a/.gitignore b/.gitignore index 96587689..9983a234 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ scripts/_env.sh .idea .old .eslintcache +target \ No 
newline at end of file diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index afe34666..34ee3c0e 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -147,11 +147,17 @@ dependencies = [ "objc2-foundation 0.3.2", "parking_lot 0.12.5", "percent-encoding", - "windows-sys 0.59.0", + "windows-sys 0.52.0", "wl-clipboard-rs", "x11rb", ] +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + [[package]] name = "arrayvec" version = "0.7.6" @@ -1164,18 +1170,21 @@ dependencies = [ [[package]] name = "clash_verge_logger" -version = "0.1.0" -source = "git+https://github.com/clash-verge-rev/clash-verge-logger#256dc7441f3d0a0c1faa89e345379b32308bc815" +version = "0.2.0" +source = "git+https://github.com/clash-verge-rev/clash-verge-logger#9bb189b5b5c4c2eee35168ff4997e8fb10901c81" dependencies = [ + "arraydeque", + "compact_str", "flexi_logger", "log", "nu-ansi-term", + "tokio", ] [[package]] name = "clash_verge_service_ipc" -version = "2.0.18" -source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#381fee14ce5c69274c547b6b18819452d97fb2b2" +version = "2.0.19" +source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#1d9b8a6f5ea9a7f8c52ffef814b51f48d6cdad33" dependencies = [ "anyhow", "compact_str", @@ -1234,7 +1243,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -1256,9 +1265,11 @@ dependencies = [ "castaway 0.2.4", "cfg-if", "itoa", + "rkyv", "rustversion", "ryu", "serde", + "smallvec", "static_assertions", ] @@ -1924,7 +1935,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2180,7 +2191,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3365,7 +3376,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core 0.58.0", ] [[package]] @@ -4244,6 +4255,26 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "munge" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e17401f259eba956ca16491461b6e8f72913a0a114e39736ce404410f915a0c" +dependencies = [ + "munge_macro", +] + +[[package]] +name = "munge_macro" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4568f25ccbd45ab5d5603dc34318c1ec56b117531781260002151b8530a9f931" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "nanoid" version = "0.4.0" @@ -4425,7 +4456,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -5644,6 +5675,26 @@ version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" +[[package]] +name = "ptr_meta" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9a0cf95a1196af61d4f1cbdab967179516d9a4a4312af1f31948f8f6224a79" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7347867d0a7e1208d93b46767be83e2b8f978c3dad35f775ac8d8847551d6fe1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "publicsuffix" version = "2.3.0" @@ -5739,7 +5790,7 @@ 
dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5757,6 +5808,15 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "rancor" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a063ea72381527c2a0561da9c80000ef822bdd7c3241b1cc1b12100e3df081ee" +dependencies = [ + "ptr_meta", +] + [[package]] name = "rand" version = "0.7.3" @@ -5998,6 +6058,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "rend" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cadadef317c2f20755a64d7fdc48f9e7178ee6b0e1f7fce33fa60f1d68a276e6" + [[package]] name = "reqwest" version = "0.12.24" @@ -6117,6 +6183,30 @@ dependencies = [ "portable-atomic-util", ] +[[package]] +name = "rkyv" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35a640b26f007713818e9a9b65d34da1cf58538207b052916a83d80e43f3ffa4" +dependencies = [ + "munge", + "ptr_meta", + "rancor", + "rend", + "rkyv_derive", +] + +[[package]] +name = "rkyv_derive" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd83f5f173ff41e00337d97f6572e416d022ef8a19f371817259ae960324c482" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "rs-snowflake" version = "0.6.0" @@ -6193,7 +6283,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -6206,7 +6296,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -7709,7 +7799,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -9157,19 +9247,6 @@ 
dependencies = [ "windows-strings 0.4.2", ] -[[package]] -name = "windows-core" -version = "0.62.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" -dependencies = [ - "windows-implement 0.60.2", - "windows-interface 0.59.3", - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - [[package]] name = "windows-future" version = "0.2.1" @@ -9276,15 +9353,6 @@ dependencies = [ "windows-link 0.1.3", ] -[[package]] -name = "windows-result" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" -dependencies = [ - "windows-link 0.2.1", -] - [[package]] name = "windows-strings" version = "0.1.0" @@ -9304,15 +9372,6 @@ dependencies = [ "windows-link 0.1.3", ] -[[package]] -name = "windows-strings" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" -dependencies = [ - "windows-link 0.2.1", -] - [[package]] name = "windows-sys" version = "0.45.0" diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 20c5e2b1..09f9b3b6 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -88,7 +88,7 @@ tauri-plugin-mihomo = { git = "https://github.com/clash-verge-rev/tauri-plugin-m clash_verge_logger = { git = "https://github.com/clash-verge-rev/clash-verge-logger" } async-trait = "0.1.89" smartstring = { version = "1.0.1", features = ["serde"] } -clash_verge_service_ipc = { version = "2.0.18", features = [ +clash_verge_service_ipc = { version = "2.0.19", features = [ "client", ], git = "https://github.com/clash-verge-rev/clash-verge-service-ipc" } diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs index 09f5e101..cf3675f7 100644 --- a/src-tauri/src/cmd/clash.rs +++ b/src-tauri/src/cmd/clash.rs @@ -1,5 +1,3 @@ -use 
std::collections::VecDeque; - use super::CmdResult; use crate::{ cmd::StringifyErr, @@ -275,7 +273,7 @@ pub async fn validate_dns_config() -> CmdResult<(bool, String)> { } #[tauri::command] -pub async fn get_clash_logs() -> CmdResult> { +pub async fn get_clash_logs() -> CmdResult> { let logs = CoreManager::global() .get_clash_logs() .await diff --git a/src-tauri/src/core/logger.rs b/src-tauri/src/core/logger.rs index ff3d8a0e..8fd38c3d 100644 --- a/src-tauri/src/core/logger.rs +++ b/src-tauri/src/core/logger.rs @@ -1,38 +1,6 @@ -use std::{collections::VecDeque, sync::Arc}; +use std::sync::Arc; -use compact_str::CompactString; -use once_cell::sync::OnceCell; -use parking_lot::{RwLock, RwLockReadGuard}; +use clash_verge_logger::AsyncLogger; +use once_cell::sync::Lazy; -const LOGS_QUEUE_LEN: usize = 100; - -pub struct ClashLogger { - logs: Arc>>, -} - -impl ClashLogger { - pub fn global() -> &'static ClashLogger { - static LOGGER: OnceCell = OnceCell::new(); - - LOGGER.get_or_init(|| ClashLogger { - logs: Arc::new(RwLock::new(VecDeque::with_capacity(LOGS_QUEUE_LEN + 10))), - }) - } - - pub fn get_logs(&self) -> RwLockReadGuard<'_, VecDeque> { - self.logs.read() - } - - pub fn append_log(&self, text: CompactString) { - let mut logs = self.logs.write(); - if logs.len() > LOGS_QUEUE_LEN { - logs.pop_front(); - } - logs.push_back(text); - } - - pub fn clear_logs(&self) { - let mut logs = self.logs.write(); - logs.clear(); - } -} +pub static CLASH_LOGGER: Lazy> = Lazy::new(|| Arc::new(AsyncLogger::new())); diff --git a/src-tauri/src/core/manager/lifecycle.rs b/src-tauri/src/core/manager/lifecycle.rs index 90c2fcfd..a766aeef 100644 --- a/src-tauri/src/core/manager/lifecycle.rs +++ b/src-tauri/src/core/manager/lifecycle.rs @@ -1,7 +1,7 @@ use super::{CoreManager, RunningMode}; use crate::{ core::{ - logger::ClashLogger, + logger::CLASH_LOGGER, service::{SERVICE_MANAGER, ServiceStatus}, }, logging, @@ -21,7 +21,7 @@ impl CoreManager { } pub async fn stop_core(&self) -> 
Result<()> { - ClashLogger::global().clear_logs(); + CLASH_LOGGER.clear_logs().await; match *self.get_running_mode() { RunningMode::Service => self.stop_core_by_service().await, diff --git a/src-tauri/src/core/manager/state.rs b/src-tauri/src/core/manager/state.rs index 37a14a93..d38a148c 100644 --- a/src-tauri/src/core/manager/state.rs +++ b/src-tauri/src/core/manager/state.rs @@ -2,7 +2,7 @@ use super::{CoreManager, RunningMode}; use crate::{ AsyncHandler, config::Config, - core::{handle, logger::ClashLogger, service}, + core::{handle, logger::CLASH_LOGGER, service}, logging, process::CommandChildGuard, utils::{ @@ -16,15 +16,14 @@ use compact_str::CompactString; use flexi_logger::DeferredNow; use log::Level; use scopeguard::defer; -use std::collections::VecDeque; use tauri_plugin_shell::ShellExt; impl CoreManager { - pub async fn get_clash_logs(&self) -> Result> { + pub async fn get_clash_logs(&self) -> Result> { match *self.get_running_mode() { RunningMode::Service => service::get_clash_logs_by_service().await, - RunningMode::Sidecar => Ok(ClashLogger::global().get_logs().clone()), - RunningMode::NotRunning => Ok(VecDeque::new()), + RunningMode::Sidecar => Ok(CLASH_LOGGER.get_logs().await), + RunningMode::NotRunning => Ok(Vec::new()), } } @@ -65,7 +64,7 @@ impl CoreManager { let message = CompactString::from(String::from_utf8_lossy(&line).as_ref()); let w = shared_writer.lock().await; write_sidecar_log(w, &mut now, Level::Error, &message); - ClashLogger::global().append_log(message); + CLASH_LOGGER.append_log(message).await; } tauri_plugin_shell::process::CommandEvent::Terminated(term) => { let mut now = DeferredNow::default(); @@ -78,7 +77,7 @@ impl CoreManager { }; let w = shared_writer.lock().await; write_sidecar_log(w, &mut now, Level::Info, &message); - ClashLogger::global().clear_logs(); + CLASH_LOGGER.clear_logs().await; break; } _ => {} diff --git a/src-tauri/src/core/service.rs b/src-tauri/src/core/service.rs index 839855ea..1361a593 100644 --- 
a/src-tauri/src/core/service.rs +++ b/src-tauri/src/core/service.rs @@ -8,7 +8,6 @@ use clash_verge_service_ipc::CoreConfig; use compact_str::CompactString; use once_cell::sync::Lazy; use std::{ - collections::VecDeque, env::current_exe, path::{Path, PathBuf}, process::Command as StdCommand, @@ -394,7 +393,7 @@ pub(super) async fn run_core_by_service(config_file: &PathBuf) -> Result<()> { start_with_existing_service(config_file).await } -pub(super) async fn get_clash_logs_by_service() -> Result> { +pub(super) async fn get_clash_logs_by_service() -> Result> { logging!(info, Type::Service, "正在获取服务模式下的 Clash 日志"); let response = clash_verge_service_ipc::get_clash_logs() From 73323edf06216b42644f9d3c9504eaf02f30fb02 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Wed, 29 Oct 2025 20:34:39 +0800 Subject: [PATCH 18/70] chore(deps): update clash_verge_service_ipc to version 2.0.20 Reduce memory usage, avoid duplicated clients --- src-tauri/Cargo.lock | 4 ++-- src-tauri/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 34ee3c0e..bbe55504 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -1183,8 +1183,8 @@ dependencies = [ [[package]] name = "clash_verge_service_ipc" -version = "2.0.19" -source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#1d9b8a6f5ea9a7f8c52ffef814b51f48d6cdad33" +version = "2.0.20" +source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#c0b6e99da27e7956047d42aee104f5c33083c970" dependencies = [ "anyhow", "compact_str", diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 09f9b3b6..bd44656e 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -88,7 +88,7 @@ tauri-plugin-mihomo = { git = "https://github.com/clash-verge-rev/tauri-plugin-m clash_verge_logger = { git = "https://github.com/clash-verge-rev/clash-verge-logger" } async-trait = "0.1.89" smartstring = { 
version = "1.0.1", features = ["serde"] } -clash_verge_service_ipc = { version = "2.0.19", features = [ +clash_verge_service_ipc = { version = "2.0.20", features = [ "client", ], git = "https://github.com/clash-verge-rev/clash-verge-service-ipc" } From d8b0e9929cd96069e053b7abe2f1692728367bbc Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Wed, 29 Oct 2025 21:09:21 +0800 Subject: [PATCH 19/70] fix: include Mihomo-go122 by default for macOS 10.15+ to resolve Intel architecture compatibility issues --- UPDATELOG.md | 1 + scripts/prebuild.mjs | 443 +++++++++++++++++++++---------------------- 2 files changed, 216 insertions(+), 228 deletions(-) diff --git a/UPDATELOG.md b/UPDATELOG.md index 2d297526..b38c286f 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -42,6 +42,7 @@ - 优化首页当前节点对MATCH规则的支持 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 +- 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题 ### 🐞 修复问题 diff --git a/scripts/prebuild.mjs b/scripts/prebuild.mjs index 1cf801c5..d127a0ca 100644 --- a/scripts/prebuild.mjs +++ b/scripts/prebuild.mjs @@ -18,7 +18,6 @@ import { log_debug, log_error, log_info, log_success } from "./utils.mjs"; * 3. Use file hash to detect changes and skip unnecessary chmod/copy operations * 4. Use --force or -f flag to force re-download and update all resources * - * This optimization significantly reduces build time for local development */ const cwd = process.cwd(); @@ -56,8 +55,7 @@ const ARCH_MAP = { const arg1 = process.argv.slice(2)[0]; const arg2 = process.argv.slice(2)[1]; -let target; -target = arg1 === "--force" || arg1 === "-f" ? arg2 : arg1; +let target = arg1 === "--force" || arg1 === "-f" ? arg2 : arg1; const { platform, arch } = target ? 
{ platform: PLATFORM_MAP[target], arch: ARCH_MAP[target] } : process; @@ -68,7 +66,9 @@ const SIDECAR_HOST = target .toString() .match(/(?<=host: ).+(?=\s*)/g)[0]; -/* ======= Version Cache Functions ======= */ +// ======================= +// Version Cache +// ======================= async function loadVersionCache() { try { if (fs.existsSync(VERSION_CACHE_FILE)) { @@ -80,7 +80,6 @@ async function loadVersionCache() { } return {}; } - async function saveVersionCache(cache) { try { await fsp.mkdir(TEMP_DIR, { recursive: true }); @@ -90,28 +89,24 @@ async function saveVersionCache(cache) { log_debug("Failed to save version cache:", err.message); } } - async function getCachedVersion(key) { const cache = await loadVersionCache(); const cached = cache[key]; if (cached && Date.now() - cached.timestamp < 3600000) { - // 1小时内有效 log_info(`Using cached version for ${key}: ${cached.version}`); return cached.version; } return null; } - async function setCachedVersion(key, version) { const cache = await loadVersionCache(); - cache[key] = { - version, - timestamp: Date.now(), - }; + cache[key] = { version, timestamp: Date.now() }; await saveVersionCache(cache); } -/* ======= File Hash Functions ======= */ +// ======================= +// Hash Cache & File Hash +// ======================= async function calculateFileHash(filePath) { try { const fileBuffer = await fsp.readFile(filePath); @@ -122,7 +117,6 @@ async function calculateFileHash(filePath) { return null; } } - async function loadHashCache() { try { if (fs.existsSync(HASH_CACHE_FILE)) { @@ -134,7 +128,6 @@ async function loadHashCache() { } return {}; } - async function saveHashCache(cache) { try { await fsp.mkdir(TEMP_DIR, { recursive: true }); @@ -144,28 +137,20 @@ async function saveHashCache(cache) { log_debug("Failed to save hash cache:", err.message); } } - async function hasFileChanged(filePath, targetPath) { if (FORCE) return true; if (!fs.existsSync(targetPath)) return true; - const hashCache = await 
loadHashCache(); const sourceHash = await calculateFileHash(filePath); const targetHash = await calculateFileHash(targetPath); - if (!sourceHash || !targetHash) return true; - const cacheKey = targetPath; const cachedHash = hashCache[cacheKey]; - if (cachedHash === sourceHash && sourceHash === targetHash) { - // 文件未变化,不输出日志 return false; } - return true; } - async function updateHashCache(targetPath) { const hashCache = await loadHashCache(); const hash = await calculateFileHash(targetPath); @@ -175,18 +160,25 @@ async function updateHashCache(targetPath) { } } -/* ======= clash meta alpha======= */ +// ======================= +// Meta maps (stable & alpha) +// ======================= const META_ALPHA_VERSION_URL = "https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha/version.txt"; const META_ALPHA_URL_PREFIX = `https://github.com/MetaCubeX/mihomo/releases/download/Prerelease-Alpha`; let META_ALPHA_VERSION; +const META_VERSION_URL = + "https://github.com/MetaCubeX/mihomo/releases/latest/download/version.txt"; +const META_URL_PREFIX = `https://github.com/MetaCubeX/mihomo/releases/download`; +let META_VERSION; + const META_ALPHA_MAP = { "win32-x64": "mihomo-windows-amd64-v2", "win32-ia32": "mihomo-windows-386", "win32-arm64": "mihomo-windows-arm64", - "darwin-x64": "mihomo-darwin-amd64-v1", - "darwin-arm64": "mihomo-darwin-arm64", + "darwin-x64": "mihomo-darwin-amd64-v1-go122", + "darwin-arm64": "mihomo-darwin-arm64-go122", "linux-x64": "mihomo-linux-amd64-v2", "linux-ia32": "mihomo-linux-386", "linux-arm64": "mihomo-linux-arm64", @@ -195,9 +187,24 @@ const META_ALPHA_MAP = { "linux-loong64": "mihomo-linux-loong64", }; -// Fetch the latest alpha release version from the version.txt file +const META_MAP = { + "win32-x64": "mihomo-windows-amd64-v2", + "win32-ia32": "mihomo-windows-386", + "win32-arm64": "mihomo-windows-arm64", + "darwin-x64": "mihomo-darwin-amd64-v2-go122", + "darwin-arm64": "mihomo-darwin-arm64-go122", + "linux-x64": 
"mihomo-linux-amd64-v2", + "linux-ia32": "mihomo-linux-386", + "linux-arm64": "mihomo-linux-arm64", + "linux-arm": "mihomo-linux-armv7", + "linux-riscv64": "mihomo-linux-riscv64", + "linux-loong64": "mihomo-linux-loong64", +}; + +// ======================= +// Fetch latest versions +// ======================= async function getLatestAlphaVersion() { - // 如果不强制更新,先尝试从缓存获取 if (!FORCE) { const cached = await getCachedVersion("META_ALPHA_VERSION"); if (cached) { @@ -205,58 +212,33 @@ async function getLatestAlphaVersion() { return; } } - const options = {}; - const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy || process.env.HTTPS_PROXY || process.env.https_proxy; + if (httpProxy) options.agent = new HttpsProxyAgent(httpProxy); - if (httpProxy) { - options.agent = new HttpsProxyAgent(httpProxy); - } try { const response = await fetch(META_ALPHA_VERSION_URL, { ...options, method: "GET", }); - let v = await response.text(); - META_ALPHA_VERSION = v.trim(); // Trim to remove extra whitespaces + if (!response.ok) + throw new Error( + `Failed to fetch ${META_ALPHA_VERSION_URL}: ${response.status}`, + ); + META_ALPHA_VERSION = (await response.text()).trim(); log_info(`Latest alpha version: ${META_ALPHA_VERSION}`); - - // 保存到缓存 await setCachedVersion("META_ALPHA_VERSION", META_ALPHA_VERSION); - } catch (error) { - log_error("Error fetching latest alpha version:", error.message); + } catch (err) { + log_error("Error fetching latest alpha version:", err.message); process.exit(1); } } -/* ======= clash meta stable ======= */ -const META_VERSION_URL = - "https://github.com/MetaCubeX/mihomo/releases/latest/download/version.txt"; -const META_URL_PREFIX = `https://github.com/MetaCubeX/mihomo/releases/download`; -let META_VERSION; - -const META_MAP = { - "win32-x64": "mihomo-windows-amd64-v2", - "win32-ia32": "mihomo-windows-386", - "win32-arm64": "mihomo-windows-arm64", - "darwin-x64": "mihomo-darwin-amd64-v2", - "darwin-arm64": "mihomo-darwin-arm64", - "linux-x64": 
"mihomo-linux-amd64-v2", - "linux-ia32": "mihomo-linux-386", - "linux-arm64": "mihomo-linux-arm64", - "linux-arm": "mihomo-linux-armv7", - "linux-riscv64": "mihomo-linux-riscv64", - "linux-loong64": "mihomo-linux-loong64", -}; - -// Fetch the latest release version from the version.txt file async function getLatestReleaseVersion() { - // 如果不强制更新,先尝试从缓存获取 if (!FORCE) { const cached = await getCachedVersion("META_VERSION"); if (cached) { @@ -264,67 +246,57 @@ async function getLatestReleaseVersion() { return; } } - const options = {}; - const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy || process.env.HTTPS_PROXY || process.env.https_proxy; + if (httpProxy) options.agent = new HttpsProxyAgent(httpProxy); - if (httpProxy) { - options.agent = new HttpsProxyAgent(httpProxy); - } try { const response = await fetch(META_VERSION_URL, { ...options, method: "GET", }); - let v = await response.text(); - META_VERSION = v.trim(); // Trim to remove extra whitespaces + if (!response.ok) + throw new Error( + `Failed to fetch ${META_VERSION_URL}: ${response.status}`, + ); + META_VERSION = (await response.text()).trim(); log_info(`Latest release version: ${META_VERSION}`); - - // 保存到缓存 await setCachedVersion("META_VERSION", META_VERSION); - } catch (error) { - log_error("Error fetching latest release version:", error.message); + } catch (err) { + log_error("Error fetching latest release version:", err.message); process.exit(1); } } -/* - * check available - */ +// ======================= +// Validate availability +// ======================= if (!META_MAP[`${platform}-${arch}`]) { - throw new Error( - `clash meta alpha unsupported platform "${platform}-${arch}"`, - ); + throw new Error(`clash meta unsupported platform "${platform}-${arch}"`); } - if (!META_ALPHA_MAP[`${platform}-${arch}`]) { throw new Error( `clash meta alpha unsupported platform "${platform}-${arch}"`, ); } -/** - * core info - */ +// ======================= +// Build meta objects +// 
======================= function clashMetaAlpha() { const name = META_ALPHA_MAP[`${platform}-${arch}`]; const isWin = platform === "win32"; const urlExt = isWin ? "zip" : "gz"; - const downloadURL = `${META_ALPHA_URL_PREFIX}/${name}-${META_ALPHA_VERSION}.${urlExt}`; - const exeFile = `${name}${isWin ? ".exe" : ""}`; - const zipFile = `${name}-${META_ALPHA_VERSION}.${urlExt}`; - return { name: "verge-mihomo-alpha", targetFile: `verge-mihomo-alpha-${SIDECAR_HOST}${isWin ? ".exe" : ""}`, - exeFile, - zipFile, - downloadURL, + exeFile: `${name}${isWin ? ".exe" : ""}`, + zipFile: `${name}-${META_ALPHA_VERSION}.${urlExt}`, + downloadURL: `${META_ALPHA_URL_PREFIX}/${name}-${META_ALPHA_VERSION}.${urlExt}`, }; } @@ -332,40 +304,83 @@ function clashMeta() { const name = META_MAP[`${platform}-${arch}`]; const isWin = platform === "win32"; const urlExt = isWin ? "zip" : "gz"; - const downloadURL = `${META_URL_PREFIX}/${META_VERSION}/${name}-${META_VERSION}.${urlExt}`; - const exeFile = `${name}${isWin ? ".exe" : ""}`; - const zipFile = `${name}-${META_VERSION}.${urlExt}`; - return { name: "verge-mihomo", targetFile: `verge-mihomo-${SIDECAR_HOST}${isWin ? ".exe" : ""}`, - exeFile, - zipFile, - downloadURL, + exeFile: `${name}${isWin ? 
".exe" : ""}`, + zipFile: `${name}-${META_VERSION}.${urlExt}`, + downloadURL: `${META_URL_PREFIX}/${META_VERSION}/${name}-${META_VERSION}.${urlExt}`, }; } -/** - * download sidecar and rename - */ + +// ======================= +// download helper (增强:status + magic bytes) +// ======================= +async function downloadFile(url, outPath) { + const options = {}; + const httpProxy = + process.env.HTTP_PROXY || + process.env.http_proxy || + process.env.HTTPS_PROXY || + process.env.https_proxy; + if (httpProxy) options.agent = new HttpsProxyAgent(httpProxy); + + const response = await fetch(url, { + ...options, + method: "GET", + headers: { "Content-Type": "application/octet-stream" }, + }); + if (!response.ok) { + const body = await response.text().catch(() => ""); + // 将 body 写到文件以便排查(可通过临时目录查看) + await fsp.mkdir(path.dirname(outPath), { recursive: true }); + await fsp.writeFile(outPath, body); + throw new Error(`Failed to download ${url}: status ${response.status}`); + } + + const buf = Buffer.from(await response.arrayBuffer()); + await fsp.mkdir(path.dirname(outPath), { recursive: true }); + + // 简单 magic 字节检查 + if (url.endsWith(".gz") || url.endsWith(".tgz")) { + if (!(buf[0] === 0x1f && buf[1] === 0x8b)) { + await fsp.writeFile(outPath, buf); + throw new Error( + `Downloaded file for ${url} is not a valid gzip (magic mismatch).`, + ); + } + } else if (url.endsWith(".zip")) { + if (!(buf[0] === 0x50 && buf[1] === 0x4b)) { + await fsp.writeFile(outPath, buf); + throw new Error( + `Downloaded file for ${url} is not a valid zip (magic mismatch).`, + ); + } + } + + await fsp.writeFile(outPath, buf); + log_success(`download finished: ${url}`); +} + +// ======================= +// resolveSidecar (支持 zip / tgz / gz) +// ======================= async function resolveSidecar(binInfo) { const { name, targetFile, zipFile, exeFile, downloadURL } = binInfo; - const sidecarDir = path.join(cwd, "src-tauri", "sidecar"); const sidecarPath = path.join(sidecarDir, targetFile); - 
await fsp.mkdir(sidecarDir, { recursive: true }); - // 检查文件是否已存在,如果存在则跳过重复下载 if (!FORCE && fs.existsSync(sidecarPath)) { - log_success(`"${name}" already exists, skipping download to save time`); + log_success(`"${name}" already exists, skipping download`); return; } const tempDir = path.join(TEMP_DIR, name); const tempZip = path.join(tempDir, zipFile); const tempExe = path.join(tempDir, exeFile); - await fsp.mkdir(tempDir, { recursive: true }); + try { if (!fs.existsSync(tempZip)) { await downloadFile(downloadURL, tempZip); @@ -374,78 +389,76 @@ async function resolveSidecar(binInfo) { if (zipFile.endsWith(".zip")) { const zip = new AdmZip(tempZip); zip.getEntries().forEach((entry) => { - log_debug(`"${name}" entry name`, entry.entryName); + log_debug(`"${name}" entry: ${entry.entryName}`); }); zip.extractAllTo(tempDir, true); - await fsp.rename(tempExe, sidecarPath); + // 尝试按 exeFile 重命名,否则找第一个可执行文件 + if (fs.existsSync(tempExe)) { + await fsp.rename(tempExe, sidecarPath); + } else { + // 搜索候选 + const files = await fsp.readdir(tempDir); + const candidate = files.find( + (f) => + f === path.basename(exeFile) || + f.endsWith(".exe") || + !f.includes("."), + ); + if (!candidate) + throw new Error(`Expected binary not found in ${tempDir}`); + await fsp.rename(path.join(tempDir, candidate), sidecarPath); + } + if (platform !== "win32") execSync(`chmod 755 ${sidecarPath}`); log_success(`unzip finished: "${name}"`); } else if (zipFile.endsWith(".tgz")) { - // tgz - await fsp.mkdir(tempDir, { recursive: true }); - await extract({ - cwd: tempDir, - file: tempZip, - //strip: 1, // 可能需要根据实际的 .tgz 文件结构调整 - }); + await extract({ cwd: tempDir, file: tempZip }); const files = await fsp.readdir(tempDir); - log_debug(`"${name}" files in tempDir:`, files); - const extractedFile = files.find((file) => file.startsWith("虚空终端-")); - if (extractedFile) { - const extractedFilePath = path.join(tempDir, extractedFile); - await fsp.rename(extractedFilePath, sidecarPath); - 
log_success(`"${name}" file renamed to "${sidecarPath}"`); - execSync(`chmod 755 ${sidecarPath}`); - log_success(`chmod binary finished: "${name}"`); - } else { - throw new Error(`Expected file not found in ${tempDir}`); - } + log_debug(`"${name}" extracted files:`, files); + // 优先寻找给定 exeFile 或已知前缀 + let extracted = files.find( + (f) => + f === path.basename(exeFile) || + f.startsWith("虚空终端-") || + !f.includes("."), + ); + if (!extracted) extracted = files[0]; + if (!extracted) throw new Error(`Expected file not found in ${tempDir}`); + await fsp.rename(path.join(tempDir, extracted), sidecarPath); + execSync(`chmod 755 ${sidecarPath}`); + log_success(`tgz processed: "${name}"`); } else { - // gz + // .gz const readStream = fs.createReadStream(tempZip); const writeStream = fs.createWriteStream(sidecarPath); await new Promise((resolve, reject) => { - const onError = (error) => { - log_error(`"${name}" gz failed:`, error.message); - reject(error); - }; readStream - .pipe(zlib.createGunzip().on("error", onError)) + .pipe(zlib.createGunzip()) + .on("error", (e) => { + log_error(`gunzip error for ${name}:`, e.message); + reject(e); + }) .pipe(writeStream) .on("finish", () => { - execSync(`chmod 755 ${sidecarPath}`); - log_success(`chmod binary finished: "${name}"`); + if (platform !== "win32") execSync(`chmod 755 ${sidecarPath}`); resolve(); }) - .on("error", onError); + .on("error", (e) => { + log_error(`write stream error for ${name}:`, e.message); + reject(e); + }); }); + log_success(`gz binary processed: "${name}"`); } } catch (err) { - // 需要删除文件 await fsp.rm(sidecarPath, { recursive: true, force: true }); throw err; } finally { - // delete temp dir await fsp.rm(tempDir, { recursive: true, force: true }); } } -const resolveSetDnsScript = () => - resolveResource({ - file: "set_dns.sh", - localPath: path.join(cwd, "scripts/set_dns.sh"), - }); -const resolveUnSetDnsScript = () => - resolveResource({ - file: "unset_dns.sh", - localPath: path.join(cwd, 
"scripts/unset_dns.sh"), - }); - -/** - * download the file to the resources dir - */ async function resolveResource(binInfo) { const { file, downloadURL, localPath } = binInfo; - const resDir = path.join(cwd, "src-tauri/resources"); const targetPath = path.join(resDir, file); @@ -465,12 +478,9 @@ async function resolveResource(binInfo) { } if (localPath) { - // 检查文件哈希是否变化 if (!(await hasFileChanged(localPath, targetPath))) { - // 文件未变化,静默跳过 return; } - await fsp.mkdir(resDir, { recursive: true }); await fsp.copyFile(localPath, targetPath); await updateHashCache(targetPath); @@ -480,44 +490,17 @@ async function resolveResource(binInfo) { log_success(`${file} finished`); } -/** - * download file and save to `path` - */ async function downloadFile(url, path) { - const options = {}; - - const httpProxy = - process.env.HTTP_PROXY || - process.env.http_proxy || - process.env.HTTPS_PROXY || - process.env.https_proxy; - - if (httpProxy) { - options.agent = new HttpsProxyAgent(httpProxy); - } - - const response = await fetch(url, { - ...options, - method: "GET", - headers: { "Content-Type": "application/octet-stream" }, - }); - const buffer = await response.arrayBuffer(); - await fsp.writeFile(path, new Uint8Array(buffer)); - - log_success(`download finished: ${url}`); -} - -// SimpleSC.dll +// SimpleSC.dll (win plugin) const resolvePlugin = async () => { const url = "https://nsis.sourceforge.io/mediawiki/images/e/ef/NSIS_Simple_Service_Plugin_Unicode_1.30.zip"; - const tempDir = path.join(TEMP_DIR, "SimpleSC"); const tempZip = path.join( tempDir, "NSIS_Simple_Service_Plugin_Unicode_1.30.zip", ); const tempDll = path.join(tempDir, "SimpleSC.dll"); - const pluginDir = path.join(process.env.APPDATA, "Local/NSIS"); + const pluginDir = path.join(process.env.APPDATA || "", "Local/NSIS"); const pluginPath = path.join(pluginDir, "SimpleSC.dll"); await fsp.mkdir(pluginDir, { recursive: true }); await fsp.mkdir(tempDir, { recursive: true }); @@ -527,18 +510,33 @@ const 
resolvePlugin = async () => { await downloadFile(url, tempZip); } const zip = new AdmZip(tempZip); - zip.getEntries().forEach((entry) => { - log_debug(`"SimpleSC" entry name`, entry.entryName); - }); + zip + .getEntries() + .forEach((entry) => log_debug(`"SimpleSC" entry`, entry.entryName)); zip.extractAllTo(tempDir, true); - await fsp.cp(tempDll, pluginPath, { recursive: true, force: true }); - log_success(`unzip finished: "SimpleSC"`); + if (fs.existsSync(tempDll)) { + await fsp.cp(tempDll, pluginPath, { recursive: true, force: true }); + log_success(`unzip finished: "SimpleSC"`); + } else { + // 如果 dll 名称不同,尝试找到 dll + const files = await fsp.readdir(tempDir); + const dll = files.find((f) => f.toLowerCase().endsWith(".dll")); + if (dll) { + await fsp.cp(path.join(tempDir, dll), pluginPath, { + recursive: true, + force: true, + }); + log_success(`unzip finished: "SimpleSC" (found ${dll})`); + } else { + throw new Error("SimpleSC.dll not found in zip"); + } + } } finally { await fsp.rm(tempDir, { recursive: true, force: true }); } }; -// service chmod +// service chmod (保留并使用 glob) const resolveServicePermission = async () => { const serviceExecutables = [ "clash-verge-service*", @@ -550,23 +548,20 @@ const resolveServicePermission = async () => { let hasChanges = false; for (let f of serviceExecutables) { - // 使用glob模块来处理通配符 const files = glob.sync(path.join(resDir, f)); for (let filePath of files) { if (fs.existsSync(filePath)) { const currentHash = await calculateFileHash(filePath); const cacheKey = `${filePath}_chmod`; - - // 检查文件哈希是否变化 if (!FORCE && hashCache[cacheKey] === currentHash) { - // 权限未变化,静默跳过 continue; } - - execSync(`chmod 755 ${filePath}`); - log_success(`chmod finished: "${filePath}"`); - - // 更新哈希缓存 + try { + execSync(`chmod 755 ${filePath}`); + log_success(`chmod finished: "${filePath}"`); + } catch (e) { + log_error(`chmod failed for ${filePath}:`, e.message); + } hashCache[cacheKey] = currentHash; hasChanges = true; } @@ -578,34 +573,22 @@ 
const resolveServicePermission = async () => { } }; -// 在 resolveResource 函数后添加新函数 +// resolve locales (从 src/locales 复制到 resources/locales,并使用 hash 检查) async function resolveLocales() { const srcLocalesDir = path.join(cwd, "src/locales"); const targetLocalesDir = path.join(cwd, "src-tauri/resources/locales"); try { - // 确保目标目录存在 await fsp.mkdir(targetLocalesDir, { recursive: true }); - - // 读取所有语言文件 const files = await fsp.readdir(srcLocalesDir); - - // 复制每个文件,只有当哈希变化时才复制 for (const file of files) { const srcPath = path.join(srcLocalesDir, file); const targetPath = path.join(targetLocalesDir, file); - - // 检查文件是否需要更新 - if (!(await hasFileChanged(srcPath, targetPath))) { - // 文件未变化,静默跳过 - continue; - } - + if (!(await hasFileChanged(srcPath, targetPath))) continue; await fsp.copyFile(srcPath, targetPath); await updateHashCache(targetPath); log_success(`Copied locale file: ${file}`); } - log_success("All locale files processed successfully"); } catch (err) { log_error("Error copying locale files:", err.message); @@ -613,34 +596,30 @@ async function resolveLocales() { } } -/** - * main - */ +// ======================= +// Other resource resolvers (service, mmdb, geosite, geoip, enableLoopback, sysproxy) +// ======================= const SERVICE_URL = `https://github.com/clash-verge-rev/clash-verge-service-ipc/releases/download/${SIDECAR_HOST}`; - const resolveService = () => { let ext = platform === "win32" ? ".exe" : ""; let suffix = platform === "linux" ? "-" + SIDECAR_HOST : ""; - resolveResource({ + return resolveResource({ file: "clash-verge-service" + suffix + ext, downloadURL: `${SERVICE_URL}/clash-verge-service${ext}`, }); }; - const resolveInstall = () => { let ext = platform === "win32" ? ".exe" : ""; let suffix = platform === "linux" ? 
"-" + SIDECAR_HOST : ""; - resolveResource({ + return resolveResource({ file: "clash-verge-service-install" + suffix + ext, downloadURL: `${SERVICE_URL}/clash-verge-service-install${ext}`, }); }; - const resolveUninstall = () => { let ext = platform === "win32" ? ".exe" : ""; let suffix = platform === "linux" ? "-" + SIDECAR_HOST : ""; - - resolveResource({ + return resolveResource({ file: "clash-verge-service-uninstall" + suffix + ext, downloadURL: `${SERVICE_URL}/clash-verge-service-uninstall${ext}`, }); @@ -666,15 +645,27 @@ const resolveEnableLoopback = () => file: "enableLoopback.exe", downloadURL: `https://github.com/Kuingsmile/uwp-tool/releases/download/latest/enableLoopback.exe`, }); - const resolveWinSysproxy = () => resolveResource({ file: "sysproxy.exe", downloadURL: `https://github.com/clash-verge-rev/sysproxy/releases/download/${arch}/sysproxy.exe`, }); +const resolveSetDnsScript = () => + resolveResource({ + file: "set_dns.sh", + localPath: path.join(cwd, "scripts/set_dns.sh"), + }); +const resolveUnSetDnsScript = () => + resolveResource({ + file: "unset_dns.sh", + localPath: path.join(cwd, "scripts/unset_dns.sh"), + }); + +// ======================= +// Tasks +// ======================= const tasks = [ - // { name: "clash", func: resolveClash, retry: 5 }, { name: "verge-mihomo-alpha", func: () => @@ -724,11 +715,7 @@ const tasks = [ retry: 5, macosOnly: true, }, - { - name: "locales", - func: resolveLocales, - retry: 2, - }, + { name: "locales", func: resolveLocales, retry: 2 }, ]; async function runTask() { From d10665091baceaa09f3ea6807a30b493ddd9ae7d Mon Sep 17 00:00:00 2001 From: oomeow Date: Wed, 29 Oct 2025 21:14:36 +0800 Subject: [PATCH 20/70] chore: update eslint ignorePattern --- eslint.config.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/eslint.config.ts b/eslint.config.ts index 8146350f..8342ff0f 100644 --- a/eslint.config.ts +++ b/eslint.config.ts @@ -94,9 +94,10 @@ export default defineConfig([ "warn", { vars: 
"all", - varsIgnorePattern: "^_+$", + varsIgnorePattern: "^_", args: "after-used", - argsIgnorePattern: "^_+$", + argsIgnorePattern: "^_", + caughtErrorsIgnorePattern: "^ignore", }, ], From fb09e6c85dfb21c8cebb3b4dc9f614775a3245fc Mon Sep 17 00:00:00 2001 From: oomeow Date: Thu, 30 Oct 2025 00:32:57 +0800 Subject: [PATCH 21/70] fix: notification can not notify frontend (#5243) --- UPDATELOG.md | 1 + src-tauri/src/core/notification.rs | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/UPDATELOG.md b/UPDATELOG.md index b38c286f..10c90113 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -73,6 +73,7 @@ - 修复首页自定义卡片在切换轻量模式时失效 - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 +- 修复后端无法通知前端事件 ## v2.4.2 diff --git a/src-tauri/src/core/notification.rs b/src-tauri/src/core/notification.rs index 08102fc0..071bcedb 100644 --- a/src-tauri/src/core/notification.rs +++ b/src-tauri/src/core/notification.rs @@ -96,10 +96,17 @@ impl NotificationSystem { let handle = Handle::global(); while !handle.is_exiting() { - match rx.recv_timeout(std::time::Duration::from_millis(100)) { + match rx.recv() { Ok(event) => Self::process_event(handle, event), - Err(mpsc::RecvTimeoutError::Disconnected) => break, - Err(mpsc::RecvTimeoutError::Timeout) => break, + Err(e) => { + logging!( + error, + Type::System, + "receive event error, stop notification worker: {}", + e + ); + break; + } } } } From 37359ffc27397cfeb97ee29bb0a3f9018ef9c8cc Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 01:40:43 +0800 Subject: [PATCH 22/70] fix: add check for allow_auto_update in timer task filtering --- src-tauri/src/core/timer.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index 29eba1fd..2ceca275 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -99,6 +99,12 @@ impl Timer { items .iter() .filter_map(|item| { + let allow_auto_update = + 
item.option.as_ref()?.allow_auto_update.unwrap_or_default(); + if !allow_auto_update { + return None; + } + let interval = item.option.as_ref()?.update_interval? as i64; let updated = item.updated? as i64; let uid = item.uid.as_ref()?; From dfcdb33e581681e4172f4a53ccab3711cf8e366c Mon Sep 17 00:00:00 2001 From: Sukka Date: Thu, 30 Oct 2025 10:19:29 +0800 Subject: [PATCH 23/70] chore: use vite-swc-react (#5246) --- package.json | 2 +- pnpm-lock.yaml | 210 +++++++++++++++++++++++++++++++----------------- vite.config.mts | 2 +- 3 files changed, 138 insertions(+), 76 deletions(-) diff --git a/package.json b/package.json index ec480adb..0c5d6caa 100644 --- a/package.json +++ b/package.json @@ -89,7 +89,7 @@ "@types/react": "19.2.2", "@types/react-dom": "19.2.2", "@vitejs/plugin-legacy": "^7.2.1", - "@vitejs/plugin-react": "5.1.0", + "@vitejs/plugin-react-swc": "^4.2.0", "adm-zip": "^0.5.16", "cli-color": "^2.0.4", "commander": "^14.0.2", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ecfa15fe..d2ad8cd6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -168,9 +168,9 @@ importers: '@vitejs/plugin-legacy': specifier: ^7.2.1 version: 7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) - '@vitejs/plugin-react': - specifier: 5.1.0 - version: 5.1.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitejs/plugin-react-swc': + specifier: ^4.2.0 + version: 4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) adm-zip: specifier: ^0.5.16 version: 0.5.16 @@ -666,18 +666,6 @@ packages: peerDependencies: '@babel/core': ^7.0.0-0 - '@babel/plugin-transform-react-jsx-self@7.27.1': - resolution: {integrity: sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - - '@babel/plugin-transform-react-jsx-source@7.27.1': - resolution: 
{integrity: sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - '@babel/plugin-transform-regenerator@7.28.1': resolution: {integrity: sha512-P0QiV/taaa3kXpLY+sXla5zec4E+4t4Aqc9ggHlfZ7a2cp8/x/Gv08jfwEtn9gnnYIMvHx6aoOZ8XJL8eU71Dg==} engines: {node: '>=6.9.0'} @@ -1621,6 +1609,81 @@ packages: peerDependencies: '@svgr/core': '*' + '@swc/core-darwin-arm64@1.14.0': + resolution: {integrity: sha512-uHPC8rlCt04nvYNczWzKVdgnRhxCa3ndKTBBbBpResOZsRmiwRAvByIGh599j+Oo6Z5eyTPrgY+XfJzVmXnN7Q==} + engines: {node: '>=10'} + cpu: [arm64] + os: [darwin] + + '@swc/core-darwin-x64@1.14.0': + resolution: {integrity: sha512-2SHrlpl68vtePRknv9shvM9YKKg7B9T13tcTg9aFCwR318QTYo+FzsKGmQSv9ox/Ua0Q2/5y2BNjieffJoo4nA==} + engines: {node: '>=10'} + cpu: [x64] + os: [darwin] + + '@swc/core-linux-arm-gnueabihf@1.14.0': + resolution: {integrity: sha512-SMH8zn01dxt809svetnxpeg/jWdpi6dqHKO3Eb11u4OzU2PK7I5uKS6gf2hx5LlTbcJMFKULZiVwjlQLe8eqtg==} + engines: {node: '>=10'} + cpu: [arm] + os: [linux] + + '@swc/core-linux-arm64-gnu@1.14.0': + resolution: {integrity: sha512-q2JRu2D8LVqGeHkmpVCljVNltG0tB4o4eYg+dElFwCS8l2Mnt9qurMCxIeo9mgoqz0ax+k7jWtIRHktnVCbjvQ==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + + '@swc/core-linux-arm64-musl@1.14.0': + resolution: {integrity: sha512-uofpVoPCEUjYIv454ZEZ3sLgMD17nIwlz2z7bsn7rl301Kt/01umFA7MscUovFfAK2IRGck6XB+uulMu6aFhKQ==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + + '@swc/core-linux-x64-gnu@1.14.0': + resolution: {integrity: sha512-quTTx1Olm05fBfv66DEBuOsOgqdypnZ/1Bh3yGXWY7ANLFeeRpCDZpljD9BSjdsNdPOlwJmEUZXMHtGm3v1TZQ==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux] + + '@swc/core-linux-x64-musl@1.14.0': + resolution: {integrity: sha512-caaNAu+aIqT8seLtCf08i8C3/UC5ttQujUjejhMcuS1/LoCKtNiUs4VekJd2UGt+pyuuSrQ6dKl8CbCfWvWeXw==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux] + + 
'@swc/core-win32-arm64-msvc@1.14.0': + resolution: {integrity: sha512-EeW3jFlT3YNckJ6V/JnTfGcX7UHGyh6/AiCPopZ1HNaGiXVCKHPpVQZicmtyr/UpqxCXLrTgjHOvyMke7YN26A==} + engines: {node: '>=10'} + cpu: [arm64] + os: [win32] + + '@swc/core-win32-ia32-msvc@1.14.0': + resolution: {integrity: sha512-dPai3KUIcihV5hfoO4QNQF5HAaw8+2bT7dvi8E5zLtecW2SfL3mUZipzampXq5FHll0RSCLzlrXnSx+dBRZIIQ==} + engines: {node: '>=10'} + cpu: [ia32] + os: [win32] + + '@swc/core-win32-x64-msvc@1.14.0': + resolution: {integrity: sha512-nm+JajGrTqUA6sEHdghDlHMNfH1WKSiuvljhdmBACW4ta4LC3gKurX2qZuiBARvPkephW9V/i5S8QPY1PzFEqg==} + engines: {node: '>=10'} + cpu: [x64] + os: [win32] + + '@swc/core@1.14.0': + resolution: {integrity: sha512-oExhY90bes5pDTVrei0xlMVosTxwd/NMafIpqsC4dMbRYZ5KB981l/CX8tMnGsagTplj/RcG9BeRYmV6/J5m3w==} + engines: {node: '>=10'} + peerDependencies: + '@swc/helpers': '>=0.5.17' + peerDependenciesMeta: + '@swc/helpers': + optional: true + + '@swc/counter@0.1.3': + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + + '@swc/types@0.1.25': + resolution: {integrity: sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==} + '@tauri-apps/api@2.9.0': resolution: {integrity: sha512-qD5tMjh7utwBk9/5PrTA/aGr3i5QaJ/Mlt7p8NilQ45WgbifUNPyKWsA63iQ8YfQq6R8ajMapU+/Q8nMcPRLNw==} @@ -1719,18 +1782,6 @@ packages: '@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} - '@types/babel__core@7.20.5': - resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} - - '@types/babel__generator@7.27.0': - resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==} - - '@types/babel__template@7.4.4': - resolution: {integrity: 
sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} - - '@types/babel__traverse@7.20.7': - resolution: {integrity: sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==} - '@types/chai@5.2.2': resolution: {integrity: sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==} @@ -1965,11 +2016,11 @@ packages: terser: ^5.16.0 vite: ^7.0.0 - '@vitejs/plugin-react@5.1.0': - resolution: {integrity: sha512-4LuWrg7EKWgQaMJfnN+wcmbAW+VSsCmqGohftWjuct47bv8uE4n/nPpq4XjJPsxgq00GGG5J8dvBczp8uxScew==} + '@vitejs/plugin-react-swc@4.2.0': + resolution: {integrity: sha512-/tesahXD1qpkGC6FzMoFOJj0RyZdw9xLELOL+6jbElwmWfwOnIVy+IfpY+o9JfD9PKaR/Eyb6DNrvbXpuvA+8Q==} engines: {node: ^20.19.0 || >=22.12.0} peerDependencies: - vite: ^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 + vite: ^4 || ^5 || ^6 || ^7 '@vitest/expect@4.0.4': resolution: {integrity: sha512-0ioMscWJtfpyH7+P82sGpAi3Si30OVV73jD+tEqXm5+rIx9LgnfdaOn45uaFkKOncABi/PHL00Yn0oW/wK4cXw==} @@ -3611,10 +3662,6 @@ packages: react: '>=16.8.0 <20.0.0' react-dom: '>=16.8.0 <20.0.0' - react-refresh@0.18.0: - resolution: {integrity: sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==} - engines: {node: '>=0.10.0'} - react-router@7.9.4: resolution: {integrity: sha512-SD3G8HKviFHg9xj7dNODUKDFgpG4xqD5nhyd0mYoB5iISepuZAvzSr8ywxgxKJ52yRzf/HWtVHc9AWwoTbljvA==} engines: {node: '>=20.0.0'} @@ -4797,16 +4844,6 @@ snapshots: '@babel/core': 7.28.4 '@babel/helper-plugin-utils': 7.27.1 - '@babel/plugin-transform-react-jsx-self@7.27.1(@babel/core@7.28.4)': - dependencies: - '@babel/core': 7.28.4 - '@babel/helper-plugin-utils': 7.27.1 - - '@babel/plugin-transform-react-jsx-source@7.27.1(@babel/core@7.28.4)': - dependencies: - '@babel/core': 7.28.4 - '@babel/helper-plugin-utils': 7.27.1 - '@babel/plugin-transform-regenerator@7.28.1(@babel/core@7.28.4)': dependencies: '@babel/core': 7.28.4 @@ 
-5791,6 +5828,58 @@ snapshots: transitivePeerDependencies: - supports-color + '@swc/core-darwin-arm64@1.14.0': + optional: true + + '@swc/core-darwin-x64@1.14.0': + optional: true + + '@swc/core-linux-arm-gnueabihf@1.14.0': + optional: true + + '@swc/core-linux-arm64-gnu@1.14.0': + optional: true + + '@swc/core-linux-arm64-musl@1.14.0': + optional: true + + '@swc/core-linux-x64-gnu@1.14.0': + optional: true + + '@swc/core-linux-x64-musl@1.14.0': + optional: true + + '@swc/core-win32-arm64-msvc@1.14.0': + optional: true + + '@swc/core-win32-ia32-msvc@1.14.0': + optional: true + + '@swc/core-win32-x64-msvc@1.14.0': + optional: true + + '@swc/core@1.14.0': + dependencies: + '@swc/counter': 0.1.3 + '@swc/types': 0.1.25 + optionalDependencies: + '@swc/core-darwin-arm64': 1.14.0 + '@swc/core-darwin-x64': 1.14.0 + '@swc/core-linux-arm-gnueabihf': 1.14.0 + '@swc/core-linux-arm64-gnu': 1.14.0 + '@swc/core-linux-arm64-musl': 1.14.0 + '@swc/core-linux-x64-gnu': 1.14.0 + '@swc/core-linux-x64-musl': 1.14.0 + '@swc/core-win32-arm64-msvc': 1.14.0 + '@swc/core-win32-ia32-msvc': 1.14.0 + '@swc/core-win32-x64-msvc': 1.14.0 + + '@swc/counter@0.1.3': {} + + '@swc/types@0.1.25': + dependencies: + '@swc/counter': 0.1.3 + '@tauri-apps/api@2.9.0': {} '@tauri-apps/cli-darwin-arm64@2.9.1': @@ -5873,27 +5962,6 @@ snapshots: tslib: 2.8.1 optional: true - '@types/babel__core@7.20.5': - dependencies: - '@babel/parser': 7.28.4 - '@babel/types': 7.28.4 - '@types/babel__generator': 7.27.0 - '@types/babel__template': 7.4.4 - '@types/babel__traverse': 7.20.7 - - '@types/babel__generator@7.27.0': - dependencies: - '@babel/types': 7.28.4 - - '@types/babel__template@7.4.4': - dependencies: - '@babel/parser': 7.28.4 - '@babel/types': 7.28.4 - - '@types/babel__traverse@7.20.7': - dependencies: - '@babel/types': 7.28.4 - '@types/chai@5.2.2': dependencies: '@types/deep-eql': 4.0.2 @@ -6132,17 +6200,13 @@ snapshots: transitivePeerDependencies: - supports-color - 
'@vitejs/plugin-react@5.1.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitejs/plugin-react-swc@4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: - '@babel/core': 7.28.4 - '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.28.4) - '@babel/plugin-transform-react-jsx-source': 7.27.1(@babel/core@7.28.4) '@rolldown/pluginutils': 1.0.0-beta.43 - '@types/babel__core': 7.20.5 - react-refresh: 0.18.0 + '@swc/core': 1.14.0 vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - - supports-color + - '@swc/helpers' '@vitest/expect@4.0.4': dependencies: @@ -8179,8 +8243,6 @@ snapshots: react: 19.2.0 react-dom: 19.2.0(react@19.2.0) - react-refresh@0.18.0: {} - react-router@7.9.4(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: cookie: 1.0.2 diff --git a/vite.config.mts b/vite.config.mts index 6082ee42..d7307c6d 100644 --- a/vite.config.mts +++ b/vite.config.mts @@ -1,7 +1,7 @@ import path from "node:path"; import legacy from "@vitejs/plugin-legacy"; -import react from "@vitejs/plugin-react"; +import react from "@vitejs/plugin-react-swc"; import monacoEditorPlugin, { type IMonacoEditorOpts, } from "vite-plugin-monaco-editor"; From d209238009a1684f20749ed3f5969bf5d9db0ba2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 10:24:35 +0800 Subject: [PATCH 24/70] chore(deps): update npm dependencies (#5245) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 8 +- pnpm-lock.yaml | 222 ++++++++++++++++++++++++------------------------- 2 files changed, 115 insertions(+), 115 deletions(-) diff --git a/package.json b/package.json index 0c5d6caa..051e0dc8 100644 --- a/package.json +++ b/package.json @@ -43,7 +43,7 @@ "@mui/icons-material": "^7.3.4", "@mui/lab": "7.0.0-beta.17", 
"@mui/material": "^7.3.4", - "@mui/x-data-grid": "^8.15.0", + "@mui/x-data-grid": "^8.16.0", "@tauri-apps/api": "2.9.0", "@tauri-apps/plugin-clipboard-manager": "^2.3.2", "@tauri-apps/plugin-dialog": "^2.4.2", @@ -71,7 +71,7 @@ "react-i18next": "16.2.1", "react-markdown": "10.1.0", "react-monaco-editor": "0.59.0", - "react-router": "^7.9.4", + "react-router": "^7.9.5", "react-virtuoso": "^4.14.1", "swr": "^2.3.6", "tauri-plugin-mihomo-api": "git+https://github.com/clash-verge-rev/tauri-plugin-mihomo", @@ -82,7 +82,7 @@ "@actions/github": "^6.0.1", "@eslint-react/eslint-plugin": "^2.2.4", "@eslint/js": "^9.38.0", - "@tauri-apps/cli": "2.9.1", + "@tauri-apps/cli": "2.9.2", "@types/js-yaml": "^4.0.9", "@types/lodash-es": "^4.17.12", "@types/node": "^24.9.2", @@ -119,7 +119,7 @@ "vite": "^7.1.12", "vite-plugin-monaco-editor": "^1.1.0", "vite-plugin-svgr": "^4.5.0", - "vitest": "^4.0.4" + "vitest": "^4.0.5" }, "lint-staged": { "*.{ts,tsx,js,jsx}": [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d2ad8cd6..db8d50d0 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -36,8 +36,8 @@ importers: specifier: ^7.3.4 version: 7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) '@mui/x-data-grid': - specifier: ^8.15.0 - version: 
8.15.0(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@mui/material@7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(@mui/system@7.3.3(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + specifier: ^8.16.0 + version: 8.16.0(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@mui/material@7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(@mui/system@7.3.3(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) '@tauri-apps/api': specifier: 2.9.0 version: 2.9.0 @@ -120,8 +120,8 @@ importers: specifier: 0.59.0 version: 0.59.0(monaco-editor@0.54.0)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) react-router: - specifier: ^7.9.4 - version: 7.9.4(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + specifier: ^7.9.5 + version: 7.9.5(react-dom@19.2.0(react@19.2.0))(react@19.2.0) react-virtuoso: specifier: ^4.14.1 version: 4.14.1(react-dom@19.2.0(react@19.2.0))(react@19.2.0) @@ 
-148,8 +148,8 @@ importers: specifier: ^9.38.0 version: 9.38.0 '@tauri-apps/cli': - specifier: 2.9.1 - version: 2.9.1 + specifier: 2.9.2 + version: 2.9.2 '@types/js-yaml': specifier: ^4.0.9 version: 4.0.9 @@ -259,8 +259,8 @@ importers: specifier: ^4.5.0 version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) vitest: - specifier: ^4.0.4 - version: 4.0.4(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + specifier: ^4.0.5 + version: 4.0.5(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) packages: @@ -1242,8 +1242,8 @@ packages: '@types/react': optional: true - '@mui/x-data-grid@8.15.0': - resolution: {integrity: sha512-JNPG2WSYJVKbUAbDpLCbWmIY25k9hyfUjAVnzDREbJMwPL+/5B9pIK0ikRQEXc0wRKY2T59SeR/Um2FZjBeeWQ==} + '@mui/x-data-grid@8.16.0': + resolution: {integrity: sha512-yJ+v+E1yI1HxrEUdOfgrUTCxobAFvotGggU6cy6MnM7c7/TPPg9d5mDzjzxb0imOCJ6WyiM/vtd5WKbY/5sUNw==} engines: {node: '>=14.0.0'} peerDependencies: '@emotion/react': ^11.9.0 @@ -1258,14 +1258,14 @@ packages: '@emotion/styled': optional: true - '@mui/x-internals@8.14.0': - resolution: {integrity: sha512-esYyl61nuuFXiN631TWuPh2tqdoyTdBI/4UXgwH3rytF8jiWvy6prPBPRHEH1nvW3fgw9FoBI48FlOO+yEI8xg==} + '@mui/x-internals@8.16.0': + resolution: {integrity: sha512-JR53WOFqmQYQzurOpB0H91K7/9uMcte1ooxHxTLGB+97PgB+rKY6siRWvUALGS56XyPV+1a2ALI33hd2E7+Rgg==} engines: {node: '>=14.0.0'} peerDependencies: react: ^17.0.0 || ^18.0.0 || ^19.0.0 - '@mui/x-virtualizer@0.2.5': - resolution: {integrity: sha512-kCo/i9YfNavbupqZGO1649CHwIABrwUDHVZh+GvGierHhIglUc9MHxYKsPhuojOg6izWa2HP+klt3nq2n/arOw==} + '@mui/x-virtualizer@0.2.6': + resolution: {integrity: sha512-t45EHhD9kStSwIYMkqYYQIFbZNVQws9LRANktf0e/+j+MxsRTFk41r0rgiazMSOSugJlCuSh/H8xUUuMCZdtow==} engines: {node: '>=14.0.0'} peerDependencies: react: ^17.0.0 || ^18.0.0 || ^19.0.0 @@ -1687,74 +1687,74 @@ packages: 
'@tauri-apps/api@2.9.0': resolution: {integrity: sha512-qD5tMjh7utwBk9/5PrTA/aGr3i5QaJ/Mlt7p8NilQ45WgbifUNPyKWsA63iQ8YfQq6R8ajMapU+/Q8nMcPRLNw==} - '@tauri-apps/cli-darwin-arm64@2.9.1': - resolution: {integrity: sha512-sdwhtsE/6njD0AjgfYEj1JyxZH4SBmCJSXpRm6Ph5fQeuZD6MyjzjdVOrrtFguyREVQ7xn0Ujkwvbo01ULthNg==} + '@tauri-apps/cli-darwin-arm64@2.9.2': + resolution: {integrity: sha512-g1OtCXydOZFYRUEAyGYdJ2lLaE3l5jk8o+Bro8y2WOLwBLtbWjBoJIVobOKFanfjG/Xr8H/UA+umEVILPhMc2A==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@tauri-apps/cli-darwin-x64@2.9.1': - resolution: {integrity: sha512-c86g+67wTdI4TUCD7CaSd/13+oYuLQxVST4ZNJ5C+6i1kdnU3Us1L68N9MvbDLDQGJc9eo0pvuK6sCWkee+BzA==} + '@tauri-apps/cli-darwin-x64@2.9.2': + resolution: {integrity: sha512-nHHIY33noUmMOyFwAJz0xQyrYIXU+bae8MNos4TGsTo491YWAF2uzr6iW+Bq0N530xDcbe7EyRvDHgK43RmmVw==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@tauri-apps/cli-linux-arm-gnueabihf@2.9.1': - resolution: {integrity: sha512-IrB3gFQmueQKJjjisOcMktW/Gh6gxgqYO419doA3YZ7yIV5rbE8ZW52Q3I4AO+SlFEyVYer5kpi066p0JBlLGw==} + '@tauri-apps/cli-linux-arm-gnueabihf@2.9.2': + resolution: {integrity: sha512-Dq17LBdSuzf+fWOKMIyiSao+Fcq4FiQwYYlx3Nk8oafDINc8sVBjC5gv2xp18KzYhk9teSWfmDpD1sj+D3t7uw==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@tauri-apps/cli-linux-arm64-gnu@2.9.1': - resolution: {integrity: sha512-Ke7TyXvu6HbWSkmVkFbbH19D3cLsd117YtXP/u9NIvSpYwKeFtnbpirrIUfPm44Q+PZFZ2Hvg8X9qoUiAK0zKw==} + '@tauri-apps/cli-linux-arm64-gnu@2.9.2': + resolution: {integrity: sha512-Pxj5k29Rxj9xEht4gdE744t5HLXTwBojkjYDXXyJ3mE+BEg9hFX5WkStg7OkyZwH60u8NSkDSMpo7MJTH9srmA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@tauri-apps/cli-linux-arm64-musl@2.9.1': - resolution: {integrity: sha512-sGvy75sv55oeMulR5ArwPD28DsDQxqTzLhXCrpU9/nbFg/JImmI7k994YE9fr3V0qE3Cjk5gjLldRNv7I9sjwQ==} + '@tauri-apps/cli-linux-arm64-musl@2.9.2': + resolution: {integrity: sha512-mx82BuD4q3Yj5Zw+LXveZgPaDCnmH2At2LosX1siK77kaD5Ap5FF+FN0V4y+3cwq+Hcrk9AhEUPbHqoNOx1R2g==} 
engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@tauri-apps/cli-linux-riscv64-gnu@2.9.1': - resolution: {integrity: sha512-tEKbJydV3BdIxpAx8aGHW6VDg1xW4LlQuRD/QeFZdZNTreHJpMbJEcdvAcI+Hg6vgQpVpaoEldR9W4F6dYSLqQ==} + '@tauri-apps/cli-linux-riscv64-gnu@2.9.2': + resolution: {integrity: sha512-Ypm1nnr7k+ECC1+JfDcnxROHt6BX8t/4GplxBvdY68BDXtIcBbdhPWDos7MK+3bDmoaA0WSJbW+DUjpfSkyKgw==} engines: {node: '>= 10'} cpu: [riscv64] os: [linux] - '@tauri-apps/cli-linux-x64-gnu@2.9.1': - resolution: {integrity: sha512-mg5msXHagtHpyCVWgI01M26JeSrgE/otWyGdYcuTwyRYZYEJRTbcNt7hscOkdNlPBe7isScW7PVKbxmAjJJl4g==} + '@tauri-apps/cli-linux-x64-gnu@2.9.2': + resolution: {integrity: sha512-tg85cGIM9PWwsbQg8m3uah3SfoNapgUr4vhWtkqgeTDZOjQuQ2duTwCH4UiM7acBpbZHNzvRrxSFpv0U53TqQQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@tauri-apps/cli-linux-x64-musl@2.9.1': - resolution: {integrity: sha512-lFZEXkpDreUe3zKilvnMsrnKP9gwQudaEjDnOz/GMzbzNceIuPfFZz0cR/ky1Aoq4eSvZonPKHhROq4owz4fzg==} + '@tauri-apps/cli-linux-x64-musl@2.9.2': + resolution: {integrity: sha512-xW8qaz9bcwR35W2gIg7fKG9e1Z34idOsGpD2zIPgxlJyF314B/1qie50hbOqt5AbbXHR4iRpxKE4kA2grqMmkg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@tauri-apps/cli-win32-arm64-msvc@2.9.1': - resolution: {integrity: sha512-ejc5RAp/Lm1Aj0EQHaT+Wdt5PHfdgQV5hIDV00MV6HNbIb5W4ZUFxMDaRkAg65gl9MvY2fH396riePW3RoKXDw==} + '@tauri-apps/cli-win32-arm64-msvc@2.9.2': + resolution: {integrity: sha512-A1PshB8oHdY7zYOPlLD7Om7/aD9sOUVREd765ElIzYDtptWcALwOP9jb22Wi01vDTqxf98E4ZGIcG2gxr4FhiA==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@tauri-apps/cli-win32-ia32-msvc@2.9.1': - resolution: {integrity: sha512-fSATtJDc0fNjVB6ystyi8NbwhNFk8i8E05h6KrsC8Fio5eaJIJvPCbC9pdrPl6kkxN1X7fj25ErBbgfqgcK8Fg==} + '@tauri-apps/cli-win32-ia32-msvc@2.9.2': + resolution: {integrity: sha512-AuCi0Vnc4qkXRLCC58das0u45SmXAjqcOjqF324CBKa1Z7jjNJESm0Sc2oc2G2q6f2eAbAfi34s2iJNaJU1hlQ==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] - '@tauri-apps/cli-win32-x64-msvc@2.9.1': 
- resolution: {integrity: sha512-/JHlOzpUDhjBOO9w167bcYxfJbcMQv7ykS/Y07xjtcga8np0rzUzVGWYmLMH7orKcDMC7wjhheEW1x8cbGma/Q==} + '@tauri-apps/cli-win32-x64-msvc@2.9.2': + resolution: {integrity: sha512-kDoejyfvME/mLkR4VofQnmVPTt/smJvoXuE3xgTbUwcUQKqawM8EyQvxOHQosaJYfQphHi7G0ya8UZo3PlDZig==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@tauri-apps/cli@2.9.1': - resolution: {integrity: sha512-kKi2/WWsNXKoMdatBl4xrT7e1Ce27JvsetBVfWuIb6D3ep/Y0WO5SIr70yarXOSWam8NyDur4ipzjZkg6m7VDg==} + '@tauri-apps/cli@2.9.2': + resolution: {integrity: sha512-aGzdVgxQW6WQ7e5nydPZ/30u8HvltHjO3Ytzf1wOxX1N5Yj2TsjKWRb/AWJlB95Huml3k3c/b6s0ijAvlSo9xw==} engines: {node: '>= 10'} hasBin: true @@ -2022,11 +2022,11 @@ packages: peerDependencies: vite: ^4 || ^5 || ^6 || ^7 - '@vitest/expect@4.0.4': - resolution: {integrity: sha512-0ioMscWJtfpyH7+P82sGpAi3Si30OVV73jD+tEqXm5+rIx9LgnfdaOn45uaFkKOncABi/PHL00Yn0oW/wK4cXw==} + '@vitest/expect@4.0.5': + resolution: {integrity: sha512-DJctLVlKoddvP/G389oGmKWNG6GD9frm2FPXARziU80Rjo7SIYxQzb2YFzmQ4fVD3Q5utUYY8nUmWrqsuIlIXQ==} - '@vitest/mocker@4.0.4': - resolution: {integrity: sha512-UTtKgpjWj+pvn3lUM55nSg34098obGhSHH+KlJcXesky8b5wCUgg7s60epxrS6yAG8slZ9W8T9jGWg4PisMf5Q==} + '@vitest/mocker@4.0.5': + resolution: {integrity: sha512-iYHIy72LfbK+mL5W8zXROp6oOcJKXWeKcNjcPPsqoa18qIEDrhB6/Z08o0wRajTd6SSSDNw8NCSIHVNOMpz0mw==} peerDependencies: msw: ^2.4.9 vite: ^6.0.0 || ^7.0.0-0 @@ -2036,20 +2036,20 @@ packages: vite: optional: true - '@vitest/pretty-format@4.0.4': - resolution: {integrity: sha512-lHI2rbyrLVSd1TiHGJYyEtbOBo2SDndIsN3qY4o4xe2pBxoJLD6IICghNCvD7P+BFin6jeyHXiUICXqgl6vEaQ==} + '@vitest/pretty-format@4.0.5': + resolution: {integrity: sha512-t1T/sSdsYyNc5AZl0EMeD0jW9cpJe2cODP0R++ZQe1kTkpgrwEfxGFR/yCG4w8ZybizbXRTHU7lE8sTDD/QsGw==} - '@vitest/runner@4.0.4': - resolution: {integrity: sha512-99EDqiCkncCmvIZj3qJXBZbyoQ35ghOwVWNnQ5nj0Hnsv4Qm40HmrMJrceewjLVvsxV/JSU4qyx2CGcfMBmXJw==} + '@vitest/runner@4.0.5': + resolution: {integrity: 
sha512-CQVVe+YEeKSiFBD5gBAmRDQglm4PnMBYzeTmt06t5iWtsUN9StQeeKhYCea/oaqBYilf8sARG6fSctUcEL/UmQ==} - '@vitest/snapshot@4.0.4': - resolution: {integrity: sha512-XICqf5Gi4648FGoBIeRgnHWSNDp+7R5tpclGosFaUUFzY6SfcpsfHNMnC7oDu/iOLBxYfxVzaQpylEvpgii3zw==} + '@vitest/snapshot@4.0.5': + resolution: {integrity: sha512-jfmSAeR6xYNEvcD+/RxFGA1bzpqHtkVhgxo2cxXia+Q3xX7m6GpZij07rz+WyQcA/xEGn4eIS1OItkMyWsGBmQ==} - '@vitest/spy@4.0.4': - resolution: {integrity: sha512-G9L13AFyYECo40QG7E07EdYnZZYCKMTSp83p9W8Vwed0IyCG1GnpDLxObkx8uOGPXfDpdeVf24P1Yka8/q1s9g==} + '@vitest/spy@4.0.5': + resolution: {integrity: sha512-TUmVQpAQign7r8+EnZsgTF3vY9BdGofTUge1rGNbnHn2IN3FChiQoT9lrPz7A7AVUZJU2LAZXl4v66HhsNMhoA==} - '@vitest/utils@4.0.4': - resolution: {integrity: sha512-4bJLmSvZLyVbNsYFRpPYdJViG9jZyRvMZ35IF4ymXbRZoS+ycYghmwTGiscTXduUg2lgKK7POWIyXJNute1hjw==} + '@vitest/utils@4.0.5': + resolution: {integrity: sha512-V5RndUgCB5/AfNvK9zxGCrRs99IrPYtMTIdUzJMMFs9nrmE5JXExIEfjVtUteyTRiLfCm+dCRMHf/Uu7Mm8/dg==} acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} @@ -3662,8 +3662,8 @@ packages: react: '>=16.8.0 <20.0.0' react-dom: '>=16.8.0 <20.0.0' - react-router@7.9.4: - resolution: {integrity: sha512-SD3G8HKviFHg9xj7dNODUKDFgpG4xqD5nhyd0mYoB5iISepuZAvzSr8ywxgxKJ52yRzf/HWtVHc9AWwoTbljvA==} + react-router@7.9.5: + resolution: {integrity: sha512-JmxqrnBZ6E9hWmf02jzNn9Jm3UqyeimyiwzD69NjxGySG6lIz/1LVPsoTCwN7NBX2XjCEa1LIX5EMz1j2b6u6A==} engines: {node: '>=20.0.0'} peerDependencies: react: '>=18' @@ -4195,18 +4195,18 @@ packages: yaml: optional: true - vitest@4.0.4: - resolution: {integrity: sha512-hV31h0/bGbtmDQc0KqaxsTO1v4ZQeF8ojDFuy4sZhFadwAqqvJA0LDw68QUocctI5EDpFMql/jVWKuPYHIf2Ew==} + vitest@4.0.5: + resolution: {integrity: sha512-4H+J28MI5oeYgGg3h5BFSkQ1g/2GKK1IR8oorH3a6EQQbb7CwjbnyBjH4PGxw9/6vpwAPNzaeUMp4Js4WJmdXQ==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' 
'@types/debug': ^4.1.12 '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.4 - '@vitest/browser-preview': 4.0.4 - '@vitest/browser-webdriverio': 4.0.4 - '@vitest/ui': 4.0.4 + '@vitest/browser-playwright': 4.0.5 + '@vitest/browser-preview': 4.0.5 + '@vitest/browser-webdriverio': 4.0.5 + '@vitest/ui': 4.0.5 happy-dom: '*' jsdom: '*' peerDependenciesMeta: @@ -5502,14 +5502,14 @@ snapshots: optionalDependencies: '@types/react': 19.2.2 - '@mui/x-data-grid@8.15.0(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@mui/material@7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(@mui/system@7.3.3(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': + 
'@mui/x-data-grid@8.16.0(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@mui/material@7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(@mui/system@7.3.3(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: '@babel/runtime': 7.28.4 '@mui/material': 7.3.4(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) '@mui/system': 7.3.3(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0))(@types/react@19.2.2)(react@19.2.0) '@mui/utils': 7.3.3(@types/react@19.2.2)(react@19.2.0) - '@mui/x-internals': 8.14.0(@types/react@19.2.2)(react@19.2.0) - '@mui/x-virtualizer': 0.2.5(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@mui/x-internals': 8.16.0(@types/react@19.2.2)(react@19.2.0) + '@mui/x-virtualizer': 0.2.6(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) clsx: 2.1.1 prop-types: 15.8.1 react: 19.2.0 @@ -5521,7 +5521,7 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@mui/x-internals@8.14.0(@types/react@19.2.2)(react@19.2.0)': + '@mui/x-internals@8.16.0(@types/react@19.2.2)(react@19.2.0)': dependencies: '@babel/runtime': 7.28.4 '@mui/utils': 
7.3.3(@types/react@19.2.2)(react@19.2.0) @@ -5531,11 +5531,11 @@ snapshots: transitivePeerDependencies: - '@types/react' - '@mui/x-virtualizer@0.2.5(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': + '@mui/x-virtualizer@0.2.6(@types/react@19.2.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': dependencies: '@babel/runtime': 7.28.4 '@mui/utils': 7.3.3(@types/react@19.2.2)(react@19.2.0) - '@mui/x-internals': 8.14.0(@types/react@19.2.2)(react@19.2.0) + '@mui/x-internals': 8.16.0(@types/react@19.2.2)(react@19.2.0) react: 19.2.0 react-dom: 19.2.0(react@19.2.0) transitivePeerDependencies: @@ -5882,52 +5882,52 @@ snapshots: '@tauri-apps/api@2.9.0': {} - '@tauri-apps/cli-darwin-arm64@2.9.1': + '@tauri-apps/cli-darwin-arm64@2.9.2': optional: true - '@tauri-apps/cli-darwin-x64@2.9.1': + '@tauri-apps/cli-darwin-x64@2.9.2': optional: true - '@tauri-apps/cli-linux-arm-gnueabihf@2.9.1': + '@tauri-apps/cli-linux-arm-gnueabihf@2.9.2': optional: true - '@tauri-apps/cli-linux-arm64-gnu@2.9.1': + '@tauri-apps/cli-linux-arm64-gnu@2.9.2': optional: true - '@tauri-apps/cli-linux-arm64-musl@2.9.1': + '@tauri-apps/cli-linux-arm64-musl@2.9.2': optional: true - '@tauri-apps/cli-linux-riscv64-gnu@2.9.1': + '@tauri-apps/cli-linux-riscv64-gnu@2.9.2': optional: true - '@tauri-apps/cli-linux-x64-gnu@2.9.1': + '@tauri-apps/cli-linux-x64-gnu@2.9.2': optional: true - '@tauri-apps/cli-linux-x64-musl@2.9.1': + '@tauri-apps/cli-linux-x64-musl@2.9.2': optional: true - '@tauri-apps/cli-win32-arm64-msvc@2.9.1': + '@tauri-apps/cli-win32-arm64-msvc@2.9.2': optional: true - '@tauri-apps/cli-win32-ia32-msvc@2.9.1': + '@tauri-apps/cli-win32-ia32-msvc@2.9.2': optional: true - '@tauri-apps/cli-win32-x64-msvc@2.9.1': + '@tauri-apps/cli-win32-x64-msvc@2.9.2': optional: true - '@tauri-apps/cli@2.9.1': + '@tauri-apps/cli@2.9.2': optionalDependencies: - '@tauri-apps/cli-darwin-arm64': 2.9.1 - '@tauri-apps/cli-darwin-x64': 2.9.1 - '@tauri-apps/cli-linux-arm-gnueabihf': 2.9.1 - 
'@tauri-apps/cli-linux-arm64-gnu': 2.9.1 - '@tauri-apps/cli-linux-arm64-musl': 2.9.1 - '@tauri-apps/cli-linux-riscv64-gnu': 2.9.1 - '@tauri-apps/cli-linux-x64-gnu': 2.9.1 - '@tauri-apps/cli-linux-x64-musl': 2.9.1 - '@tauri-apps/cli-win32-arm64-msvc': 2.9.1 - '@tauri-apps/cli-win32-ia32-msvc': 2.9.1 - '@tauri-apps/cli-win32-x64-msvc': 2.9.1 + '@tauri-apps/cli-darwin-arm64': 2.9.2 + '@tauri-apps/cli-darwin-x64': 2.9.2 + '@tauri-apps/cli-linux-arm-gnueabihf': 2.9.2 + '@tauri-apps/cli-linux-arm64-gnu': 2.9.2 + '@tauri-apps/cli-linux-arm64-musl': 2.9.2 + '@tauri-apps/cli-linux-riscv64-gnu': 2.9.2 + '@tauri-apps/cli-linux-x64-gnu': 2.9.2 + '@tauri-apps/cli-linux-x64-musl': 2.9.2 + '@tauri-apps/cli-win32-arm64-msvc': 2.9.2 + '@tauri-apps/cli-win32-ia32-msvc': 2.9.2 + '@tauri-apps/cli-win32-x64-msvc': 2.9.2 '@tauri-apps/plugin-clipboard-manager@2.3.2': dependencies: @@ -6208,43 +6208,43 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@vitest/expect@4.0.4': + '@vitest/expect@4.0.5': dependencies: '@standard-schema/spec': 1.0.0 '@types/chai': 5.2.2 - '@vitest/spy': 4.0.4 - '@vitest/utils': 4.0.4 + '@vitest/spy': 4.0.5 + '@vitest/utils': 4.0.5 chai: 6.2.0 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.4(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitest/mocker@4.0.5(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: - '@vitest/spy': 4.0.4 + '@vitest/spy': 4.0.5 estree-walker: 3.0.3 magic-string: 0.30.19 optionalDependencies: vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) - '@vitest/pretty-format@4.0.4': + '@vitest/pretty-format@4.0.5': dependencies: tinyrainbow: 3.0.3 - '@vitest/runner@4.0.4': + '@vitest/runner@4.0.5': dependencies: - '@vitest/utils': 4.0.4 + '@vitest/utils': 4.0.5 pathe: 2.0.3 - '@vitest/snapshot@4.0.4': + '@vitest/snapshot@4.0.5': dependencies: - '@vitest/pretty-format': 4.0.4 + '@vitest/pretty-format': 4.0.5 
magic-string: 0.30.19 pathe: 2.0.3 - '@vitest/spy@4.0.4': {} + '@vitest/spy@4.0.5': {} - '@vitest/utils@4.0.4': + '@vitest/utils@4.0.5': dependencies: - '@vitest/pretty-format': 4.0.4 + '@vitest/pretty-format': 4.0.5 tinyrainbow: 3.0.3 acorn-jsx@5.3.2(acorn@8.15.0): @@ -8243,7 +8243,7 @@ snapshots: react: 19.2.0 react-dom: 19.2.0(react@19.2.0) - react-router@7.9.4(react-dom@19.2.0(react@19.2.0))(react@19.2.0): + react-router@7.9.5(react-dom@19.2.0(react@19.2.0))(react@19.2.0): dependencies: cookie: 1.0.2 react: 19.2.0 @@ -8892,15 +8892,15 @@ snapshots: terser: 5.44.0 yaml: 2.8.1 - vitest@4.0.4(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vitest@4.0.5(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): dependencies: - '@vitest/expect': 4.0.4 - '@vitest/mocker': 4.0.4(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) - '@vitest/pretty-format': 4.0.4 - '@vitest/runner': 4.0.4 - '@vitest/snapshot': 4.0.4 - '@vitest/spy': 4.0.4 - '@vitest/utils': 4.0.4 + '@vitest/expect': 4.0.5 + '@vitest/mocker': 4.0.5(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitest/pretty-format': 4.0.5 + '@vitest/runner': 4.0.5 + '@vitest/snapshot': 4.0.5 + '@vitest/spy': 4.0.5 + '@vitest/utils': 4.0.5 debug: 4.4.3 es-module-lexer: 1.7.0 expect-type: 1.2.2 From e7a9f8f7556fd8d89652aec2271b745563c8b500 Mon Sep 17 00:00:00 2001 From: miewx Date: Thu, 30 Oct 2025 11:24:40 +0800 Subject: [PATCH 25/70] add support x-oss-meta-subscription-userinfo (#5234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add support x-oss-meta-subscription-userinfo * Update prfitem.rs match any subscription-userinfo * Update prfitem.rs 改为 ends_with 更好 * feat(config): enforce stricter header match for subscription usage --------- Co-authored-by: i18n Co-authored-by: Slinetrac --- 
src-tauri/src/config/prfitem.rs | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/src-tauri/src/config/prfitem.rs b/src-tauri/src/config/prfitem.rs index 7475cf1f..46c86478 100644 --- a/src-tauri/src/config/prfitem.rs +++ b/src-tauri/src/config/prfitem.rs @@ -297,18 +297,27 @@ impl PrfItem { let header = resp.headers(); // parse the Subscription UserInfo - let extra = match header.get("Subscription-Userinfo") { - Some(value) => { - let sub_info = value.to_str().unwrap_or(""); - Some(PrfExtra { - upload: help::parse_str(sub_info, "upload").unwrap_or(0), - download: help::parse_str(sub_info, "download").unwrap_or(0), - total: help::parse_str(sub_info, "total").unwrap_or(0), - expire: help::parse_str(sub_info, "expire").unwrap_or(0), - }) + let extra; + 'extra: { + for (k, v) in header.iter() { + let key_lower = k.as_str().to_ascii_lowercase(); + // Accept standard custom-metadata prefixes (x-amz-meta-, x-obs-meta-, x-cos-meta-, etc.). + if key_lower + .strip_suffix("subscription-userinfo") + .is_some_and(|prefix| prefix.is_empty() || prefix.ends_with('-')) + { + let sub_info = v.to_str().unwrap_or(""); + extra = Some(PrfExtra { + upload: help::parse_str(sub_info, "upload").unwrap_or(0), + download: help::parse_str(sub_info, "download").unwrap_or(0), + total: help::parse_str(sub_info, "total").unwrap_or(0), + expire: help::parse_str(sub_info, "expire").unwrap_or(0), + }); + break 'extra; + } } - None => None, - }; + extra = None; + } // parse the Content-Disposition let filename = match header.get("Content-Disposition") { From 2d73afdff2351401fb74f3024e162d6bbdf07611 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 16:57:09 +0800 Subject: [PATCH 26/70] style: update UPDATELOG.md using details and summary --- UPDATELOG.md | 95 +++++++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/UPDATELOG.md 
b/UPDATELOG.md index 10c90113..20900405 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -1,49 +1,5 @@ ## v2.4.3 -### ✨ 新增功能 - -- **Mihomo(Meta) 内核升级至 v1.19.15** -- 支持前端修改日志(最大文件大小、最大保留数量) -- 新增链式代理图形化设置功能 -- 新增系统标题栏与程序标题栏切换 (设置-页面设置-倾向系统标题栏) -- 监听关机事件,自动关闭系统代理 -- 主界面“当前节点”卡片新增“延迟测试”按钮 -- 新增批量选择配置文件功能 -- Windows / Linux / MacOS 监听关机信号,优雅恢复网络设置 -- 新增本地备份功能 -- 主界面“当前节点”卡片新增自动延迟检测开关(默认关闭) -- 允许独立控制订阅自动更新 -- 托盘 `更多` 中新增 `关闭所有连接` 按钮 -- 新增左侧菜单栏的排序功能(右键点击左侧菜单栏) - -### 🚀 优化改进 - -- 重构并简化服务模式启动检测流程,消除重复检测 -- 重构并简化窗口创建流程 -- 重构日志系统,单个日志默认最大 10 MB -- 优化前端资源占用 -- 改进 macos 下系统代理设置的方法 -- 优化 TUN 模式可用性的判断 -- 移除流媒体检测的系统级提示(使用软件内通知) -- 优化后端 i18n 资源占用 -- 改进 Linux 托盘支持并添加 `--no-tray` 选项 -- Linux 现在在新生成的配置中默认将 TUN 栈恢复为 mixed 模式 -- 为代理延迟测试的 URL 设置增加了保护以及添加了安全的备用 URL -- 更新了 Wayland 合成器检测逻辑,从而在 Hyprland 会话中保留原生 Wayland 后端 -- 改进 Windows 和 Unix 的 服务连接方式以及权限,避免无法连接服务或内核 -- 修改内核默认日志级别为 Info -- 支持通过桌面快捷方式重新打开应用 -- 支持订阅界面输入链接后回车导入 -- 选择按延迟排序时每次延迟测试自动刷新节点顺序 -- 配置重载失败时自动重启核心 -- 启用 TUN 前等待服务就绪 -- 卸载 TUN 时会先关闭 -- 优化应用启动页 -- 优化首页当前节点对MATCH规则的支持 -- 允许在 `界面设置` 修改 `悬浮跳转导航延迟` -- 添加热键绑定错误的提示信息 -- 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题 - ### 🐞 修复问题 - 优化服务模式重装逻辑,避免不必要的重复检查 @@ -73,7 +29,56 @@ - 修复首页自定义卡片在切换轻量模式时失效 - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 -- 修复后端无法通知前端事件 +- 修复前端无法及时刷新操作状态 + +

+ ✨ 新增功能 + +- **Mihomo(Meta) 内核升级至 v1.19.15** +- 支持前端修改日志(最大文件大小、最大保留数量) +- 新增链式代理图形化设置功能 +- 新增系统标题栏与程序标题栏切换 (设置-页面设置-倾向系统标题栏) +- 监听关机事件,自动关闭系统代理 +- 主界面“当前节点”卡片新增“延迟测试”按钮 +- 新增批量选择配置文件功能 +- Windows / Linux / MacOS 监听关机信号,优雅恢复网络设置 +- 新增本地备份功能 +- 主界面“当前节点”卡片新增自动延迟检测开关(默认关闭) +- 允许独立控制订阅自动更新 +- 托盘 `更多` 中新增 `关闭所有连接` 按钮 +- 新增左侧菜单栏的排序功能(右键点击左侧菜单栏) +
+ +
+ 🚀 优化改进 + +- 重构并简化服务模式启动检测流程,消除重复检测 +- 重构并简化窗口创建流程 +- 重构日志系统,单个日志默认最大 10 MB +- 优化前端资源占用 +- 改进 macos 下系统代理设置的方法 +- 优化 TUN 模式可用性的判断 +- 移除流媒体检测的系统级提示(使用软件内通知) +- 优化后端 i18n 资源占用 +- 改进 Linux 托盘支持并添加 `--no-tray` 选项 +- Linux 现在在新生成的配置中默认将 TUN 栈恢复为 mixed 模式 +- 为代理延迟测试的 URL 设置增加了保护以及添加了安全的备用 URL +- 更新了 Wayland 合成器检测逻辑,从而在 Hyprland 会话中保留原生 Wayland 后端 +- 改进 Windows 和 Unix 的 服务连接方式以及权限,避免无法连接服务或内核 +- 修改内核默认日志级别为 Info +- 支持通过桌面快捷方式重新打开应用 +- 支持订阅界面输入链接后回车导入 +- 选择按延迟排序时每次延迟测试自动刷新节点顺序 +- 配置重载失败时自动重启核心 +- 启用 TUN 前等待服务就绪 +- 卸载 TUN 时会先关闭 +- 优化应用启动页 +- 优化首页当前节点对MATCH规则的支持 +- 允许在 `界面设置` 修改 `悬浮跳转导航延迟` +- 添加热键绑定错误的提示信息 +- 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题 + +
## v2.4.2 From af79bcd1cf5370f566db669f33b16f1f0537c4c6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 17:12:20 +0800 Subject: [PATCH 27/70] chore(deps): update dependency react-i18next to v16.2.2 (#5251) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/package.json b/package.json index 051e0dc8..07418b07 100644 --- a/package.json +++ b/package.json @@ -68,7 +68,7 @@ "react-dom": "19.2.0", "react-error-boundary": "6.0.0", "react-hook-form": "^7.65.0", - "react-i18next": "16.2.1", + "react-i18next": "16.2.2", "react-markdown": "10.1.0", "react-monaco-editor": "0.59.0", "react-router": "^7.9.5", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index db8d50d0..a1d8151c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -111,8 +111,8 @@ importers: specifier: ^7.65.0 version: 7.65.0(react@19.2.0) react-i18next: - specifier: 16.2.1 - version: 16.2.1(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3) + specifier: 16.2.2 + version: 16.2.2(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3) react-markdown: specifier: 10.1.0 version: 10.1.0(@types/react@19.2.2)(react@19.2.0) @@ -3627,8 +3627,8 @@ packages: peerDependencies: react: ^16.8.0 || ^17 || ^18 || ^19 - react-i18next@16.2.1: - resolution: {integrity: sha512-z7TVwd8q4AjFo2n7oOwzNusY7xVL4uHykwX1zZRvasUQnmnXlp7Z1FZqXvhK/6hQaCvWTZmZW1bMaUWKowtvVw==} + react-i18next@16.2.2: + resolution: {integrity: sha512-iKgJMloKpkLUCy6w/0BEJSVQ5jG2WbnFiO2w/9wwa8nc+obsEZjQErRCw27O7BHlpscKfpWSu5vTnC+3fBMQfQ==} peerDependencies: i18next: '>= 25.5.2' react: '>= 16.8.0' @@ -8204,7 +8204,7 @@ snapshots: dependencies: react: 19.2.0 - 
react-i18next@16.2.1(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3): + react-i18next@16.2.2(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3): dependencies: '@babel/runtime': 7.28.4 html-parse-stringify: 3.0.1 From c2dcd867228aae3f2fa0855226244cc701ba4f43 Mon Sep 17 00:00:00 2001 From: Sline Date: Thu, 30 Oct 2025 17:29:15 +0800 Subject: [PATCH 28/70] refactor: profile switch (#5197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: proxy refresh * fix(proxy-store): properly hydrate and filter backend provider snapshots * fix(proxy-store): add monotonic fetch guard and event bridge cleanup * fix(proxy-store): tweak fetch sequencing guard to prevent snapshot invalidation from wiping fast responses * docs: UPDATELOG.md * fix(proxy-snapshot, proxy-groups): restore last-selected proxy and group info * fix(proxy): merge static and provider entries in snapshot; fix Virtuoso viewport height * fix(proxy-groups): restrict reduced-height viewport to chain-mode column * refactor(profiles): introduce a state machine * refactor:replace state machine with reducer * refactor:introduce a profile switch worker * refactor: hooked up a backend-driven profile switch flow * refactor(profile-switch): serialize switches with async queue and enrich frontend events * feat(profiles): centralize profile switching with reducer/driver queue to fix stuck UI on rapid toggles * chore: translate comments and log messages to English to avoid encoding issues * refactor: migrate backend queue to SwitchDriver actor * fix(profile): unify error string types in validation helper * refactor(profile): make switch driver fully async and handle panics safely * refactor(cmd): move switch-validation helper into new profile_switch module * refactor(profile): modularize switch logic into profile_switch.rs * refactor(profile_switch): modularize switch handler - 
Break monolithic switch handler into proper module hierarchy - Move shared globals, constants, and SwitchScope guard to state.rs - Isolate queue orchestration and async task spawning in driver.rs - Consolidate switch pipeline and config patching in workflow.rs - Extract request pre-checks/YAML validation into validation.rs * refactor(profile_switch): centralize state management and add cancellation flow - Introduced SwitchManager in state.rs to unify mutex, sequencing, and SwitchScope handling. - Added SwitchCancellation and SwitchRequest wrappers to encapsulate cancel tokens and notifications. - Updated driver to allocate task IDs via SwitchManager, cancel old tokens, and queue next jobs in order. - Updated workflow to check cancellation and sequence at each phase, replacing global flags with manager APIs. * feat(profile_switch): integrate explicit state machine for profile switching - workflow.rs:24 now delegates each switch to SwitchStateMachine, passing an owned SwitchRequest. Queue cancellation and state-sequence checks are centralized inside the machine instead of scattered guards. - workflow.rs:176 replaces the old helper with `SwitchStateMachine::new(manager(), None, profiles).run().await`, ensuring manual profile patches follow the same workflow (locking, validation, rollback) as queued switches. - workflow.rs:180 & 275 expose `validate_profile_yaml` and `restore_previous_profile` for reuse inside the state machine. - workflow/state_machine.rs:1 introduces a dedicated state machine module. It manages global mutex acquisition, request/cancellation state, YAML validation, draft patching, `CoreManager::update_config`, failure rollback, and tray/notification side-effects. Transitions check for cancellations and stale sequences; completions release guards via `SwitchScope` drop. 
* refactor(profile-switch): integrate stage-aware panic handling - src-tauri/src/cmd/profile_switch/workflow/state_machine.rs:1 Defines SwitchStage and SwitchPanicInfo as crate-visible, wraps each transition in with_stage(...) with catch_unwind, and propagates CmdResult to distinguish validation failures from panics while keeping cancellation semantics. - src-tauri/src/cmd/profile_switch/workflow.rs:25 Updates run_switch_job to return Result, routing timeout, validation, config, and stage panic cases separately. Reuses SwitchPanicInfo for logging/UI notifications; patch_profiles_config maps state-machine panics into user-facing error strings. - src-tauri/src/cmd/profile_switch/driver.rs:1 Adds SwitchJobOutcome to unify workflow results: normal completions carry bool, and panics propagate SwitchPanicInfo. The driver loop now logs panics explicitly and uses AssertUnwindSafe(...).catch_unwind() to guard setup-phase panics. * refactor(profile-switch): add watchdog, heartbeat, and async timeout guards - Introduce SwitchHeartbeat for stage tracking and timing; log stage transitions with elapsed durations. - Add watchdog in driver to cancel stalled switches (5s heartbeat timeout). - Wrap blocking ops (Config::apply, tray updates, profiles_save_file_safe, etc.) with time::timeout to prevent async stalls. - Improve logs for stage transitions and watchdog timeouts to clarify cancellation points. * refactor(profile-switch): async post-switch tasks, early lock release, and spawn_blocking for IO * feat(profile-switch): track cleanup and coordinate pipeline - Add explicit cleanup tracking in the driver (`cleanup_profiles` map + `CleanupDone` messages) to know when background post-switch work is still running before starting a new workflow. (driver.rs:29-50) - Update `handle_enqueue` to detect “cleanup in progress”: same-profile retries are short-circuited; other requests collapse the pending queue, cancelling old tokens so only the latest intent survives. 
(driver.rs:176-247) - Rework scheduling helpers: `start_next_job` refuses to start while cleanup is outstanding; discarded requests release cancellation tokens; cleanup completion explicitly restarts the pipeline. (driver.rs:258-442) * feat(profile-switch): unify post-switch cleanup handling - workflow.rs (25-427) returns `SwitchWorkflowResult` (success + CleanupHandle) or `SwitchWorkflowError`. All failure/timeout paths stash post-switch work into a single CleanupHandle. Cleanup helpers (`notify_profile_switch_finished` and `close_connections_after_switch`) run inside that task for proper lifetime handling. - driver.rs (29-439) propagates CleanupHandle through `SwitchJobOutcome`, spawns a bridge to wait for completion, and blocks `start_next_job` until done. Direct driver-side panics now schedule failure cleanup via the shared helper. * tmp * Revert "tmp" This reverts commit e582cf4a652231a67a7c951802cb19b385f6afd7. * refactor: queue frontend events through async dispatcher * refactor: queue frontend switch/proxy events and throttle notices * chore: frontend debug log * fix: re-enable only ProfileSwitchFinished events - keep others suppressed for crash isolation - Re-enabled only ProfileSwitchFinished events; RefreshClash, RefreshProxy, and ProfileChanged remain suppressed (they log suppression messages) - Allows frontend to receive task completion notifications for UI feedback while crash isolation continues - src-tauri/src/core/handle.rs now only suppresses notify_profile_changed - Serialized emitter, frontend logging bridge, and other diagnostics unchanged * refactor: refreshClashData * refactor(proxy): stabilize proxy switch pipeline and rendering - Add coalescing buffer in notification.rs to emit only the latest proxies-updated snapshot - Replace nextTick with queueMicrotask in asyncQueue.ts for same-frame hydration - Hide auto-generated GLOBAL snapshot and preserve optional metadata in proxy-snapshot.ts - Introduce stable proxy rendering state in 
AppDataProvider (proxyTargetProfileId, proxyDisplayProfileId, isProxyRefreshPending) - Update proxy page to fade content during refresh and overlay status banner instead of showing incomplete snapshot * refactor(profiles): move manual activating logic to reducer for deterministic queue tracking * refactor: replace proxy-data event bridge with pure polling and simplify proxy store - Replaced the proxy-data event bridge with pure polling: AppDataProvider now fetches the initial snapshot and drives refreshes from the polled switchStatus, removing verge://refresh-* listeners (src/providers/app-data-provider.tsx). - Simplified proxy-store by dropping the proxies-updated listener queue and unused payload/normalizer helpers; relies on SWR/provider fetch path + calcuProxies for live updates (src/stores/proxy-store.ts). - Trimmed layout-level event wiring to keep only notice/show/hide subscriptions, removing obsolete refresh listeners (src/pages/_layout/useLayoutEvents.ts). * refactor(proxy): streamline proxies-updated handling and store event flow - AppDataProvider now treats `proxies-updated` as the fast path: the listener calls `applyLiveProxyPayload` immediately and schedules only a single fallback `fetchLiveProxies` ~600 ms later (replacing the old 0/250/1000/2000 cascade). Expensive provider/rule refreshes run in parallel via `Promise.allSettled`, and the multi-stage queue on profile updates completion was removed (src/providers/app-data-provider.tsx). - Rebuilt proxy-store to support the event flow: restored `setLive`, provider normalization, and an animation-frame + async queue that applies payloads without blocking. Exposed `applyLiveProxyPayload` so providers can push events directly into the store (src/stores/proxy-store.ts). 
* refactor: switch delay * refactor(app-data-provider): trigger getProfileSwitchStatus revalidation on profile-switch-finished - AppDataProvider now listens to `profile-switch-finished` and calls `mutate("getProfileSwitchStatus")` to immediately update state and unlock buttons (src/providers/app-data-provider.tsx). - Retain existing detailed timing logs for monitoring other stages. - Frontend success notifications remain instant; background refreshes continue asynchronously. * fix(profiles): prevent duplicate toast on page remount * refactor(profile-switch): make active switches preemptible and prevent queue piling - Add notify mechanism to SwitchCancellation to await cancellation without busy-waiting (state.rs:82) - Collapse pending queue to a single entry in the driver; cancel in-flight task on newer request (driver.rs:232) - Update handle_update_core to watch cancel token and 30s timeout; release locks, discard draft, and exit early if canceled (state_machine.rs:301) - Providers revalidate status immediately on profile-switch-finished events (app-data-provider.tsx:208) * refactor(core): make core reload phase controllable, reduce 0xcfffffff risk - CoreManager::apply_config now calls `reload_config_with_retry`, each attempt waits up to 5s, retries 3 times; on failure, returns error with duration logged and triggers core restart if needed (src-tauri/src/core/manager/config.rs:175, 205) - `reload_config_with_retry` logs attempt info on timeout or error; if error is a Mihomo connection issue, fallback to original restart logic (src-tauri/src/core/manager/config.rs:211) - `reload_config_once` retains original Mihomo call for retry wrapper usage (src-tauri/src/core/manager/config.rs:247) * chore(frontend-logs): downgrade routine event logs from info to debug - Logs like `emit_via_app entering spawn_blocking`, `Async emit…`, `Buffered proxies…` are now debug-level (src-tauri/src/core/notification.rs:155, :265, :309…) - Genuine warnings/errors (failures/timeouts) remain 
at warn/error - Core stage logs remain info to keep backend tracking visible * refactor(frontend-emit): make emit_via_app fire-and-forget async - `emit_via_app` now a regular function; spawns with `tokio::spawn` and logs a warn if `emit_to` fails, caller returns immediately (src-tauri/src/core/notification.rs:269) - Removed `.await` at Async emit and flush_proxies calls; only record dispatch duration and warn on failure (src-tauri/src/core/notification.rs:211, :329) * refactor(ui): restructure profile switch for event-driven speed + polling stability - Backend - SwitchManager maintains a lightweight event queue: added `event_sequence`, `recent_events`, and `SwitchResultEvent`; provides `push_event` / `events_after` (state.rs) - `handle_completion` pushes events on success/failure and keeps `last_result` (driver.rs) for frontend incremental fetch - New Tauri command `get_profile_switch_events(after_sequence)` exposes `events_after` (profile_switch/mod.rs → profile.rs → lib.rs) - Notification system - `NotificationSystem::process_event` only logs debug, disables WebView `emit_to`, fixes 0xcfffffff - Related emit/buffer functions now safe no-op, removed unused structures and warnings (notification.rs) - Frontend - services/cmds.ts defines `SwitchResultEvent` and `getProfileSwitchEvents` - `AppDataProvider` holds `switchEventSeqRef`, polls incremental events every 0.25s (busy) / 1s (idle); each event triggers: - immediate `globalMutate("getProfiles")` to refresh current profile - background refresh of proxies/providers/rules via `Promise.allSettled` (failures logged, non-blocking) - forced `mutateSwitchStatus` to correct state - original switchStatus effect calls `handleSwitchResult` as fallback; other toast/activation logic handled in profiles.tsx - Commands / API cleanup - removed `pub use profile_switch::*;` in cmd::mod.rs to avoid conflicts; frontend uses new command polling * refactor(frontend): optimize profile switch with optimistic updates * 
refactor(profile-switch): switch to event-driven flow with Profile Store - SwitchManager pushes events; frontend polls get_profile_switch_events - Zustand store handles optimistic profiles; AppDataProvider applies updates and background-fetches - UI flicker removed * fix(app-data): re-hook profile store updates during switch hydration * fix(notification): restore frontend event dispatch and non-blocking emits * fix(app-data-provider): restore proxy refresh and seed snapshot after refactor * fix: ensure switch completion events are received and handle proxies-updated * fix(app-data-provider): dedupe switch results by taskId and fix stale profile state * fix(profile-switch): ensure patch_profiles_config_by_profile_index waits for real completion and handle join failures in apply_config_with_timeout * docs: UPDATELOG.md * chore: add necessary comments * fix(core): always dispatch async proxy snapshot after RefreshClash event * fix(proxy-store, provider): handle pending snapshots and proxy profiles - Added pending snapshot tracking in proxy-store so `lastAppliedFetchId` no longer jumps on seed. Profile adoption is deferred until a qualifying fetch completes. Exposed `clearPendingProfile` for rollback support. - Cleared pending snapshot state whenever live payloads apply or the store resets, preventing stale optimistic profile IDs after failures. - In provider integration, subscribed to the pending proxy profile and fed it into target-profile derivation. Cleared it on failed switch results so hydration can advance and UI status remains accurate. * fix(proxy): re-hook tray refresh events into proxy refresh queue - Reattached listen("verge://refresh-proxy-config", …) at src/providers/app-data-provider.tsx:402 and registered it for cleanup. - Added matching window fallback handler at src/providers/app-data-provider.tsx:430 so in-app dispatches share the same refresh path. 
* fix(proxy-snapshot/proxy-groups): address review findings on snapshot placeholders - src/utils/proxy-snapshot.ts:72-95 now derives snapshot group members solely from proxy-groups.proxies, so provider ids under `use` no longer generate placeholder proxy items. - src/components/proxy/proxy-groups.tsx:665-677 lets the hydration overlay capture pointer events (and shows a wait cursor) so users can’t interact with snapshot-only placeholders before live data is ready. * fix(profile-switch): preserve queued requests and avoid stale connection teardown - Keep earlier queued switches intact by dropping the blanket “collapse” call: after removing duplicates for the same profile, new requests are simply appended, leaving other profiles pending (driver.rs:376). Resolves queue-loss scenario. - Gate connection cleanup on real successes so cancelled/stale runs no longer tear down Mihomo connections; success handler now skips close_connections_after_switch when success == false (workflow.rs:419). * fix(profile-switch, layout): improve profile validation and restore backend refresh - Hardened profile validation using `tokio::fs` with a 5s timeout and offloading YAML parsing to `AsyncHandler::spawn_blocking`, preventing slow disks or malformed files from freezing the runtime (src-tauri/src/cmd/profile_switch/validation.rs:9, 71). - Restored backend-triggered refresh handling by listening for `verge://refresh-clash-config` / `verge://refresh-verge-config` and invoking shared refresh services so SWR caches stay in sync with core events (src/pages/_layout/useLayoutEvents.ts:6, 45, 55). 
* feat(profile-switch): handle cancellations for superseded requests - Added a `cancelled` flag and constructor so superseded requests publish an explicit cancellation instead of a failure (src-tauri/src/cmd/profile_switch/state.rs:249, src-tauri/src/cmd/profile_switch/driver.rs:482) - Updated the profile switch effect to log cancellations as info, retain the shared `mutate` call, and skip emitting error toasts while still refreshing follow-up work (src/pages/profiles.tsx:554, src/pages/profiles.tsx:581) - Exposed the new flag on the TypeScript contract to keep downstream consumers type-safe (src/services/cmds.ts:20) * fix(profiles): wrap logging payload for Tauri frontend_log * fix(profile-switch): add rollback and error propagation for failed persistence - Added rollback on apply failure so Mihomo restores to the previous profile before exiting the success path early (state_machine.rs:474). - Reworked persist_profiles_with_timeout to surface timeout/join/save errors, convert them into CmdResult failures, and trigger rollback + error propagation when persistence fails (state_machine.rs:703). * fix(profile-switch): prevent mid-finalize reentrancy and lingering tasks * fix(profile-switch): preserve pending queue and surface discarded switches * fix(profile-switch): avoid draining Mihomo sockets on failed/cancelled switches * fix(app-data-provider): restore backend-driven refresh and reattach fallbacks * fix(profile-switch): queue concurrent updates and add bounded wait/backoff * fix(proxy): trigger live refresh on app start for proxy snapshot * refactor(profile-switch): split flow into layers and centralize async cleanup - Introduced `SwitchDriver` to encapsulate queue and driver logic while keeping the public Tauri command API. - Added workflow/cleanup helpers for notification dispatch and Mihomo connection draining, re-exported for API consistency. 
- Replaced monolithic state machine with `core.rs`, `context.rs`, and `stages.rs`, plus a thin `mod.rs` re-export layer; stage methods are now individually testable. - Removed legacy `workflow/state_machine.rs` and adjusted visibility on re-exported types/constants to ensure compilation. --- UPDATELOG.md | 2 + src-tauri/src/cmd/frontend.rs | 48 + src-tauri/src/cmd/mod.rs | 3 + src-tauri/src/cmd/profile.rs | 720 ++++++--------- src-tauri/src/cmd/profile_switch/driver.rs | 683 ++++++++++++++ src-tauri/src/cmd/profile_switch/mod.rs | 34 + src-tauri/src/cmd/profile_switch/state.rs | 353 ++++++++ .../src/cmd/profile_switch/validation.rs | 113 +++ src-tauri/src/cmd/profile_switch/workflow.rs | 385 ++++++++ .../cmd/profile_switch/workflow/cleanup.rs | 65 ++ .../workflow/state_machine/context.rs | 178 ++++ .../workflow/state_machine/core.rs | 284 ++++++ .../workflow/state_machine/mod.rs | 11 + .../workflow/state_machine/stages.rs | 597 +++++++++++++ src-tauri/src/core/handle.rs | 131 ++- src-tauri/src/core/manager/config.rs | 239 ++++- src-tauri/src/core/notification.rs | 269 +++++- src-tauri/src/lib.rs | 26 + src-tauri/src/utils/draft.rs | 7 + src/components/home/current-proxy-card.tsx | 22 +- src/components/proxy/provider-button.tsx | 310 ++++--- src/components/proxy/proxy-groups.tsx | 122 ++- src/components/proxy/use-render-list.ts | 89 +- src/hooks/use-current-proxy.ts | 12 +- src/hooks/use-profiles.ts | 77 +- src/pages/_layout/useLayoutEvents.ts | 36 +- src/pages/profiles.tsx | 838 +++++++++++------- src/providers/app-data-context.ts | 9 +- src/providers/app-data-provider.tsx | 743 ++++++++++++---- src/services/cmds.ts | 146 ++- src/services/noticeService.ts | 18 +- src/services/refresh.ts | 24 + src/stores/profile-store.ts | 59 ++ src/stores/proxy-store.ts | 298 +++++++ src/utils/asyncQueue.ts | 31 + src/utils/proxy-snapshot.ts | 205 +++++ 36 files changed, 5912 insertions(+), 1275 deletions(-) create mode 100644 src-tauri/src/cmd/frontend.rs create mode 100644 
src-tauri/src/cmd/profile_switch/driver.rs create mode 100644 src-tauri/src/cmd/profile_switch/mod.rs create mode 100644 src-tauri/src/cmd/profile_switch/state.rs create mode 100644 src-tauri/src/cmd/profile_switch/validation.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/cleanup.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs create mode 100644 src/services/refresh.ts create mode 100644 src/stores/profile-store.ts create mode 100644 src/stores/proxy-store.ts create mode 100644 src/utils/asyncQueue.ts create mode 100644 src/utils/proxy-snapshot.ts diff --git a/UPDATELOG.md b/UPDATELOG.md index 20900405..51236c73 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -30,6 +30,7 @@ - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 +- 修复切换订阅卡死
✨ 新增功能 @@ -76,6 +77,7 @@ - 优化首页当前节点对MATCH规则的支持 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 +- 重构订阅切换,保证代理页面的及时刷新 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题
diff --git a/src-tauri/src/cmd/frontend.rs b/src-tauri/src/cmd/frontend.rs
new file mode 100644
index 00000000..8559c589
--- /dev/null
+++ b/src-tauri/src/cmd/frontend.rs
@@ -0,0 +1,48 @@
+use super::CmdResult;
+use crate::{logging, utils::logging::Type};
+use serde::Deserialize;
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct FrontendLogPayload {
+    pub level: Option<String>,
+    pub message: String,
+    pub context: Option<String>,
+}
+
+#[tauri::command]
+pub fn frontend_log(payload: FrontendLogPayload) -> CmdResult<()> {
+    let level = payload.level.as_deref().unwrap_or("info");
+    match level {
+        "trace" | "debug" => logging!(
+            debug,
+            Type::Frontend,
+            "[frontend] {}",
+            payload.message.as_str()
+        ),
+        "warn" => logging!(
+            warn,
+            Type::Frontend,
+            "[frontend] {}",
+            payload.message.as_str()
+        ),
+        "error" => logging!(
+            error,
+            Type::Frontend,
+            "[frontend] {}",
+            payload.message.as_str()
+        ),
+        _ => logging!(
+            info,
+            Type::Frontend,
+            "[frontend] {}",
+            payload.message.as_str()
+        ),
+    }
+
+    if let Some(context) = payload.context {
+        logging!(info, Type::Frontend, "[frontend] context: {}", context);
+    }
+
+    Ok(())
+}
diff --git a/src-tauri/src/cmd/mod.rs b/src-tauri/src/cmd/mod.rs
index 6c748687..2cf76898 100644
--- a/src-tauri/src/cmd/mod.rs
+++ b/src-tauri/src/cmd/mod.rs
@@ -7,10 +7,12 @@ pub type CmdResult<T = ()> = Result<T, String>;
 pub mod app;
 pub mod backup;
 pub mod clash;
+pub mod frontend;
 pub mod lightweight;
 pub mod media_unlock_checker;
 pub mod network;
 pub mod profile;
+mod profile_switch;
 pub mod proxy;
 pub mod runtime;
 pub mod save_profile;
@@ -25,6 +27,7 @@ pub mod webdav;
 pub use app::*;
 pub use backup::*;
 pub use clash::*;
+pub use frontend::*;
 pub use lightweight::*;
 pub use media_unlock_checker::*;
 pub use network::*;
diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs
index 15177936..37cdd2e9 100644
--- a/src-tauri/src/cmd/profile.rs
+++ b/src-tauri/src/cmd/profile.rs
@@ -1,5 +1,4 @@
-use super::CmdResult;
-use
super::StringifyErr; +use super::{CmdResult, StringifyErr, profile_switch}; use crate::{ config::{ Config, IProfiles, PrfItem, PrfOption, @@ -9,77 +8,191 @@ use crate::{ }, profiles_append_item_safe, }, - core::{CoreManager, handle, timer::Timer, tray::Tray}, - feat, logging, - process::AsyncHandler, - ret_err, + core::{CoreManager, handle, timer::Timer}, + feat, logging, ret_err, utils::{dirs, help, logging::Type}, }; +use once_cell::sync::Lazy; +use parking_lot::RwLock; use smartstring::alias::String; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::time::Duration; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -// 全局请求序列号跟踪,用于避免队列化执行 -static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0); +use crate::cmd::profile_switch::{ProfileSwitchStatus, SwitchResultEvent}; -static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false); - -#[tauri::command] -pub async fn get_profiles() -> CmdResult { - // 策略1: 尝试快速获取latest数据 - let latest_result = tokio::time::timeout(Duration::from_millis(500), async { - let profiles = Config::profiles().await; - let latest = profiles.latest_ref(); - IProfiles { - current: latest.current.clone(), - items: latest.items.clone(), - } - }) - .await; - - match latest_result { - Ok(profiles) => { - logging!(info, Type::Cmd, "快速获取配置列表成功"); - return Ok(profiles); - } - Err(_) => { - logging!(warn, Type::Cmd, "快速获取配置超时(500ms)"); - } - } - - // 策略2: 如果快速获取失败,尝试获取data() - let data_result = tokio::time::timeout(Duration::from_secs(2), async { - let profiles = Config::profiles().await; - let data = profiles.latest_ref(); - IProfiles { - current: data.current.clone(), - items: data.items.clone(), - } - }) - .await; - - match data_result { - Ok(profiles) => { - logging!(info, Type::Cmd, "获取draft配置列表成功"); - return Ok(profiles); - } - Err(join_err) => { - logging!( - error, - Type::Cmd, - "获取draft配置任务失败或超时: {}", - join_err - ); - } - } - - // 策略3: 
fallback,尝试重新创建配置 - logging!(warn, Type::Cmd, "所有获取配置策略都失败,尝试fallback"); - - Ok(IProfiles::new().await) +#[derive(Clone)] +struct CachedProfiles { + snapshot: IProfiles, + captured_at: Instant, } -/// 增强配置文件 +static PROFILES_CACHE: Lazy>> = Lazy::new(|| RwLock::new(None)); + +#[derive(Default)] +struct SnapshotMetrics { + fast_hits: AtomicU64, + cache_hits: AtomicU64, + blocking_hits: AtomicU64, + refresh_scheduled: AtomicU64, + last_log_ms: AtomicU64, +} + +static SNAPSHOT_METRICS: Lazy = Lazy::new(SnapshotMetrics::default); + +/// Store the latest snapshot so cache consumers can reuse it without hitting the lock again. +fn update_profiles_cache(snapshot: &IProfiles) { + *PROFILES_CACHE.write() = Some(CachedProfiles { + snapshot: snapshot.clone(), + captured_at: Instant::now(), + }); +} + +/// Return the cached snapshot and how old it is, if present. +fn cached_profiles_snapshot() -> Option<(IProfiles, u128)> { + PROFILES_CACHE.read().as_ref().map(|entry| { + ( + entry.snapshot.clone(), + entry.captured_at.elapsed().as_millis(), + ) + }) +} + +/// Return the latest profiles snapshot, preferring cached data so UI requests never block. +#[tauri::command] +pub async fn get_profiles() -> CmdResult { + let started_at = Instant::now(); + + // Resolve snapshots in three tiers so UI reads never stall on a mutex: + // 1) try a non-blocking read, 2) fall back to the last cached copy while a + // writer holds the lock, 3) block and refresh the cache as a final resort. 
+ if let Some(snapshot) = read_profiles_snapshot_nonblocking().await { + let item_count = snapshot + .items + .as_ref() + .map(|items| items.len()) + .unwrap_or(0); + update_profiles_cache(&snapshot); + SNAPSHOT_METRICS.fast_hits.fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Snapshot served (path=fast, items={}, elapsed={}ms)", + item_count, + started_at.elapsed().as_millis() + ); + maybe_log_snapshot_metrics(); + return Ok(snapshot); + } + + if let Some((cached, age_ms)) = cached_profiles_snapshot() { + SNAPSHOT_METRICS.cache_hits.fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Served cached snapshot while lock busy (age={}ms)", + age_ms + ); + schedule_profiles_snapshot_refresh(); + maybe_log_snapshot_metrics(); + return Ok(cached); + } + + let snapshot = read_profiles_snapshot_blocking().await; + let item_count = snapshot + .items + .as_ref() + .map(|items| items.len()) + .unwrap_or(0); + update_profiles_cache(&snapshot); + SNAPSHOT_METRICS + .blocking_hits + .fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Snapshot served (path=blocking, items={}, elapsed={}ms)", + item_count, + started_at.elapsed().as_millis() + ); + maybe_log_snapshot_metrics(); + Ok(snapshot) +} + +/// Try to grab the latest profile data without waiting for the writer. +async fn read_profiles_snapshot_nonblocking() -> Option { + let profiles = Config::profiles().await; + profiles.try_latest_ref().map(|guard| (**guard).clone()) +} + +/// Fall back to a blocking read when we absolutely must have fresh data. +async fn read_profiles_snapshot_blocking() -> IProfiles { + let profiles = Config::profiles().await; + let guard = profiles.latest_ref(); + (**guard).clone() +} + +/// Schedule a background cache refresh once the exclusive lock becomes available again. 
+fn schedule_profiles_snapshot_refresh() { + crate::process::AsyncHandler::spawn(|| async { + // Once the lock is released we refresh the cached snapshot so the next + // request observes the latest data instead of the stale fallback. + SNAPSHOT_METRICS + .refresh_scheduled + .fetch_add(1, Ordering::Relaxed); + let snapshot = read_profiles_snapshot_blocking().await; + update_profiles_cache(&snapshot); + logging!( + debug, + Type::Cmd, + "[Profiles] Cache refreshed after busy snapshot" + ); + }); +} + +fn maybe_log_snapshot_metrics() { + const LOG_INTERVAL_MS: u64 = 5_000; + let now_ms = current_millis(); + let last_ms = SNAPSHOT_METRICS.last_log_ms.load(Ordering::Relaxed); + if now_ms.saturating_sub(last_ms) < LOG_INTERVAL_MS { + return; + } + + if SNAPSHOT_METRICS + .last_log_ms + .compare_exchange(last_ms, now_ms, Ordering::SeqCst, Ordering::Relaxed) + .is_err() + { + return; + } + + let fast = SNAPSHOT_METRICS.fast_hits.swap(0, Ordering::SeqCst); + let cache = SNAPSHOT_METRICS.cache_hits.swap(0, Ordering::SeqCst); + let blocking = SNAPSHOT_METRICS.blocking_hits.swap(0, Ordering::SeqCst); + let refresh = SNAPSHOT_METRICS.refresh_scheduled.swap(0, Ordering::SeqCst); + + if fast == 0 && cache == 0 && blocking == 0 && refresh == 0 { + return; + } + + logging!( + debug, + Type::Cmd, + "[Profiles][Metrics] 5s window => fast={}, cache={}, blocking={}, refresh_jobs={}", + fast, + cache, + blocking, + refresh + ); +} + +fn current_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 +} + +/// Run the optional enhancement pipeline and refresh Clash when it completes. #[tauri::command] pub async fn enhance_profiles() -> CmdResult { match feat::enhance_profiles().await { @@ -93,79 +206,106 @@ pub async fn enhance_profiles() -> CmdResult { Ok(()) } -/// 导入配置文件 +/// Download a profile from the given URL and persist it to the local catalog. 
#[tauri::command] pub async fn import_profile(url: std::string::String, option: Option) -> CmdResult { - logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url); + logging!(info, Type::Cmd, "[Profile Import] Begin: {}", url); - // 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹 + // Rely on PrfItem::from_url internal timeout/retry logic instead of wrapping with tokio::time::timeout let item = match PrfItem::from_url(&url, None, None, option).await { Ok(it) => { - logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置"); + logging!( + info, + Type::Cmd, + "[Profile Import] Download complete; saving configuration" + ); it } Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 下载失败: {}", e); - return Err(format!("导入订阅失败: {}", e).into()); + logging!(error, Type::Cmd, "[Profile Import] Download failed: {}", e); + return Err(format!("Profile import failed: {}", e).into()); } }; match profiles_append_item_safe(item.clone()).await { Ok(_) => match profiles_save_file_safe().await { Ok(_) => { - logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功"); + logging!( + info, + Type::Cmd, + "[Profile Import] Configuration file saved successfully" + ); } Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 保存配置文件失败: {}", e); + logging!( + error, + Type::Cmd, + "[Profile Import] Failed to save configuration file: {}", + e + ); } }, Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 保存配置失败: {}", e); - return Err(format!("导入订阅失败: {}", e).into()); + logging!( + error, + Type::Cmd, + "[Profile Import] Failed to persist configuration: {}", + e + ); + return Err(format!("Profile import failed: {}", e).into()); } } - // 立即发送配置变更通知 + // Immediately emit a configuration change notification if let Some(uid) = &item.uid { - logging!(info, Type::Cmd, "[导入订阅] 发送配置变更通知: {}", uid); + logging!( + info, + Type::Cmd, + "[Profile Import] Emitting configuration change event: {}", + uid + ); handle::Handle::notify_profile_changed(uid.clone()); } - // 异步保存配置文件并发送全局通知 + // Save configuration asynchronously and emit a global 
notification let uid_clone = item.uid.clone(); if let Some(uid) = uid_clone { - // 延迟发送,确保文件已完全写入 + // Delay notification to ensure the file is fully written tokio::time::sleep(Duration::from_millis(100)).await; handle::Handle::notify_profile_changed(uid); } - logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url); + logging!(info, Type::Cmd, "[Profile Import] Completed: {}", url); Ok(()) } -/// 调整profile的顺序 +/// Move a profile in the list relative to another entry. #[tauri::command] pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { match profiles_reorder_safe(active_id, over_id).await { Ok(_) => { - log::info!(target: "app", "重新排序配置文件"); + log::info!(target: "app", "Reordered profiles"); Ok(()) } Err(err) => { - log::error!(target: "app", "重新排序配置文件失败: {}", err); - Err(format!("重新排序配置文件失败: {}", err).into()) + log::error!(target: "app", "Failed to reorder profiles: {}", err); + Err(format!("Failed to reorder profiles: {}", err).into()) } } } -/// 创建新的profile -/// 创建一个新的配置文件 +/// Create a new profile entry and optionally write its backing file. #[tauri::command] pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResult { match profiles_append_item_with_filedata_safe(item.clone(), file_data).await { Ok(_) => { - // 发送配置变更通知 + // Emit configuration change notification if let Some(uid) = &item.uid { - logging!(info, Type::Cmd, "[创建订阅] 发送配置变更通知: {}", uid); + logging!( + info, + Type::Cmd, + "[Profile Create] Emitting configuration change event: {}", + uid + ); handle::Handle::notify_profile_changed(uid.clone()); } Ok(()) @@ -177,7 +317,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu } } -/// 更新配置文件 +/// Force-refresh a profile from its remote source, if available. 
#[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { match feat::update_profile(index, option, Some(true)).await { @@ -189,11 +329,11 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu } } -/// 删除配置文件 +/// Remove a profile and refresh the running configuration if necessary. #[tauri::command] pub async fn delete_profile(index: String) -> CmdResult { println!("delete_profile: {}", index); - // 使用Send-safe helper函数 + // Use send-safe helper function let should_update = profiles_delete_item_safe(index.clone()) .await .stringify_err()?; @@ -203,8 +343,13 @@ pub async fn delete_profile(index: String) -> CmdResult { match CoreManager::global().update_config().await { Ok(_) => { handle::Handle::refresh_clash(); - // 发送配置变更通知 - logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index); + // Emit configuration change notification + logging!( + info, + Type::Cmd, + "[Profile Delete] Emitting configuration change event: {}", + index + ); handle::Handle::notify_profile_changed(index); } Err(e) => { @@ -216,361 +361,28 @@ pub async fn delete_profile(index: String) -> CmdResult { Ok(()) } -/// 验证新配置文件的语法 -async fn validate_new_profile(new_profile: &String) -> Result<(), ()> { - logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); - - // 获取目标配置文件路径 - let config_file_result = { - let profiles_config = Config::profiles().await; - let profiles_data = profiles_config.latest_ref(); - match profiles_data.get_item(new_profile) { - Ok(item) => { - if let Some(file) = &item.file { - let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); - path.ok() - } else { - None - } - } - Err(e) => { - logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); - None - } - } - }; - - // 如果获取到文件路径,检查YAML语法 - if let Some(file_path) = config_file_result { - if !file_path.exists() { - logging!( - error, - Type::Cmd, - "目标配置文件不存在: {}", - file_path.display() - ); - handle::Handle::notice_message( - "config_validate::file_not_found", - 
format!("{}", file_path.display()), - ); - return Err(()); - } - - // 超时保护 - let file_read_result = tokio::time::timeout( - Duration::from_secs(5), - tokio::fs::read_to_string(&file_path), - ) - .await; - - match file_read_result { - Ok(Ok(content)) => { - let yaml_parse_result = AsyncHandler::spawn_blocking(move || { - serde_yaml_ng::from_str::(&content) - }) - .await; - - match yaml_parse_result { - Ok(Ok(_)) => { - logging!(info, Type::Cmd, "目标配置文件语法正确"); - Ok(()) - } - Ok(Err(err)) => { - let error_msg = format!(" {err}"); - logging!( - error, - Type::Cmd, - "目标配置文件存在YAML语法错误:{}", - error_msg - ); - handle::Handle::notice_message( - "config_validate::yaml_syntax_error", - error_msg.clone(), - ); - Err(()) - } - Err(join_err) => { - let error_msg = format!("YAML解析任务失败: {join_err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::yaml_parse_error", - error_msg.clone(), - ); - Err(()) - } - } - } - Ok(Err(err)) => { - let error_msg = format!("无法读取目标配置文件: {err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_error", - error_msg.clone(), - ); - Err(()) - } - Err(_) => { - let error_msg = "读取配置文件超时(5秒)".to_string(); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_timeout", - error_msg.clone(), - ); - Err(()) - } - } - } else { - Ok(()) - } -} - -/// 执行配置更新并处理结果 -async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> { - logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); - let restore_profiles = IProfiles { - current: Some(prev_profile), - items: None, - }; - Config::profiles() - .await - .draft_mut() - .patch_config(restore_profiles) - .stringify_err()?; - Config::profiles().await.apply(); - crate::process::AsyncHandler::spawn(|| async move { - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); - } - }); - 
logging!(info, Type::Cmd, "成功恢复到之前的配置"); - Ok(()) -} - -async fn handle_success(current_sequence: u64, current_value: Option) -> CmdResult { - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", - current_sequence, - latest_sequence - ); - Config::profiles().await.discard(); - return Ok(false); - } - - logging!( - info, - Type::Cmd, - "配置更新成功,序列号: {}", - current_sequence - ); - Config::profiles().await.apply(); - handle::Handle::refresh_clash(); - - if let Err(e) = Tray::global().update_tooltip().await { - log::warn!(target: "app", "异步更新托盘提示失败: {e}"); - } - - if let Err(e) = Tray::global().update_menu().await { - log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); - } - - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存配置文件失败: {e}"); - } - - if let Some(current) = ¤t_value { - logging!( - info, - Type::Cmd, - "向前端发送配置变更事件: {}, 序列号: {}", - current, - current_sequence - ); - handle::Handle::notify_profile_changed(current.clone()); - } - - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(true) -} - -async fn handle_validation_failure( - error_msg: String, - current_profile: Option, -) -> CmdResult { - logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); - Config::profiles().await.discard(); - if let Some(prev_profile) = current_profile { - restore_previous_profile(prev_profile).await?; - } - handle::Handle::notice_message("config_validate::error", error_msg); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn handle_update_error(e: E, current_sequence: u64) -> CmdResult { - logging!( - warn, - Type::Cmd, - "更新过程发生错误: {}, 序列号: {}", - e, - current_sequence - ); - Config::profiles().await.discard(); - handle::Handle::notice_message("config_validate::boot_error", e.to_string()); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn 
handle_timeout(current_profile: Option, current_sequence: u64) -> CmdResult { - let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; - logging!( - error, - Type::Cmd, - "{}, 序列号: {}", - timeout_msg, - current_sequence - ); - Config::profiles().await.discard(); - if let Some(prev_profile) = current_profile { - restore_previous_profile(prev_profile).await?; - } - handle::Handle::notice_message("config_validate::timeout", timeout_msg); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn perform_config_update( - current_sequence: u64, - current_value: Option, - current_profile: Option, -) -> CmdResult { - logging!( - info, - Type::Cmd, - "开始内核配置更新,序列号: {}", - current_sequence - ); - let update_result = tokio::time::timeout( - Duration::from_secs(30), - CoreManager::global().update_config(), - ) - .await; - - match update_result { - Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await, - Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await, - Ok(Err(e)) => handle_update_error(e, current_sequence).await, - Err(_) => handle_timeout(current_profile, current_sequence).await, - } -} - -/// 修改profiles的配置 +/// Apply partial profile list updates through the switching workflow. 
#[tauri::command] pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { - if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) { - logging!(info, Type::Cmd, "当前正在切换配置,放弃请求"); - return Ok(false); - } - CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst); - - // 为当前请求分配序列号 - let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1; - let target_profile = profiles.current.clone(); - - logging!( - info, - Type::Cmd, - "开始修改配置文件,请求序列号: {}, 目标profile: {:?}", - current_sequence, - target_profile - ); - - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "获取锁后发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - return Ok(false); - } - - // 保存当前配置,以便在验证失败时恢复 - let current_profile = Config::profiles().await.latest_ref().current.clone(); - logging!(info, Type::Cmd, "当前配置: {:?}", current_profile); - - // 如果要切换配置,先检查目标配置文件是否有语法错误 - if let Some(new_profile) = profiles.current.as_ref() - && current_profile.as_ref() != Some(new_profile) - && validate_new_profile(new_profile).await.is_err() - { - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - return Ok(false); - } - - // 检查请求有效性 - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "在核心操作前发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - return Ok(false); - } - - // 更新profiles配置 - logging!( - info, - Type::Cmd, - "正在更新配置草稿,序列号: {}", - current_sequence - ); - - let current_value = profiles.current.clone(); - - let _ = Config::profiles().await.draft_mut().patch_config(profiles); - - // 在调用内核前再次验证请求有效性 - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "在内核交互前发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - 
Config::profiles().await.discard(); - return Ok(false); - } - - perform_config_update(current_sequence, current_value, current_profile).await + profile_switch::patch_profiles_config(profiles).await } -/// 根据profile name修改profiles +/// Switch to the provided profile index and wait for completion before returning. #[tauri::command] pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult { - logging!(info, Type::Cmd, "切换配置到: {}", profile_index); - - let profiles = IProfiles { - current: Some(profile_index), - items: None, - }; - patch_profiles_config(profiles).await + profile_switch::patch_profiles_config_by_profile_index(profile_index).await } -/// 修改某个profile item的 +/// Enqueue a profile switch request and optionally notify on success. +#[tauri::command] +pub async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { + profile_switch::switch_profile(profile_index, notify_success).await +} + +/// Update a specific profile item and refresh timers if its schedule changed. #[tauri::command] pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { - // 保存修改前检查是否有更新 update_interval + // Check for update_interval changes before saving let profiles = Config::profiles().await; let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) { let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval); @@ -589,15 +401,19 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { .await .stringify_err()?; - // 如果更新间隔或允许自动更新变更,异步刷新定时器 + // If the interval or auto-update flag changes, refresh the timer asynchronously if should_refresh_timer { let index_clone = index.clone(); crate::process::AsyncHandler::spawn(move || async move { - logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器..."); + logging!( + info, + Type::Timer, + "Timer interval changed; refreshing timer..." 
+ ); if let Err(e) = crate::core::Timer::global().refresh().await { - logging!(error, Type::Timer, "刷新定时器失败: {}", e); + logging!(error, Type::Timer, "Failed to refresh timer: {}", e); } else { - // 刷新成功后发送自定义事件,不触发配置重载 + // After refreshing successfully, emit a custom event without triggering a reload crate::core::handle::Handle::notify_timer_updated(index_clone); } }); @@ -606,7 +422,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { Ok(()) } -/// 查看配置文件 +/// Open the profile file in the system viewer. #[tauri::command] pub async fn view_profile(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -628,7 +444,7 @@ pub async fn view_profile(index: String) -> CmdResult { help::open_file(path).stringify_err() } -/// 读取配置文件内容 +/// Return the raw YAML contents for the given profile file. #[tauri::command] pub async fn read_profile_file(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -638,10 +454,22 @@ pub async fn read_profile_file(index: String) -> CmdResult { Ok(data) } -/// 获取下一次更新时间 +/// Report the scheduled refresh timestamp (if any) for the profile timer. #[tauri::command] pub async fn get_next_update_time(uid: String) -> CmdResult> { let timer = Timer::global(); let next_time = timer.get_next_update_time(&uid).await; Ok(next_time) } + +/// Return the latest driver snapshot describing active and queued switch tasks. +#[tauri::command] +pub async fn get_profile_switch_status() -> CmdResult { + profile_switch::get_switch_status() +} + +/// Fetch switch result events newer than the provided sequence number. 
+#[tauri::command] +pub async fn get_profile_switch_events(after_sequence: u64) -> CmdResult> { + profile_switch::get_switch_events(after_sequence) +} diff --git a/src-tauri/src/cmd/profile_switch/driver.rs b/src-tauri/src/cmd/profile_switch/driver.rs new file mode 100644 index 00000000..8815458e --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/driver.rs @@ -0,0 +1,683 @@ +use super::{ + CmdResult, + state::{ + ProfileSwitchStatus, SwitchCancellation, SwitchManager, SwitchRequest, SwitchResultStatus, + SwitchTaskStatus, current_millis, manager, + }, + workflow::{self, SwitchPanicInfo, SwitchStage}, +}; +use crate::{logging, utils::logging::Type}; +use futures::FutureExt; +use once_cell::sync::OnceCell; +use smartstring::alias::String as SmartString; +use std::{ + collections::{HashMap, VecDeque}, + panic::AssertUnwindSafe, + time::Duration, +}; +use tokio::{ + sync::{ + Mutex as AsyncMutex, + mpsc::{self, error::TrySendError}, + oneshot, + }, + time::{self, MissedTickBehavior}, +}; + +// Single shared queue so profile switches are executed sequentially and can +// collapse redundant requests for the same profile. +const SWITCH_QUEUE_CAPACITY: usize = 32; +static SWITCH_QUEUE: OnceCell> = OnceCell::new(); + +type CompletionRegistry = AsyncMutex>>; + +static SWITCH_COMPLETION_WAITERS: OnceCell = OnceCell::new(); + +/// Global map of task id -> completion channel sender used when callers await the result. +fn completion_waiters() -> &'static CompletionRegistry { + SWITCH_COMPLETION_WAITERS.get_or_init(|| AsyncMutex::new(HashMap::new())) +} + +/// Register a oneshot sender so `switch_profile_and_wait` can be notified when its task finishes. 
+async fn register_completion_waiter(task_id: u64) -> oneshot::Receiver { + let (sender, receiver) = oneshot::channel(); + let mut guard = completion_waiters().lock().await; + if guard.insert(task_id, sender).is_some() { + logging!( + warn, + Type::Cmd, + "Replacing existing completion waiter for task {}", + task_id + ); + } + receiver +} + +/// Remove an outstanding completion waiter; used when enqueue fails or succeeds immediately. +async fn remove_completion_waiter(task_id: u64) -> Option> { + completion_waiters().lock().await.remove(&task_id) +} + +/// Fire-and-forget notify helper so we do not block the driver loop. +fn notify_completion_waiter(task_id: u64, result: SwitchResultStatus) { + tokio::spawn(async move { + let sender = completion_waiters().lock().await.remove(&task_id); + if let Some(sender) = sender { + let _ = sender.send(result); + } + }); +} + +const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5); +const WATCHDOG_TICK: Duration = Duration::from_millis(500); + +// Mutable snapshot of the driver's world; all mutations happen on the driver task. +#[derive(Debug, Default)] +struct SwitchDriverState { + active: Option, + queue: VecDeque, + latest_tokens: HashMap, + cleanup_profiles: HashMap>, + last_result: Option, +} + +// Messages passed through SWITCH_QUEUE so the driver can react to events in order. 
+#[derive(Debug)] +enum SwitchDriverMessage { + Request { + request: SwitchRequest, + respond_to: oneshot::Sender, + }, + Completion { + request: SwitchRequest, + outcome: SwitchJobOutcome, + }, + CleanupDone { + profile: SmartString, + }, +} + +#[derive(Debug)] +enum SwitchJobOutcome { + Completed { + success: bool, + cleanup: workflow::CleanupHandle, + }, + Panicked { + info: SwitchPanicInfo, + cleanup: workflow::CleanupHandle, + }, +} + +pub(super) async fn switch_profile( + profile_index: impl Into, + notify_success: bool, +) -> CmdResult { + switch_profile_impl(profile_index.into(), notify_success, false).await +} + +pub(super) async fn switch_profile_and_wait( + profile_index: impl Into, + notify_success: bool, +) -> CmdResult { + switch_profile_impl(profile_index.into(), notify_success, true).await +} + +async fn switch_profile_impl( + profile_index: SmartString, + notify_success: bool, + wait_for_completion: bool, +) -> CmdResult { + // wait_for_completion is used by CLI flows that must block until the switch finishes. 
+ let manager = manager(); + let sender = switch_driver_sender(); + + let request = SwitchRequest::new( + manager.next_task_id(), + profile_index.clone(), + notify_success, + ); + + logging!( + info, + Type::Cmd, + "Queue profile switch task {} -> {} (notify={})", + request.task_id(), + profile_index, + notify_success + ); + + let task_id = request.task_id(); + let mut completion_rx = if wait_for_completion { + Some(register_completion_waiter(task_id).await) + } else { + None + }; + + let (tx, rx) = oneshot::channel(); + + let enqueue_result = match sender.try_send(SwitchDriverMessage::Request { + request, + respond_to: tx, + }) { + Ok(_) => match rx.await { + Ok(result) => Ok(result), + Err(err) => { + logging!( + error, + Type::Cmd, + "Failed to receive enqueue result for profile {}: {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + }, + Err(TrySendError::Full(msg)) => { + logging!( + warn, + Type::Cmd, + "Profile switch queue is full; waiting for space: {}", + profile_index + ); + match sender.send(msg).await { + Ok(_) => match rx.await { + Ok(result) => Ok(result), + Err(err) => { + logging!( + error, + Type::Cmd, + "Failed to receive enqueue result after wait for {}: {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + }, + Err(err) => { + logging!( + error, + Type::Cmd, + "Profile switch queue closed while waiting ({}): {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + } + } + Err(TrySendError::Closed(_)) => { + logging!( + error, + Type::Cmd, + "Profile switch queue is closed, cannot enqueue: {}", + profile_index + ); + Err("switch profile queue unavailable".into()) + } + }; + + let accepted = match enqueue_result { + Ok(result) => result, + Err(err) => { + if completion_rx.is_some() { + remove_completion_waiter(task_id).await; + } + return Err(err); + } + }; + + if !accepted { + if completion_rx.is_some() { + 
remove_completion_waiter(task_id).await; + } + return Ok(false); + } + + if let Some(rx_completion) = completion_rx.take() { + match rx_completion.await { + Ok(status) => Ok(status.success), + Err(err) => { + logging!( + error, + Type::Cmd, + "Switch task {} completion channel dropped: {}", + task_id, + err + ); + Err("profile switch completion unavailable".into()) + } + } + } else { + Ok(true) + } +} + +fn switch_driver_sender() -> &'static mpsc::Sender { + SWITCH_QUEUE.get_or_init(|| { + let (tx, rx) = mpsc::channel::(SWITCH_QUEUE_CAPACITY); + let driver_tx = tx.clone(); + tokio::spawn(async move { + let manager = manager(); + let driver = SwitchDriver::new(manager, driver_tx); + driver.run(rx).await; + }); + tx + }) +} + +struct SwitchDriver { + manager: &'static SwitchManager, + sender: mpsc::Sender, + state: SwitchDriverState, +} + +impl SwitchDriver { + fn new(manager: &'static SwitchManager, sender: mpsc::Sender) -> Self { + let state = SwitchDriverState::default(); + manager.set_status(state.snapshot(manager)); + Self { + manager, + sender, + state, + } + } + + async fn run(mut self, mut rx: mpsc::Receiver) { + while let Some(message) = rx.recv().await { + match message { + SwitchDriverMessage::Request { + request, + respond_to, + } => { + self.handle_enqueue(request, respond_to); + } + SwitchDriverMessage::Completion { request, outcome } => { + self.handle_completion(request, outcome); + } + SwitchDriverMessage::CleanupDone { profile } => { + self.handle_cleanup_done(profile); + } + } + } + } + + fn handle_enqueue(&mut self, request: SwitchRequest, respond_to: oneshot::Sender) { + // Each new request supersedes older ones for the same profile to avoid thrashing the core. 
+ let mut responder = Some(respond_to); + let accepted = true; + let profile_key = request.profile_id().clone(); + let cleanup_pending = + self.state.active.is_none() && !self.state.cleanup_profiles.is_empty(); + + if cleanup_pending && self.state.cleanup_profiles.contains_key(&profile_key) { + logging!( + debug, + Type::Cmd, + "Cleanup running for {}; queueing switch task {} -> {} to run afterwards", + profile_key, + request.task_id(), + profile_key + ); + if let Some(previous) = self + .state + .latest_tokens + .insert(profile_key.clone(), request.cancel_token().clone()) + { + previous.cancel(); + } + self.state + .queue + .retain(|queued| queued.profile_id() != &profile_key); + self.state.queue.push_back(request); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + self.publish_status(); + return; + } + + if cleanup_pending { + logging!( + debug, + Type::Cmd, + "Cleanup running for {} profile(s); queueing task {} -> {} to run after cleanup without clearing existing requests", + self.state.cleanup_profiles.len(), + request.task_id(), + profile_key + ); + } + + if let Some(previous) = self + .state + .latest_tokens + .insert(profile_key.clone(), request.cancel_token().clone()) + { + previous.cancel(); + } + + if let Some(active) = self.state.active.as_mut() + && active.profile_id() == &profile_key + { + active.cancel_token().cancel(); + active.merge_notify(request.notify()); + self.state + .queue + .retain(|queued| queued.profile_id() != &profile_key); + self.state.queue.push_front(request.clone()); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + self.publish_status(); + return; + } + + if let Some(active) = self.state.active.as_ref() { + logging!( + debug, + Type::Cmd, + "Cancelling active switch task {} (profile={}) in favour of task {} -> {}", + active.task_id(), + active.profile_id(), + request.task_id(), + profile_key + ); + active.cancel_token().cancel(); + } + + self.state + .queue + 
.retain(|queued| queued.profile_id() != &profile_key); + + self.state.queue.push_back(request.clone()); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + + self.start_next_job(); + self.publish_status(); + } + + fn handle_completion(&mut self, request: SwitchRequest, outcome: SwitchJobOutcome) { + // Translate the workflow result into an event the frontend can understand. + let result_record = match &outcome { + SwitchJobOutcome::Completed { success, .. } => { + logging!( + info, + Type::Cmd, + "Switch task {} completed (success={})", + request.task_id(), + success + ); + if *success { + SwitchResultStatus::success(request.task_id(), request.profile_id()) + } else { + SwitchResultStatus::failed(request.task_id(), request.profile_id(), None, None) + } + } + SwitchJobOutcome::Panicked { info, .. } => { + logging!( + error, + Type::Cmd, + "Switch task {} panicked at stage {:?}: {}", + request.task_id(), + info.stage, + info.detail + ); + SwitchResultStatus::failed( + request.task_id(), + request.profile_id(), + Some(format!("{:?}", info.stage)), + Some(info.detail.clone()), + ) + } + }; + + if let Some(active) = self.state.active.as_ref() + && active.task_id() == request.task_id() + { + self.state.active = None; + } + + if let Some(latest) = self.state.latest_tokens.get(request.profile_id()) + && latest.same_token(request.cancel_token()) + { + self.state.latest_tokens.remove(request.profile_id()); + } + + let cleanup = match outcome { + SwitchJobOutcome::Completed { cleanup, .. } => cleanup, + SwitchJobOutcome::Panicked { cleanup, .. 
} => cleanup, + }; + + self.track_cleanup(request.profile_id().clone(), cleanup); + + let event_record = result_record.clone(); + self.state.last_result = Some(result_record); + notify_completion_waiter(request.task_id(), event_record.clone()); + self.manager.push_event(event_record); + self.start_next_job(); + self.publish_status(); + } + + fn handle_cleanup_done(&mut self, profile: SmartString) { + if let Some(handle) = self.state.cleanup_profiles.remove(&profile) { + handle.abort(); + } + self.start_next_job(); + self.publish_status(); + } + + fn start_next_job(&mut self) { + if self.state.active.is_some() || !self.state.cleanup_profiles.is_empty() { + self.publish_status(); + return; + } + + while let Some(request) = self.state.queue.pop_front() { + if request.cancel_token().is_cancelled() { + self.discard_request(request); + continue; + } + + self.state.active = Some(request.clone()); + self.start_switch_job(request); + break; + } + + self.publish_status(); + } + + fn track_cleanup(&mut self, profile: SmartString, cleanup: workflow::CleanupHandle) { + if let Some(existing) = self.state.cleanup_profiles.remove(&profile) { + existing.abort(); + } + + let driver_tx = self.sender.clone(); + let profile_clone = profile.clone(); + let handle = tokio::spawn(async move { + let profile_label = profile_clone.clone(); + if let Err(err) = cleanup.await { + logging!( + warn, + Type::Cmd, + "Cleanup task for profile {} failed: {}", + profile_label.as_str(), + err + ); + } + if let Err(err) = driver_tx + .send(SwitchDriverMessage::CleanupDone { + profile: profile_clone, + }) + .await + { + logging!( + error, + Type::Cmd, + "Failed to push cleanup completion for profile {}: {}", + profile_label.as_str(), + err + ); + } + }); + self.state.cleanup_profiles.insert(profile, handle); + } + + fn start_switch_job(&self, request: SwitchRequest) { + // Run the workflow in a background task while the driver keeps processing messages. 
+ let driver_tx = self.sender.clone(); + let manager = self.manager; + + let completion_request = request.clone(); + let heartbeat = request.heartbeat().clone(); + let cancel_token = request.cancel_token().clone(); + let task_id = request.task_id(); + let profile_label = request.profile_id().clone(); + + tokio::spawn(async move { + let mut watchdog_interval = time::interval(WATCHDOG_TICK); + watchdog_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + let workflow_fut = + AssertUnwindSafe(workflow::run_switch_job(manager, request)).catch_unwind(); + tokio::pin!(workflow_fut); + + let job_result = loop { + tokio::select! { + res = workflow_fut.as_mut() => { + break match res { + Ok(Ok(result)) => SwitchJobOutcome::Completed { + success: result.success, + cleanup: result.cleanup, + }, + Ok(Err(error)) => SwitchJobOutcome::Panicked { + info: error.info, + cleanup: error.cleanup, + }, + Err(payload) => { + let info = SwitchPanicInfo::driver_task( + workflow::describe_panic_payload(payload.as_ref()), + ); + let cleanup = workflow::schedule_post_switch_failure( + profile_label.clone(), + completion_request.notify(), + completion_request.task_id(), + ); + SwitchJobOutcome::Panicked { info, cleanup } + } + }; + } + _ = watchdog_interval.tick() => { + if cancel_token.is_cancelled() { + continue; + } + let elapsed = heartbeat.elapsed(); + if elapsed > WATCHDOG_TIMEOUT { + let stage = SwitchStage::from_code(heartbeat.stage_code()) + .unwrap_or(SwitchStage::Workflow); + logging!( + warn, + Type::Cmd, + "Switch task {} watchdog timeout (profile={} stage={:?}, elapsed={:?}); cancelling", + task_id, + profile_label.as_str(), + stage, + elapsed + ); + cancel_token.cancel(); + } + } + } + }; + + let request_for_error = completion_request.clone(); + + if let Err(err) = driver_tx + .send(SwitchDriverMessage::Completion { + request: completion_request, + outcome: job_result, + }) + .await + { + logging!( + error, + Type::Cmd, + "Failed to push switch completion to driver: 
{}", + err + ); + notify_completion_waiter( + request_for_error.task_id(), + SwitchResultStatus::failed( + request_for_error.task_id(), + request_for_error.profile_id(), + Some("driver".to_string()), + Some(format!("completion dispatch failed: {}", err)), + ), + ); + } + }); + } + + /// Mark a request as failed because a newer request superseded it. + fn discard_request(&mut self, request: SwitchRequest) { + let key = request.profile_id().clone(); + let should_remove = self + .state + .latest_tokens + .get(&key) + .map(|latest| latest.same_token(request.cancel_token())) + .unwrap_or(false); + + if should_remove { + self.state.latest_tokens.remove(&key); + } + + if !request.cancel_token().is_cancelled() { + request.cancel_token().cancel(); + } + + let event = SwitchResultStatus::cancelled( + request.task_id(), + request.profile_id(), + Some("request superseded".to_string()), + ); + + self.state.last_result = Some(event.clone()); + notify_completion_waiter(request.task_id(), event.clone()); + self.manager.push_event(event); + } + + fn publish_status(&self) { + self.manager.set_status(self.state.snapshot(self.manager)); + } +} + +impl SwitchDriverState { + /// Lightweight struct suitable for sharing across the command boundary. 
+ fn snapshot(&self, manager: &SwitchManager) -> ProfileSwitchStatus { + let active = self + .active + .as_ref() + .map(|req| SwitchTaskStatus::from_request(req, false)); + let queue = self + .queue + .iter() + .map(|req| SwitchTaskStatus::from_request(req, true)) + .collect::>(); + let cleanup_profiles = self + .cleanup_profiles + .keys() + .map(|key| key.to_string()) + .collect::>(); + + ProfileSwitchStatus { + is_switching: manager.is_switching(), + active, + queue, + cleanup_profiles, + last_result: self.last_result.clone(), + last_updated: current_millis(), + } + } +} diff --git a/src-tauri/src/cmd/profile_switch/mod.rs b/src-tauri/src/cmd/profile_switch/mod.rs new file mode 100644 index 00000000..0729c68d --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/mod.rs @@ -0,0 +1,34 @@ +// Profile switch orchestration: plumbing between the public tauri commands, +// the async driver queue, validation helpers, and the state machine workflow. +mod driver; +mod state; +mod validation; +mod workflow; + +pub use state::{ProfileSwitchStatus, SwitchResultEvent}; + +use smartstring::alias::String; + +use super::CmdResult; + +pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult { + workflow::patch_profiles_config(profiles).await +} + +pub(super) async fn patch_profiles_config_by_profile_index( + profile_index: String, +) -> CmdResult { + driver::switch_profile_and_wait(profile_index, false).await +} + +pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { + driver::switch_profile(profile_index, notify_success).await +} + +pub(super) fn get_switch_status() -> CmdResult { + Ok(state::manager().status_snapshot()) +} + +pub(super) fn get_switch_events(after_sequence: u64) -> CmdResult> { + Ok(state::manager().events_after(after_sequence)) +} diff --git a/src-tauri/src/cmd/profile_switch/state.rs b/src-tauri/src/cmd/profile_switch/state.rs new file mode 100644 index 00000000..1bb52d6b --- /dev/null 
+++ b/src-tauri/src/cmd/profile_switch/state.rs @@ -0,0 +1,353 @@ +use once_cell::sync::OnceCell; +use parking_lot::RwLock; +use serde::Serialize; +use smartstring::alias::String as SmartString; +use std::collections::VecDeque; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::sync::{Mutex, Notify}; + +pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30); +pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5); + +static SWITCH_MANAGER: OnceCell = OnceCell::new(); + +pub(super) fn manager() -> &'static SwitchManager { + SWITCH_MANAGER.get_or_init(SwitchManager::default) +} + +#[derive(Debug)] +// Central coordination point shared between the driver and workflow state machine. +pub(super) struct SwitchManager { + core_mutex: Mutex<()>, + request_sequence: AtomicU64, + switching: AtomicBool, + task_sequence: AtomicU64, + status: RwLock, + event_sequence: AtomicU64, + recent_events: RwLock>, +} + +impl Default for SwitchManager { + fn default() -> Self { + Self { + core_mutex: Mutex::new(()), + request_sequence: AtomicU64::new(0), + switching: AtomicBool::new(false), + task_sequence: AtomicU64::new(0), + status: RwLock::new(ProfileSwitchStatus::default()), + event_sequence: AtomicU64::new(0), + recent_events: RwLock::new(VecDeque::with_capacity(32)), + } + } +} + +impl SwitchManager { + pub(super) fn core_mutex(&self) -> &Mutex<()> { + &self.core_mutex + } + + // Monotonic identifiers so logs can correlate enqueue/finish pairs. + pub(super) fn next_task_id(&self) -> u64 { + self.task_sequence.fetch_add(1, Ordering::SeqCst) + 1 + } + + /// Sequence id assigned to each enqueue request so we can spot stale work. 
+ pub(super) fn next_request_sequence(&self) -> u64 { + self.request_sequence.fetch_add(1, Ordering::SeqCst) + 1 + } + + pub(super) fn latest_request_sequence(&self) -> u64 { + self.request_sequence.load(Ordering::SeqCst) + } + + pub(super) fn begin_switch(&'static self) -> SwitchScope<'static> { + self.switching.store(true, Ordering::SeqCst); + SwitchScope { manager: self } + } + + pub(super) fn is_switching(&self) -> bool { + self.switching.load(Ordering::SeqCst) + } + + pub(super) fn set_status(&self, status: ProfileSwitchStatus) { + *self.status.write() = status; + } + + pub(super) fn status_snapshot(&self) -> ProfileSwitchStatus { + self.status.read().clone() + } + pub(super) fn push_event(&self, result: SwitchResultStatus) { + const MAX_EVENTS: usize = 64; + let sequence = self.event_sequence.fetch_add(1, Ordering::SeqCst) + 1; + let mut guard = self.recent_events.write(); + if guard.len() == MAX_EVENTS { + guard.pop_front(); + } + guard.push_back(SwitchResultEvent { sequence, result }); + } + + pub(super) fn events_after(&self, sequence: u64) -> Vec { + self.recent_events + .read() + .iter() + .filter(|event| event.sequence > sequence) + .cloned() + .collect() + } +} + +pub(super) struct SwitchScope<'a> { + manager: &'a SwitchManager, +} + +impl Drop for SwitchScope<'_> { + fn drop(&mut self) { + self.manager.switching.store(false, Ordering::SeqCst); + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchCancellation { + flag: Arc, + notify: Arc, +} + +impl SwitchCancellation { + pub(super) fn new() -> Self { + Self { + flag: Arc::new(AtomicBool::new(false)), + notify: Arc::new(Notify::new()), + } + } + + pub(super) fn cancel(&self) { + self.flag.store(true, Ordering::SeqCst); + self.notify.notify_waiters(); + } + + /// True if another request already cancelled this job. 
+ pub(super) fn is_cancelled(&self) -> bool { + self.flag.load(Ordering::SeqCst) + } + + pub(super) fn same_token(&self, other: &SwitchCancellation) -> bool { + Arc::ptr_eq(&self.flag, &other.flag) + } + + pub(super) async fn cancelled_future(&self) { + // Used by async blocks that want to pause until a newer request pre-empts them. + if self.is_cancelled() { + return; + } + self.notify.notified().await; + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchRequest { + task_id: u64, + profile_id: SmartString, + notify: bool, + cancel_token: SwitchCancellation, + heartbeat: SwitchHeartbeat, +} + +impl SwitchRequest { + pub(super) fn new(task_id: u64, profile_id: SmartString, notify: bool) -> Self { + Self { + task_id, + profile_id, + notify, + cancel_token: SwitchCancellation::new(), + heartbeat: SwitchHeartbeat::new(), + } + } + + pub(super) fn task_id(&self) -> u64 { + self.task_id + } + + pub(super) fn profile_id(&self) -> &SmartString { + &self.profile_id + } + + pub(super) fn notify(&self) -> bool { + self.notify + } + + pub(super) fn merge_notify(&mut self, notify: bool) { + // When a new request wants a toast, remember it even if an older request did not. 
+ if notify { + self.notify = true; + } + } + + pub(super) fn cancel_token(&self) -> &SwitchCancellation { + &self.cancel_token + } + + pub(super) fn heartbeat(&self) -> &SwitchHeartbeat { + &self.heartbeat + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchHeartbeat { + last_tick_millis: Arc, + stage_code: Arc, +} + +fn now_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 +} + +#[derive(Debug, Clone, Serialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ProfileSwitchStatus { + pub is_switching: bool, + pub active: Option, + pub queue: Vec, + pub cleanup_profiles: Vec, + pub last_result: Option, + pub last_updated: u64, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchTaskStatus { + pub task_id: u64, + pub profile_id: String, + pub notify: bool, + pub stage: Option, + pub queued: bool, +} + +impl SwitchTaskStatus { + pub(super) fn from_request(request: &SwitchRequest, queued: bool) -> Self { + Self { + task_id: request.task_id(), + profile_id: request.profile_id().to_string(), + notify: request.notify(), + stage: if queued { + None + } else { + Some(request.heartbeat().stage_code()) + }, + queued, + } + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchResultStatus { + pub task_id: u64, + pub profile_id: String, + pub success: bool, + pub cancelled: bool, + pub finished_at: u64, + pub error_stage: Option, + pub error_detail: Option, +} + +impl SwitchResultStatus { + pub(super) fn success(task_id: u64, profile_id: &SmartString) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), + success: true, + cancelled: false, + finished_at: now_millis(), + error_stage: None, + error_detail: None, + } + } + + pub(super) fn failed( + task_id: u64, + profile_id: &SmartString, + stage: Option, + detail: Option, + ) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), 
+ success: false, + cancelled: false, + finished_at: now_millis(), + error_stage: stage, + error_detail: detail, + } + } + + pub(super) fn cancelled( + task_id: u64, + profile_id: &SmartString, + detail: Option, + ) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), + success: false, + cancelled: true, + finished_at: now_millis(), + error_stage: Some("cancelled".to_string()), + error_detail: detail, + } + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchResultEvent { + pub sequence: u64, + pub result: SwitchResultStatus, +} + +pub(super) fn current_millis() -> u64 { + now_millis() +} + +impl SwitchHeartbeat { + fn now_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 + } + + pub(super) fn new() -> Self { + let heartbeat = Self { + last_tick_millis: Arc::new(AtomicU64::new(Self::now_millis())), + stage_code: Arc::new(AtomicU32::new(0)), + }; + heartbeat.touch(); + heartbeat + } + + pub(super) fn touch(&self) { + self.last_tick_millis + .store(Self::now_millis(), Ordering::SeqCst); + } + + /// Update the internal timer to reflect the amount of time since the last heartbeat. 
+ pub(super) fn elapsed(&self) -> Duration { + let last = self.last_tick_millis.load(Ordering::SeqCst); + let now = Self::now_millis(); + Duration::from_millis(now.saturating_sub(last)) + } + + pub(super) fn set_stage(&self, stage: u32) { + self.stage_code.store(stage, Ordering::SeqCst); + self.touch(); + } + + pub(super) fn stage_code(&self) -> u32 { + self.stage_code.load(Ordering::SeqCst) + } +} diff --git a/src-tauri/src/cmd/profile_switch/validation.rs b/src-tauri/src/cmd/profile_switch/validation.rs new file mode 100644 index 00000000..b15806cf --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/validation.rs @@ -0,0 +1,113 @@ +use crate::{ + config::Config, + logging, + process::AsyncHandler, + utils::{dirs, logging::Type}, +}; +use serde_yaml_ng as serde_yaml; +use smartstring::alias::String; +use std::time::Duration; +use tokio::{fs as tokio_fs, time}; + +const YAML_READ_TIMEOUT: Duration = Duration::from_secs(5); + +/// Verify that the requested profile exists locally and is well-formed before switching. 
+pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> { + logging!( + info, + Type::Cmd, + "Validating profile switch task {} -> {}", + task_id, + profile_id + ); + + let profile_key: String = profile_id.into(); + let (file_path, profile_type, is_current, remote_url) = { + let profiles_guard = Config::profiles().await; + let latest = profiles_guard.latest_ref(); + let item = latest.get_item(&profile_key).map_err(|err| -> String { + format!("Target profile {} not found: {}", profile_id, err).into() + })?; + ( + item.file.clone().map(|f| f.to_string()), + item.itype.clone().map(|t| t.to_string()), + latest + .current + .as_ref() + .map(|current| current.as_str() == profile_id) + .unwrap_or(false), + item.url.clone().map(|u| u.to_string()), + ) + }; + + if is_current { + logging!( + info, + Type::Cmd, + "Switch task {} is targeting the current profile {}; skipping validation", + task_id, + profile_id + ); + return Ok(()); + } + + if matches!(profile_type.as_deref(), Some("remote")) { + // Remote profiles must retain a URL so the subsequent refresh job knows where to download. 
+ let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false); + if !has_url { + return Err({ + let msg = format!("Remote profile {} is missing a download URL", profile_id); + msg.into() + }); + } + } + + if let Some(file) = file_path { + let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String { + format!("Failed to resolve profiles directory: {}", err).into() + })?; + let path = profiles_dir.join(&file); + + let contents = match time::timeout(YAML_READ_TIMEOUT, tokio_fs::read_to_string(&path)).await + { + Ok(Ok(contents)) => contents, + Ok(Err(err)) => { + return Err( + format!("Failed to read profile file {}: {}", path.display(), err).into(), + ); + } + Err(_) => { + return Err(format!( + "Timed out reading profile file {} after {:?}", + path.display(), + YAML_READ_TIMEOUT + ) + .into()); + } + }; + + let parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml::from_str::(&contents) + }) + .await; + + match parse_result { + Ok(Ok(_)) => {} + Ok(Err(err)) => { + return Err( + format!("Profile YAML parse failed for {}: {}", path.display(), err).into(), + ); + } + Err(join_err) => { + return Err(format!( + "Profile YAML parse task panicked for {}: {}", + path.display(), + join_err + ) + .into()); + } + } + } + + Ok(()) +} diff --git a/src-tauri/src/cmd/profile_switch/workflow.rs b/src-tauri/src/cmd/profile_switch/workflow.rs new file mode 100644 index 00000000..27d16269 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow.rs @@ -0,0 +1,385 @@ +use super::{ + CmdResult, + state::{SWITCH_JOB_TIMEOUT, SwitchManager, SwitchRequest, manager}, + validation::validate_switch_request, +}; +use crate::cmd::StringifyErr; +use crate::{ + config::{Config, IProfiles, profiles::profiles_save_file_safe}, + core::handle, + logging, + process::AsyncHandler, + utils::{dirs, logging::Type}, +}; +use futures::FutureExt; +use serde_yaml_ng as serde_yaml; +use smartstring::alias::String as SmartString; +use std::{any::Any, 
panic::AssertUnwindSafe, time::Duration}; +use tokio::{fs as tokio_fs, time}; + +mod cleanup; +mod state_machine; +pub(super) use cleanup::{ + CleanupHandle, schedule_post_switch_failure, schedule_post_switch_success, +}; + +use state_machine::{CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchStateMachine}; +pub(super) use state_machine::{SwitchPanicInfo, SwitchStage}; + +pub(super) struct SwitchWorkflowResult { + pub success: bool, + pub cleanup: CleanupHandle, +} + +pub(super) struct SwitchWorkflowError { + pub info: SwitchPanicInfo, + pub cleanup: CleanupHandle, +} + +pub(super) async fn run_switch_job( + manager: &'static SwitchManager, + request: SwitchRequest, +) -> Result { + // Short-circuit cancelled jobs before we allocate resources or emit events. + if request.cancel_token().is_cancelled() { + logging!( + info, + Type::Cmd, + "Switch task {} cancelled before validation", + request.task_id() + ); + let cleanup = schedule_post_switch_failure( + request.profile_id().clone(), + request.notify(), + request.task_id(), + ); + return Ok(SwitchWorkflowResult { + success: false, + cleanup, + }); + } + + let profile_id = request.profile_id().clone(); + let task_id = request.task_id(); + let notify = request.notify(); + + if let Err(err) = validate_switch_request(task_id, profile_id.as_str()).await { + logging!( + warn, + Type::Cmd, + "Validation failed for switch task {} -> {}: {}", + task_id, + profile_id, + err + ); + handle::Handle::notice_message("config_validate::error", err.clone()); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + return Ok(SwitchWorkflowResult { + success: false, + cleanup, + }); + } + + logging!( + info, + Type::Cmd, + "Starting switch task {} for profile {} (notify={})", + task_id, + profile_id, + notify + ); + + let pipeline_request = request; + // The state machine owns the heavy lifting. We wrap it with timeout/panic guards so the driver never hangs. 
+ let pipeline = async move { + let target_profile = pipeline_request.profile_id().clone(); + SwitchStateMachine::new( + manager, + Some(pipeline_request), + IProfiles { + current: Some(target_profile), + items: None, + }, + ) + .run() + .await + }; + + match time::timeout( + SWITCH_JOB_TIMEOUT, + AssertUnwindSafe(pipeline).catch_unwind(), + ) + .await + { + Err(_) => { + logging!( + error, + Type::Cmd, + "Profile switch task {} timed out after {:?}", + task_id, + SWITCH_JOB_TIMEOUT + ); + handle::Handle::notice_message( + "config_validate::error", + format!("profile switch timed out: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Ok(SwitchWorkflowResult { + success: false, + cleanup, + }) + } + Ok(Err(panic_payload)) => { + let panic_message = describe_panic_payload(panic_payload.as_ref()); + logging!( + error, + Type::Cmd, + "Panic captured during profile switch task {} ({}): {}", + task_id, + profile_id, + panic_message + ); + handle::Handle::notice_message( + "config_validate::panic", + format!("profile switch panic: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Err(SwitchWorkflowError { + info: SwitchPanicInfo::workflow_root(panic_message), + cleanup, + }) + } + Ok(Ok(machine_result)) => match machine_result { + Ok(cmd_result) => match cmd_result { + Ok(success) => { + let cleanup = + schedule_post_switch_success(profile_id.clone(), success, notify, task_id); + Ok(SwitchWorkflowResult { success, cleanup }) + } + Err(err) => { + logging!( + error, + Type::Cmd, + "Profile switch failed ({}): {}", + profile_id, + err + ); + handle::Handle::notice_message("config_validate::error", err.clone()); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Ok(SwitchWorkflowResult { + success: false, + cleanup, + }) + } + }, + Err(panic_info) => { + logging!( + error, + Type::Cmd, + "State machine panic during profile 
switch task {} ({} {:?}): {}", + task_id, + profile_id, + panic_info.stage, + panic_info.detail + ); + handle::Handle::notice_message( + "config_validate::panic", + format!("profile switch panic: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Err(SwitchWorkflowError { + info: panic_info, + cleanup, + }) + } + }, + } +} + +/// Allow patch operations (no driver request) to use the same state machine pipeline. +pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { + match SwitchStateMachine::new(manager(), None, profiles) + .run() + .await + { + Ok(result) => result, + Err(panic_info) => Err(format!( + "profile switch panic ({:?}): {}", + panic_info.stage, panic_info.detail + ) + .into()), + } +} + +/// Parse the target profile YAML on a background thread to catch syntax errors early. +pub(super) async fn validate_profile_yaml(profile: &SmartString) -> CmdResult { + let file_path = { + let profiles_guard = Config::profiles().await; + let profiles_data = profiles_guard.latest_ref(); + match profiles_data.get_item(profile) { + Ok(item) => item.file.as_ref().and_then(|file| { + dirs::app_profiles_dir() + .ok() + .map(|dir| dir.join(file.as_str())) + }), + Err(e) => { + logging!( + error, + Type::Cmd, + "Failed to load target profile metadata: {}", + e + ); + return Ok(false); + } + } + }; + + let Some(path) = file_path else { + return Ok(true); + }; + + if !path.exists() { + logging!( + error, + Type::Cmd, + "Target profile file does not exist: {}", + path.display() + ); + handle::Handle::notice_message( + "config_validate::file_not_found", + format!("{}", path.display()), + ); + return Ok(false); + } + + let file_read_result = + time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&path)).await; + + match file_read_result { + Ok(Ok(content)) => { + let yaml_parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml::from_str::(&content) + }) + .await; + + match 
yaml_parse_result { + Ok(Ok(_)) => { + logging!(info, Type::Cmd, "Target profile YAML syntax is valid"); + Ok(true) + } + Ok(Err(err)) => { + let error_msg = format!(" {err}"); + logging!( + error, + Type::Cmd, + "Target profile contains YAML syntax errors: {}", + error_msg + ); + handle::Handle::notice_message( + "config_validate::yaml_syntax_error", + error_msg.clone(), + ); + Ok(false) + } + Err(join_err) => { + let error_msg = format!("YAML parsing task failed: {join_err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::yaml_parse_error", + error_msg.clone(), + ); + Ok(false) + } + } + } + Ok(Err(err)) => { + let error_msg = format!("Failed to read target profile file: {err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message("config_validate::file_read_error", error_msg.clone()); + Ok(false) + } + Err(_) => { + let error_msg = "Timed out reading profile file (5s)".to_string(); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message("config_validate::file_read_timeout", error_msg.clone()); + Err(error_msg.into()) + } + } +} + +/// Best-effort rollback invoked when a switch fails midway through the pipeline. 
+pub(super) async fn restore_previous_profile(previous: Option) -> CmdResult<()> { + if let Some(prev_profile) = previous { + logging!( + info, + Type::Cmd, + "Attempting to restore previous configuration: {}", + prev_profile + ); + let restore_profiles = IProfiles { + current: Some(prev_profile), + items: None, + }; + Config::profiles() + .await + .draft_mut() + .patch_config(restore_profiles) + .stringify_err()?; + if time::timeout(CONFIG_APPLY_TIMEOUT, async { + Config::profiles().await.apply(); + }) + .await + .is_err() + { + logging!( + warn, + Type::Cmd, + "Restoring previous configuration timed out after {:?}", + CONFIG_APPLY_TIMEOUT + ); + return Ok(()); + } + + AsyncHandler::spawn(|| async move { + let save_future = AsyncHandler::spawn_blocking(|| { + futures::executor::block_on(async { profiles_save_file_safe().await }) + }); + match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { + Ok(join_res) => match join_res { + Ok(Ok(())) => {} + Ok(Err(err)) => { + logging!( + warn, + Type::Cmd, + "Failed to persist restored configuration asynchronously: {}", + err + ); + } + Err(join_err) => { + logging!(warn, Type::Cmd, "Blocking save task failed: {}", join_err); + } + }, + Err(_) => { + logging!( + warn, + Type::Cmd, + "Persisting restored configuration timed out after {:?}", + SAVE_PROFILES_TIMEOUT + ); + } + } + }); + } + + Ok(()) +} + +pub(super) fn describe_panic_payload(payload: &(dyn Any + Send)) -> String { + if let Some(message) = payload.downcast_ref::<&str>() { + (*message).to_string() + } else if let Some(message) = payload.downcast_ref::() { + message.clone() + } else { + "unknown panic".into() + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs new file mode 100644 index 00000000..2f7e1aac --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs @@ -0,0 +1,65 @@ +use super::super::state::SWITCH_CLEANUP_TIMEOUT; +use crate::{core::handle, logging, 
process::AsyncHandler, utils::logging::Type}; +use smartstring::alias::String as SmartString; +use tokio::time; + +pub(crate) type CleanupHandle = tauri::async_runtime::JoinHandle<()>; + +pub(crate) fn schedule_post_switch_success( + profile_id: SmartString, + success: bool, + notify: bool, + task_id: u64, +) -> CleanupHandle { + // Post-success cleanup runs detached from the driver so the queue keeps moving. + AsyncHandler::spawn(move || async move { + handle::Handle::notify_profile_switch_finished( + profile_id.clone(), + success, + notify, + task_id, + ); + if success { + close_connections_after_switch(profile_id).await; + } + }) +} + +pub(crate) fn schedule_post_switch_failure( + profile_id: SmartString, + notify: bool, + task_id: u64, +) -> CleanupHandle { + // Failures or cancellations do not alter the active profile, so skip draining live connections. + AsyncHandler::spawn(move || async move { + handle::Handle::notify_profile_switch_finished(profile_id.clone(), false, notify, task_id); + }) +} + +async fn close_connections_after_switch(profile_id: SmartString) { + match time::timeout(SWITCH_CLEANUP_TIMEOUT, async { + handle::Handle::mihomo().await.close_all_connections().await + }) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + logging!( + warn, + Type::Cmd, + "Failed to close connections after profile switch ({}): {}", + profile_id, + err + ); + } + Err(_) => { + logging!( + warn, + Type::Cmd, + "Closing connections after profile switch ({}) timed out after {:?}", + profile_id, + SWITCH_CLEANUP_TIMEOUT + ); + } + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs new file mode 100644 index 00000000..9de753db --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs @@ -0,0 +1,178 @@ +use super::{CmdResult, core::SwitchStage}; +use crate::{ + cmd::profile_switch::state::{ + SwitchCancellation, SwitchHeartbeat, 
SwitchManager, SwitchRequest, SwitchScope, + }, + config::IProfiles, + logging, + utils::logging::Type, +}; +use smartstring::alias::String as SmartString; +use tokio::sync::MutexGuard; + +pub(super) struct SwitchContext { + pub(super) manager: &'static SwitchManager, + pub(super) request: Option, + pub(super) profiles_patch: Option, + pub(super) sequence: Option, + pub(super) target_profile: Option, + pub(super) previous_profile: Option, + pub(super) new_profile_for_event: Option, + pub(super) switch_scope: Option>, + pub(super) core_guard: Option>, + pub(super) heartbeat: SwitchHeartbeat, + pub(super) task_id: Option, + pub(super) profile_label: SmartString, + pub(super) active_stage: SwitchStage, +} + +impl SwitchContext { + // Captures all mutable data required across states (locks, profile ids, etc). + pub(super) fn new( + manager: &'static SwitchManager, + request: Option, + profiles: IProfiles, + heartbeat: SwitchHeartbeat, + ) -> Self { + let task_id = request.as_ref().map(|req| req.task_id()); + let profile_label = request + .as_ref() + .map(|req| req.profile_id().clone()) + .or_else(|| profiles.current.clone()) + .unwrap_or_else(|| SmartString::from("unknown")); + heartbeat.touch(); + Self { + manager, + request, + profiles_patch: Some(profiles), + sequence: None, + target_profile: None, + previous_profile: None, + new_profile_for_event: None, + switch_scope: None, + core_guard: None, + heartbeat, + task_id, + profile_label, + active_stage: SwitchStage::Start, + } + } + + pub(super) fn ensure_target_profile(&mut self) { + // Lazily determine which profile we're switching to so shared paths (patch vs. driver) behave the same. 
+ if let Some(patch) = self.profiles_patch.as_mut() { + if patch.current.is_none() + && let Some(request) = self.request.as_ref() + { + patch.current = Some(request.profile_id().clone()); + } + self.target_profile = patch.current.clone(); + } + } + + pub(super) fn take_profiles_patch(&mut self) -> CmdResult { + self.profiles_patch + .take() + .ok_or_else(|| "profiles patch already consumed".into()) + } + + pub(super) fn cancel_token(&self) -> Option { + self.request.as_ref().map(|req| req.cancel_token().clone()) + } + + pub(super) fn cancelled(&self) -> bool { + self.request + .as_ref() + .map(|req| req.cancel_token().is_cancelled()) + .unwrap_or(false) + } + + pub(super) fn log_cancelled(&self, stage: &str) { + if let Some(request) = self.request.as_ref() { + logging!( + info, + Type::Cmd, + "Switch task {} cancelled {}; profile={}", + request.task_id(), + stage, + request.profile_id() + ); + } else { + logging!(info, Type::Cmd, "Profile switch cancelled {}", stage); + } + } + + pub(super) fn should_validate_target(&self) -> bool { + match (&self.target_profile, &self.previous_profile) { + (Some(target), Some(current)) => current != target, + (Some(_), None) => true, + _ => false, + } + } + + pub(super) fn stale(&self) -> bool { + self.sequence + .map(|seq| seq < self.manager.latest_request_sequence()) + .unwrap_or(false) + } + + pub(super) fn sequence(&self) -> u64 { + self.sequence.unwrap_or_else(|| { + logging!( + warn, + Type::Cmd, + "Sequence unexpectedly missing in switch context; defaulting to 0" + ); + 0 + }) + } + + pub(super) fn record_stage(&mut self, stage: SwitchStage) { + let since_last = self.heartbeat.elapsed(); + let previous = self.active_stage; + self.active_stage = stage; + self.heartbeat.set_stage(stage.as_code()); + + match self.task_id { + Some(task_id) => logging!( + debug, + Type::Cmd, + "Switch task {} (profile={}) transitioned {:?} -> {:?} after {:?}", + task_id, + self.profile_label, + previous, + stage, + since_last + ), + None => 
logging!( + debug, + Type::Cmd, + "Profile patch {} transitioned {:?} -> {:?} after {:?}", + self.profile_label, + previous, + stage, + since_last + ), + } + } + + pub(super) fn release_core_guard(&mut self) { + self.core_guard = None; + } + + pub(super) fn release_switch_scope(&mut self) { + self.switch_scope = None; + } + + pub(super) fn release_locks(&mut self) { + self.release_core_guard(); + self.release_switch_scope(); + } +} + +impl Drop for SwitchContext { + fn drop(&mut self) { + self.core_guard.take(); + self.switch_scope.take(); + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs new file mode 100644 index 00000000..1c4e32ab --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs @@ -0,0 +1,284 @@ +use super::{CmdResult, context::SwitchContext, describe_panic_payload}; +use crate::{ + cmd::profile_switch::state::{SwitchHeartbeat, SwitchManager, SwitchRequest}, + config::IProfiles, + logging, + utils::logging::Type, +}; +use futures::FutureExt; +use std::{ + mem, + panic::AssertUnwindSafe, + time::{Duration, Instant}, +}; +pub(crate) const CONFIG_APPLY_TIMEOUT: Duration = Duration::from_secs(5); +pub(crate) const TRAY_UPDATE_TIMEOUT: Duration = Duration::from_secs(3); +pub(crate) const REFRESH_TIMEOUT: Duration = Duration::from_secs(3); +pub(crate) const SAVE_PROFILES_TIMEOUT: Duration = Duration::from_secs(5); +pub(crate) const SWITCH_IDLE_WAIT_TIMEOUT: Duration = Duration::from_secs(30); +pub(crate) const SWITCH_IDLE_WAIT_POLL: Duration = Duration::from_millis(25); +pub(crate) const SWITCH_IDLE_WAIT_MAX_BACKOFF: Duration = Duration::from_millis(250); + +/// Explicit state machine for profile switching so we can reason about +/// cancellation, stale requests, and side effects at each stage. 
+pub(crate) struct SwitchStateMachine { + pub(super) ctx: SwitchContext, + state: SwitchState, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SwitchStage { + Start, + AcquireCore, + Prepare, + ValidateTarget, + PatchDraft, + UpdateCore, + Finalize, + Workflow, + DriverTask, +} + +impl SwitchStage { + pub(crate) fn as_code(self) -> u32 { + match self { + SwitchStage::Start => 0, + SwitchStage::AcquireCore => 1, + SwitchStage::Prepare => 2, + SwitchStage::ValidateTarget => 3, + SwitchStage::PatchDraft => 4, + SwitchStage::UpdateCore => 5, + SwitchStage::Finalize => 6, + SwitchStage::Workflow => 7, + SwitchStage::DriverTask => 8, + } + } + + pub(crate) fn from_code(code: u32) -> Option { + Some(match code { + 0 => SwitchStage::Start, + 1 => SwitchStage::AcquireCore, + 2 => SwitchStage::Prepare, + 3 => SwitchStage::ValidateTarget, + 4 => SwitchStage::PatchDraft, + 5 => SwitchStage::UpdateCore, + 6 => SwitchStage::Finalize, + 7 => SwitchStage::Workflow, + 8 => SwitchStage::DriverTask, + _ => return None, + }) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct SwitchPanicInfo { + pub(crate) stage: SwitchStage, + pub(crate) detail: String, +} + +impl SwitchPanicInfo { + pub(crate) fn new(stage: SwitchStage, detail: String) -> Self { + Self { stage, detail } + } + + pub(crate) fn workflow_root(detail: String) -> Self { + Self::new(SwitchStage::Workflow, detail) + } + + pub(crate) fn driver_task(detail: String) -> Self { + Self::new(SwitchStage::DriverTask, detail) + } +} + +/// High-level state machine nodes executed in strict sequence. +pub(crate) enum SwitchState { + Start, + AcquireCore, + Prepare, + ValidateTarget, + PatchDraft, + UpdateCore, + Finalize(CoreUpdateOutcome), + Complete(bool), +} + +/// Result of trying to apply the draft configuration to the core. 
+pub(crate) enum CoreUpdateOutcome { + Success, + ValidationFailed { message: String }, + CoreError { message: String }, + Timeout, +} + +/// Indicates where a stale request was detected so logs stay descriptive. +pub(crate) enum StaleStage { + AfterLock, + BeforeCoreOperation, + BeforeCoreInteraction, + AfterCoreOperation, +} + +impl StaleStage { + pub(super) fn log(&self, ctx: &SwitchContext) { + let sequence = ctx.sequence(); + let latest = ctx.manager.latest_request_sequence(); + match self { + StaleStage::AfterLock => logging!( + info, + Type::Cmd, + "Detected a newer request after acquiring the lock (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::BeforeCoreOperation => logging!( + info, + Type::Cmd, + "Detected a newer request before core operation (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::BeforeCoreInteraction => logging!( + info, + Type::Cmd, + "Detected a newer request before core interaction (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::AfterCoreOperation => logging!( + info, + Type::Cmd, + "Detected a newer request after core operation (sequence: {} < {}), ignoring current result", + sequence, + latest + ), + } + } +} + +impl SwitchStateMachine { + pub(crate) fn new( + manager: &'static SwitchManager, + request: Option, + profiles: IProfiles, + ) -> Self { + let heartbeat = request + .as_ref() + .map(|req| req.heartbeat().clone()) + .unwrap_or_else(SwitchHeartbeat::new); + + Self { + ctx: SwitchContext::new(manager, request, profiles, heartbeat), + state: SwitchState::Start, + } + } + + pub(crate) async fn run(mut self) -> Result, SwitchPanicInfo> { + // Drive the state machine until we either complete successfully or bubble up a panic. 
+ loop { + let current_state = mem::replace(&mut self.state, SwitchState::Complete(false)); + match current_state { + SwitchState::Complete(result) => return Ok(Ok(result)), + _ => match self.run_state(current_state).await? { + Ok(state) => self.state = state, + Err(err) => return Ok(Err(err)), + }, + } + } + } + + async fn run_state( + &mut self, + current: SwitchState, + ) -> Result, SwitchPanicInfo> { + match current { + SwitchState::Start => { + self.with_stage( + SwitchStage::Start, + |this| async move { this.handle_start() }, + ) + .await + } + SwitchState::AcquireCore => { + self.with_stage(SwitchStage::AcquireCore, |this| async move { + this.handle_acquire_core().await + }) + .await + } + SwitchState::Prepare => { + self.with_stage(SwitchStage::Prepare, |this| async move { + this.handle_prepare().await + }) + .await + } + SwitchState::ValidateTarget => { + self.with_stage(SwitchStage::ValidateTarget, |this| async move { + this.handle_validate_target().await + }) + .await + } + SwitchState::PatchDraft => { + self.with_stage(SwitchStage::PatchDraft, |this| async move { + this.handle_patch_draft().await + }) + .await + } + SwitchState::UpdateCore => { + self.with_stage(SwitchStage::UpdateCore, |this| async move { + this.handle_update_core().await + }) + .await + } + SwitchState::Finalize(outcome) => { + self.with_stage(SwitchStage::Finalize, |this| async move { + this.handle_finalize(outcome).await + }) + .await + } + SwitchState::Complete(result) => Ok(Ok(SwitchState::Complete(result))), + } + } + + /// Helper that wraps each stage with consistent logging and panic reporting. 
+ async fn with_stage<'a, F, Fut>( + &'a mut self, + stage: SwitchStage, + f: F, + ) -> Result, SwitchPanicInfo> + where + F: FnOnce(&'a mut Self) -> Fut, + Fut: std::future::Future> + 'a, + { + let sequence = self.ctx.sequence(); + let task = self.ctx.task_id; + let profile = self.ctx.profile_label.clone(); + logging!( + info, + Type::Cmd, + "Enter {:?} (sequence={}, task={:?}, profile={})", + stage, + sequence, + task, + profile + ); + let stage_start = Instant::now(); + self.ctx.record_stage(stage); + AssertUnwindSafe(f(self)) + .catch_unwind() + .await + .map_err(|payload| { + SwitchPanicInfo::new(stage, describe_panic_payload(payload.as_ref())) + }) + .inspect(|_| { + logging!( + info, + Type::Cmd, + "Exit {:?} (sequence={}, task={:?}, profile={}, elapsed={}ms)", + stage, + sequence, + task, + profile, + stage_start.elapsed().as_millis() + ); + }) + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs new file mode 100644 index 00000000..84ee0f49 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs @@ -0,0 +1,11 @@ +mod context; +mod core; +mod stages; + +pub(crate) use core::{ + CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchPanicInfo, SwitchStage, SwitchStateMachine, +}; + +pub(super) use super::{ + CmdResult, describe_panic_payload, restore_previous_profile, validate_profile_yaml, +}; diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs new file mode 100644 index 00000000..78c313d4 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs @@ -0,0 +1,597 @@ +use super::{ + CmdResult, + core::{ + CONFIG_APPLY_TIMEOUT, CoreUpdateOutcome, REFRESH_TIMEOUT, SAVE_PROFILES_TIMEOUT, + SWITCH_IDLE_WAIT_MAX_BACKOFF, SWITCH_IDLE_WAIT_POLL, SWITCH_IDLE_WAIT_TIMEOUT, StaleStage, + SwitchState, SwitchStateMachine, 
TRAY_UPDATE_TIMEOUT, + }, + restore_previous_profile, validate_profile_yaml, +}; +use crate::{ + config::{Config, profiles::profiles_save_file_safe}, + core::{CoreManager, handle, tray::Tray}, + logging, + process::AsyncHandler, + utils::logging::Type, +}; +use anyhow::Error; +use futures::future; +use smartstring::alias::String as SmartString; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; +use tokio::time; + +impl SwitchStateMachine { + pub(super) fn handle_start(&mut self) -> CmdResult { + if self.ctx.manager.is_switching() { + logging!( + info, + Type::Cmd, + "Profile switch already in progress; queuing request for task={:?}, profile={}", + self.ctx.task_id, + self.ctx.profile_label + ); + } + Ok(SwitchState::AcquireCore) + } + + /// Grab the core lock, mark the manager as switching, and compute the target profile. + pub(super) async fn handle_acquire_core(&mut self) -> CmdResult { + let manager = self.ctx.manager; + let core_guard = manager.core_mutex().lock().await; + + if manager.is_switching() { + logging!( + info, + Type::Cmd, + "Active profile switch detected; waiting before acquiring scope" + ); + let wait_start = Instant::now(); + let mut backoff = SWITCH_IDLE_WAIT_POLL; + while manager.is_switching() { + if self.ctx.cancelled() { + self.ctx + .log_cancelled("while waiting for active switch to finish"); + return Ok(SwitchState::Complete(false)); + } + if wait_start.elapsed() >= SWITCH_IDLE_WAIT_TIMEOUT { + let message = format!( + "Timed out after {:?} waiting for active profile switch to finish", + SWITCH_IDLE_WAIT_TIMEOUT + ); + logging!(error, Type::Cmd, "{}", message); + return Err(message.into()); + } + + time::sleep(backoff).await; + backoff = backoff.saturating_mul(2).min(SWITCH_IDLE_WAIT_MAX_BACKOFF); + } + let waited = wait_start.elapsed().as_millis(); + if waited > 0 { + logging!( + info, + Type::Cmd, + "Waited {}ms for active switch to finish before acquiring scope", + waited + ); + } + } + + self.ctx.core_guard = Some(core_guard); 
+ self.ctx.switch_scope = Some(manager.begin_switch()); + self.ctx.sequence = Some(manager.next_request_sequence()); + self.ctx.ensure_target_profile(); + + logging!( + info, + Type::Cmd, + "Begin modifying configuration; sequence: {}, target profile: {:?}", + self.ctx.sequence(), + self.ctx.target_profile + ); + + if self.ctx.cancelled() { + self.ctx.log_cancelled("after acquiring core lock"); + return Ok(SwitchState::Complete(false)); + } + + if self.ctx.stale() { + StaleStage::AfterLock.log(&self.ctx); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::Prepare) + } + + pub(super) async fn handle_prepare(&mut self) -> CmdResult { + let current_profile = { + let profiles_guard = Config::profiles().await; + profiles_guard.latest_ref().current.clone() + }; + + logging!(info, Type::Cmd, "Current profile: {:?}", current_profile); + self.ctx.previous_profile = current_profile; + Ok(SwitchState::ValidateTarget) + } + + pub(super) async fn handle_validate_target(&mut self) -> CmdResult { + if self.ctx.cancelled() { + self.ctx.log_cancelled("before validation"); + return Ok(SwitchState::Complete(false)); + } + + if self.ctx.should_validate_target() { + let Some(target) = self.ctx.target_profile.clone() else { + logging!( + error, + Type::Cmd, + "Missing target profile while validation was requested; aborting switch" + ); + return Err("missing target profile at validation".into()); + }; + if !validate_profile_yaml(&target).await? 
{ + return Ok(SwitchState::Complete(false)); + } + } + + if self.ctx.stale() { + StaleStage::BeforeCoreOperation.log(&self.ctx); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::PatchDraft) + } + + pub(super) async fn handle_patch_draft(&mut self) -> CmdResult { + if self.ctx.cancelled() { + self.ctx.log_cancelled("before patching configuration"); + return Ok(SwitchState::Complete(false)); + } + + logging!( + info, + Type::Cmd, + "Updating configuration draft, sequence: {}", + self.ctx.sequence() + ); + + let patch = self.ctx.take_profiles_patch()?; + self.ctx.new_profile_for_event = patch.current.clone(); + let _ = Config::profiles().await.draft_mut().patch_config(patch); + + if self.ctx.stale() { + StaleStage::BeforeCoreInteraction.log(&self.ctx); + Config::profiles().await.discard(); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::UpdateCore) + } + + pub(super) async fn handle_update_core(&mut self) -> CmdResult { + let sequence = self.ctx.sequence(); + let task_id = self.ctx.task_id; + let profile = self.ctx.profile_label.clone(); + logging!( + info, + Type::Cmd, + "Starting core configuration update, sequence: {}, task={:?}, profile={}", + sequence, + task_id, + profile + ); + + let heartbeat = self.ctx.heartbeat.clone(); + let start = Instant::now(); + let mut ticker = time::interval(Duration::from_secs(1)); + ticker.set_missed_tick_behavior(time::MissedTickBehavior::Delay); + + let update_future = CoreManager::global().update_config(); + tokio::pin!(update_future); + + let timeout = time::sleep(Duration::from_secs(30)); + tokio::pin!(timeout); + + let cancel_token = self.ctx.cancel_token(); + let mut cancel_notifier: Pin + Send>> = + match cancel_token { + Some(token) => Box::pin(async move { + token.cancelled_future().await; + }), + None => Box::pin(future::pending()), + }; + + enum UpdateOutcome { + Finished(Result<(bool, SmartString), Error>), + Timeout, + Cancelled, + } + + let update_outcome = loop { + tokio::select! 
{ + res = &mut update_future => break UpdateOutcome::Finished(res), + _ = &mut timeout => break UpdateOutcome::Timeout, + _ = &mut cancel_notifier => break UpdateOutcome::Cancelled, + _ = ticker.tick() => { + let elapsed_ms = start.elapsed().as_millis(); + heartbeat.touch(); + match task_id { + Some(id) => logging!( + debug, + Type::Cmd, + "Switch task {} (profile={}) UpdateCore still running (elapsed={}ms)", + id, + profile, + elapsed_ms + ), + None => logging!( + debug, + Type::Cmd, + "Profile patch {} UpdateCore still running (elapsed={}ms)", + profile, + elapsed_ms + ), + } + } + } + }; + + let elapsed_ms = start.elapsed().as_millis(); + + let outcome = match update_outcome { + UpdateOutcome::Finished(Ok((true, _))) => { + logging!( + info, + Type::Cmd, + "Core configuration update succeeded in {}ms", + elapsed_ms + ); + CoreUpdateOutcome::Success + } + UpdateOutcome::Finished(Ok((false, msg))) => { + logging!( + warn, + Type::Cmd, + "Core configuration update validation failed in {}ms: {}", + elapsed_ms, + msg + ); + CoreUpdateOutcome::ValidationFailed { + message: msg.to_string(), + } + } + UpdateOutcome::Finished(Err(err)) => { + logging!( + error, + Type::Cmd, + "Core configuration update errored in {}ms: {}", + elapsed_ms, + err + ); + CoreUpdateOutcome::CoreError { + message: err.to_string(), + } + } + UpdateOutcome::Timeout => { + logging!( + error, + Type::Cmd, + "Core configuration update timed out after {}ms", + elapsed_ms + ); + CoreUpdateOutcome::Timeout + } + UpdateOutcome::Cancelled => { + self.ctx.log_cancelled("during core update"); + logging!( + info, + Type::Cmd, + "Core configuration update cancelled after {}ms", + elapsed_ms + ); + self.ctx.release_locks(); + Config::profiles().await.discard(); + return Ok(SwitchState::Complete(false)); + } + }; + + self.ctx.release_core_guard(); + + Ok(SwitchState::Finalize(outcome)) + } + + pub(super) async fn handle_finalize( + &mut self, + outcome: CoreUpdateOutcome, + ) -> CmdResult { + let next_state = 
match outcome { + CoreUpdateOutcome::Success => self.finalize_success().await, + CoreUpdateOutcome::ValidationFailed { message } => { + self.finalize_validation_failed(message).await + } + CoreUpdateOutcome::CoreError { message } => self.finalize_core_error(message).await, + CoreUpdateOutcome::Timeout => self.finalize_timeout().await, + }; + + if next_state.is_err() || matches!(next_state, Ok(SwitchState::Complete(_))) { + self.ctx.release_switch_scope(); + } + + next_state + } + + pub(super) async fn finalize_success(&mut self) -> CmdResult { + if self.abort_if_stale_post_core().await? { + return Ok(SwitchState::Complete(false)); + } + + self.log_successful_update(); + + if !self.apply_config_with_timeout().await? { + logging!( + warn, + Type::Cmd, + "Apply step failed; attempting to restore previous profile before completing" + ); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + return Ok(SwitchState::Complete(false)); + } + + self.refresh_clash_with_timeout().await; + self.update_tray_tooltip_with_timeout().await; + self.update_tray_menu_with_timeout().await; + if let Err(err) = self.persist_profiles_with_timeout().await { + logging!( + error, + Type::Cmd, + "Persisting new profile configuration failed; attempting to restore previous profile: {}", + err + ); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + return Err(err); + } + self.emit_profile_change_event(); + logging!( + debug, + Type::Cmd, + "Finalize success pipeline completed for sequence {}", + self.ctx.sequence() + ); + + Ok(SwitchState::Complete(true)) + } + + pub(super) async fn finalize_validation_failed( + &mut self, + message: String, + ) -> CmdResult { + logging!( + warn, + Type::Cmd, + "Configuration validation failed: {}", + message + ); + Config::profiles().await.discard(); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + handle::Handle::notice_message("config_validate::error", message); + Ok(SwitchState::Complete(false)) + } 
+ + pub(super) async fn finalize_core_error(&mut self, message: String) -> CmdResult { + logging!( + warn, + Type::Cmd, + "Error occurred during update: {}, sequence: {}", + message, + self.ctx.sequence() + ); + Config::profiles().await.discard(); + handle::Handle::notice_message("config_validate::boot_error", message); + Ok(SwitchState::Complete(false)) + } + + pub(super) async fn finalize_timeout(&mut self) -> CmdResult { + let timeout_msg = + "Configuration update timed out (30s); possible validation or core communication stall"; + logging!( + error, + Type::Cmd, + "{}, sequence: {}", + timeout_msg, + self.ctx.sequence() + ); + Config::profiles().await.discard(); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + handle::Handle::notice_message("config_validate::timeout", timeout_msg); + Ok(SwitchState::Complete(false)) + } + + pub(super) async fn abort_if_stale_post_core(&mut self) -> CmdResult { + if self.ctx.stale() { + StaleStage::AfterCoreOperation.log(&self.ctx); + Config::profiles().await.discard(); + return Ok(true); + } + + Ok(false) + } + + pub(super) fn log_successful_update(&self) { + logging!( + info, + Type::Cmd, + "Configuration update succeeded, sequence: {}", + self.ctx.sequence() + ); + } + + pub(super) async fn apply_config_with_timeout(&mut self) -> CmdResult { + let apply_result = time::timeout(CONFIG_APPLY_TIMEOUT, async { + Config::profiles().await.apply() + }) + .await; + + if apply_result.is_ok() { + Ok(true) + } else { + logging!( + warn, + Type::Cmd, + "Applying profile configuration timed out after {:?}", + CONFIG_APPLY_TIMEOUT + ); + Config::profiles().await.discard(); + Ok(false) + } + } + + pub(super) async fn refresh_clash_with_timeout(&self) { + let start = Instant::now(); + let result = time::timeout(REFRESH_TIMEOUT, async { + handle::Handle::refresh_clash(); + }) + .await; + + let elapsed = start.elapsed().as_millis(); + match result { + Ok(_) => logging!( + debug, + Type::Cmd, + "refresh_clash_with_timeout 
completed in {}ms", + elapsed + ), + Err(_) => logging!( + warn, + Type::Cmd, + "Refreshing Clash state timed out after {:?} (elapsed={}ms)", + REFRESH_TIMEOUT, + elapsed + ), + } + } + + pub(super) async fn update_tray_tooltip_with_timeout(&self) { + let start = Instant::now(); + let update_tooltip = time::timeout(TRAY_UPDATE_TIMEOUT, async { + Tray::global().update_tooltip().await + }) + .await; + let elapsed = start.elapsed().as_millis(); + + if update_tooltip.is_err() { + logging!( + warn, + Type::Cmd, + "Updating tray tooltip timed out after {:?} (elapsed={}ms)", + TRAY_UPDATE_TIMEOUT, + elapsed + ); + } else if let Ok(Err(err)) = update_tooltip { + logging!( + warn, + Type::Cmd, + "Failed to update tray tooltip asynchronously: {}", + err + ); + } else { + logging!( + debug, + Type::Cmd, + "update_tray_tooltip_with_timeout completed in {}ms", + elapsed + ); + } + } + + pub(super) async fn update_tray_menu_with_timeout(&self) { + let start = Instant::now(); + let update_menu = time::timeout(TRAY_UPDATE_TIMEOUT, async { + Tray::global().update_menu().await + }) + .await; + let elapsed = start.elapsed().as_millis(); + + if update_menu.is_err() { + logging!( + warn, + Type::Cmd, + "Updating tray menu timed out after {:?} (elapsed={}ms)", + TRAY_UPDATE_TIMEOUT, + elapsed + ); + } else if let Ok(Err(err)) = update_menu { + logging!( + warn, + Type::Cmd, + "Failed to update tray menu asynchronously: {}", + err + ); + } else { + logging!( + debug, + Type::Cmd, + "update_tray_menu_with_timeout completed in {}ms", + elapsed + ); + } + } + + pub(super) async fn persist_profiles_with_timeout(&self) -> CmdResult<()> { + let start = Instant::now(); + let save_future = AsyncHandler::spawn_blocking(|| { + futures::executor::block_on(async { profiles_save_file_safe().await }) + }); + + let elapsed = start.elapsed().as_millis(); + match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { + Err(_) => { + let message = format!( + "Persisting configuration file timed out 
after {:?} (elapsed={}ms)", + SAVE_PROFILES_TIMEOUT, elapsed + ); + logging!(warn, Type::Cmd, "{}", message); + Err(message.into()) + } + Ok(join_result) => match join_result { + Err(join_err) => { + let message = format!( + "Persisting configuration file failed: blocking task join error: {join_err}" + ); + logging!(error, Type::Cmd, "{}", message); + Err(message.into()) + } + Ok(save_result) => match save_result { + Ok(()) => { + logging!( + debug, + Type::Cmd, + "persist_profiles_with_timeout completed in {}ms", + elapsed + ); + Ok(()) + } + Err(err) => { + let message = format!("Persisting configuration file failed: {}", err); + logging!(error, Type::Cmd, "{}", message); + Err(message.into()) + } + }, + }, + } + } + + pub(super) fn emit_profile_change_event(&self) { + if let Some(current) = self.ctx.new_profile_for_event.clone() { + logging!( + info, + Type::Cmd, + "Emitting configuration change event to frontend: {}, sequence: {}", + current, + self.ctx.sequence() + ); + handle::Handle::notify_profile_changed(current); + } + } +} diff --git a/src-tauri/src/core/handle.rs b/src-tauri/src/core/handle.rs index ef868f59..e3735d46 100644 --- a/src-tauri/src/core/handle.rs +++ b/src-tauri/src/core/handle.rs @@ -1,7 +1,14 @@ -use crate::{APP_HANDLE, constants::timing, singleton}; +use crate::{ + APP_HANDLE, config::Config, constants::timing, logging, singleton, utils::logging::Type, +}; use parking_lot::RwLock; +use serde_json::{Value, json}; use smartstring::alias::String; -use std::{sync::Arc, thread}; +use std::{ + sync::Arc, + thread, + time::{SystemTime, UNIX_EPOCH}, +}; use tauri::{AppHandle, Manager, WebviewWindow}; use tauri_plugin_mihomo::{Mihomo, MihomoExt}; use tokio::sync::RwLockReadGuard; @@ -66,10 +73,14 @@ impl Handle { return; } - let system_opt = handle.notification_system.read(); - if let Some(system) = system_opt.as_ref() { - system.send_event(FrontendEvent::RefreshClash); + { + let system_opt = handle.notification_system.read(); + if let 
Some(system) = system_opt.as_ref() { + system.send_event(FrontendEvent::RefreshClash); + } } + + Self::spawn_proxy_snapshot(); } pub fn refresh_verge() { @@ -85,11 +96,37 @@ impl Handle { } pub fn notify_profile_changed(profile_id: String) { - Self::send_event(FrontendEvent::ProfileChanged { - current_profile_id: profile_id, + let handle = Self::global(); + if handle.is_exiting() { + return; + } + + let system_opt = handle.notification_system.read(); + if let Some(system) = system_opt.as_ref() { + system.send_event(FrontendEvent::ProfileChanged { + current_profile_id: profile_id, + }); + } + } + + pub fn notify_profile_switch_finished( + profile_id: String, + success: bool, + notify: bool, + task_id: u64, + ) { + Self::send_event(FrontendEvent::ProfileSwitchFinished { + profile_id, + success, + notify, + task_id, }); } + pub fn notify_rust_panic(message: String, location: String) { + Self::send_event(FrontendEvent::RustPanic { message, location }); + } + pub fn notify_timer_updated(profile_index: String) { Self::send_event(FrontendEvent::TimerUpdated { profile_index }); } @@ -100,6 +137,86 @@ impl Handle { pub fn notify_profile_update_completed(uid: String) { Self::send_event(FrontendEvent::ProfileUpdateCompleted { uid }); + Self::spawn_proxy_snapshot(); + } + + pub fn notify_proxies_updated(payload: Value) { + Self::send_event(FrontendEvent::ProxiesUpdated { payload }); + } + + pub async fn build_proxy_snapshot() -> Option { + let mihomo_guard = Self::mihomo().await; + let proxies = match mihomo_guard.get_proxies().await { + Ok(data) => match serde_json::to_value(&data) { + Ok(value) => value, + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to serialize proxies snapshot: {error}" + ); + return None; + } + }, + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to fetch proxies for snapshot: {error}" + ); + return None; + } + }; + + drop(mihomo_guard); + + let providers_guard = Self::mihomo().await; + let providers_value = match 
providers_guard.get_proxy_providers().await { + Ok(data) => serde_json::to_value(&data).unwrap_or_else(|error| { + logging!( + warn, + Type::Frontend, + "Failed to serialize proxy providers for snapshot: {error}" + ); + Value::Null + }), + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to fetch proxy providers for snapshot: {error}" + ); + Value::Null + } + }; + + drop(providers_guard); + + let profile_guard = Config::profiles().await; + let profile_id = profile_guard.latest_ref().current.clone(); + drop(profile_guard); + + let emitted_at = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|duration| duration.as_millis() as i64) + .unwrap_or(0); + + let payload = json!({ + "proxies": proxies, + "providers": providers_value, + "profileId": profile_id, + "emittedAt": emitted_at, + }); + + Some(payload) + } + + fn spawn_proxy_snapshot() { + tauri::async_runtime::spawn(async { + if let Some(payload) = Handle::build_proxy_snapshot().await { + Handle::notify_proxies_updated(payload); + } + }); } pub fn notice_message, M: Into>(status: S, msg: M) { diff --git a/src-tauri/src/core/manager/config.rs b/src-tauri/src/core/manager/config.rs index 263ddb4b..e93d5244 100644 --- a/src-tauri/src/core/manager/config.rs +++ b/src-tauri/src/core/manager/config.rs @@ -10,7 +10,10 @@ use anyhow::{Result, anyhow}; use smartstring::alias::String; use std::{path::PathBuf, time::Instant}; use tauri_plugin_mihomo::Error as MihomoError; -use tokio::time::sleep; +use tokio::time::{sleep, timeout}; + +const RELOAD_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5); +const MAX_RELOAD_ATTEMPTS: usize = 3; impl CoreManager { pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> { @@ -39,12 +42,38 @@ impl CoreManager { return Ok((true, String::new())); } + let start = Instant::now(); + let _permit = self .update_semaphore .try_acquire() .map_err(|_| anyhow!("Config update already in progress"))?; - 
self.perform_config_update().await + let result = self.perform_config_update().await; + + match &result { + Ok((success, msg)) => { + logging!( + info, + Type::Core, + "[ConfigUpdate] Finished (success={}, elapsed={}ms, msg={})", + success, + start.elapsed().as_millis(), + msg + ); + } + Err(err) => { + logging!( + error, + Type::Core, + "[ConfigUpdate] Failed after {}ms: {}", + start.elapsed().as_millis(), + err + ); + } + } + + result } fn should_update_config(&self) -> Result { @@ -62,20 +91,73 @@ impl CoreManager { } async fn perform_config_update(&self) -> Result<(bool, String)> { - Config::generate().await?; + logging!(debug, Type::Core, "[ConfigUpdate] Pipeline start"); + let total_start = Instant::now(); - match CoreConfigValidator::global().validate_config().await { + let mut stage_timer = Instant::now(); + Config::generate().await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Generation completed in {}ms", + stage_timer.elapsed().as_millis() + ); + + stage_timer = Instant::now(); + let validation_result = CoreConfigValidator::global().validate_config().await; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Validation completed in {}ms", + stage_timer.elapsed().as_millis() + ); + + match validation_result { Ok((true, _)) => { + stage_timer = Instant::now(); let run_path = Config::generate_file(ConfigType::Run).await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Runtime file generated in {}ms", + stage_timer.elapsed().as_millis() + ); + stage_timer = Instant::now(); self.apply_config(run_path).await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Core apply completed in {}ms", + stage_timer.elapsed().as_millis() + ); + logging!( + debug, + Type::Core, + "[ConfigUpdate] Pipeline succeeded in {}ms", + total_start.elapsed().as_millis() + ); Ok((true, String::new())) } Ok((false, error_msg)) => { Config::runtime().await.discard(); + logging!( + warn, + Type::Core, + "[ConfigUpdate] Validation reported failure after {}ms: {}", + 
total_start.elapsed().as_millis(), + error_msg + ); Ok((false, error_msg)) } Err(e) => { Config::runtime().await.discard(); + logging!( + error, + Type::Core, + "[ConfigUpdate] Validation errored after {}ms: {}", + total_start.elapsed().as_millis(), + e + ); Err(e) } } @@ -88,17 +170,49 @@ impl CoreManager { pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> { let path_str = dirs::path_to_str(&path)?; - match self.reload_config(path_str).await { + let reload_start = Instant::now(); + match self.reload_config_with_retry(path_str).await { Ok(_) => { Config::runtime().await.apply(); - logging!(info, Type::Core, "Configuration applied"); + logging!( + debug, + Type::Core, + "Configuration applied (reload={}ms)", + reload_start.elapsed().as_millis() + ); Ok(()) } - Err(err) if Self::should_restart_on_error(&err) => { - self.retry_with_restart(path_str).await - } Err(err) => { + if Self::should_restart_for_anyhow(&err) { + logging!( + warn, + Type::Core, + "Reload failed after {}ms with retryable/timeout error; attempting restart: {}", + reload_start.elapsed().as_millis(), + err + ); + match self.retry_with_restart(path_str).await { + Ok(_) => return Ok(()), + Err(retry_err) => { + logging!( + error, + Type::Core, + "Reload retry with restart failed: {}", + retry_err + ); + Config::runtime().await.discard(); + return Err(retry_err); + } + } + } Config::runtime().await.discard(); + logging!( + error, + Type::Core, + "Failed to apply config after {}ms: {}", + reload_start.elapsed().as_millis(), + err + ); Err(anyhow!("Failed to apply config: {}", err)) } } @@ -113,17 +227,116 @@ impl CoreManager { self.restart_core().await?; sleep(timing::CONFIG_RELOAD_DELAY).await; - self.reload_config(config_path).await?; + self.reload_config_with_retry(config_path).await?; Config::runtime().await.apply(); logging!(info, Type::Core, "Configuration applied after restart"); Ok(()) } - async fn reload_config(&self, path: &str) -> Result<(), MihomoError> { - 
handle::Handle::mihomo() + async fn reload_config_with_retry(&self, path: &str) -> Result<()> { + for attempt in 1..=MAX_RELOAD_ATTEMPTS { + let attempt_start = Instant::now(); + let reload_future = self.reload_config_once(path); + match timeout(RELOAD_TIMEOUT, reload_future).await { + Ok(Ok(())) => { + logging!( + debug, + Type::Core, + "reload_config attempt {}/{} succeeded in {}ms", + attempt, + MAX_RELOAD_ATTEMPTS, + attempt_start.elapsed().as_millis() + ); + return Ok(()); + } + Ok(Err(err)) => { + logging!( + warn, + Type::Core, + "reload_config attempt {}/{} failed after {}ms: {}", + attempt, + MAX_RELOAD_ATTEMPTS, + attempt_start.elapsed().as_millis(), + err + ); + if attempt == MAX_RELOAD_ATTEMPTS { + return Err(anyhow!( + "Failed to reload config after {} attempts: {}", + attempt, + err + )); + } + } + Err(_) => { + logging!( + warn, + Type::Core, + "reload_config attempt {}/{} timed out after {:?}", + attempt, + MAX_RELOAD_ATTEMPTS, + RELOAD_TIMEOUT + ); + if attempt == MAX_RELOAD_ATTEMPTS { + return Err(anyhow!( + "Config reload timed out after {:?} ({} attempts)", + RELOAD_TIMEOUT, + MAX_RELOAD_ATTEMPTS + )); + } + } + } + } + + Err(anyhow!( + "Config reload retry loop exited unexpectedly ({} attempts)", + MAX_RELOAD_ATTEMPTS + )) + } + + async fn reload_config_once(&self, path: &str) -> Result<(), MihomoError> { + logging!( + info, + Type::Core, + "[ConfigUpdate] reload_config_once begin path={} ", + path + ); + let start = Instant::now(); + let result = handle::Handle::mihomo() .await .reload_config(true, path) - .await + .await; + let elapsed = start.elapsed().as_millis(); + match result { + Ok(()) => { + logging!( + info, + Type::Core, + "[ConfigUpdate] reload_config_once succeeded (elapsed={}ms)", + elapsed + ); + Ok(()) + } + Err(err) => { + logging!( + warn, + Type::Core, + "[ConfigUpdate] reload_config_once failed (elapsed={}ms, err={})", + elapsed, + err + ); + Err(err) + } + } + } + + fn should_restart_for_anyhow(err: &anyhow::Error) -> bool 
{ + if let Some(mihomo_err) = err.downcast_ref::() { + return Self::should_restart_on_error(mihomo_err); + } + let msg = err.to_string(); + msg.contains("timed out") + || msg.contains("reload") + || msg.contains("Failed to apply config") } fn should_restart_on_error(err: &MihomoError) -> bool { diff --git a/src-tauri/src/core/notification.rs b/src-tauri/src/core/notification.rs index 071bcedb..5754fecb 100644 --- a/src-tauri/src/core/notification.rs +++ b/src-tauri/src/core/notification.rs @@ -1,38 +1,71 @@ -use crate::{ - constants::{retry, timing}, - logging, - utils::logging::Type, -}; +use crate::{constants::retry, logging, utils::logging::Type}; +use once_cell::sync::Lazy; use parking_lot::RwLock; use smartstring::alias::String; use std::{ sync::{ - atomic::{AtomicU64, Ordering}, + Arc, + atomic::{AtomicBool, AtomicU64, Ordering}, mpsc, }, thread, time::Instant, }; -use tauri::{Emitter, WebviewWindow}; +use tauri::Emitter; +use tauri::async_runtime; +#[allow(dead_code)] // Temporarily suppress warnings while diagnostics disable certain events #[derive(Debug, Clone)] pub enum FrontendEvent { RefreshClash, RefreshVerge, - NoticeMessage { status: String, message: String }, - ProfileChanged { current_profile_id: String }, - TimerUpdated { profile_index: String }, - ProfileUpdateStarted { uid: String }, - ProfileUpdateCompleted { uid: String }, + RefreshProxy, + ProxiesUpdated { + payload: serde_json::Value, + }, + NoticeMessage { + status: String, + message: String, + }, + ProfileChanged { + current_profile_id: String, + }, + ProfileSwitchFinished { + profile_id: String, + success: bool, + notify: bool, + task_id: u64, + }, + TimerUpdated { + profile_index: String, + }, + ProfileUpdateStarted { + uid: String, + }, + ProfileUpdateCompleted { + uid: String, + }, + RustPanic { + message: String, + location: String, + }, } +static EMIT_SERIALIZER: Lazy> = Lazy::new(|| tokio::sync::Mutex::new(())); + #[derive(Debug, Default)] struct EventStats { - total_sent: 
AtomicU64, total_errors: AtomicU64, last_error_time: RwLock>, } +#[derive(Debug, Default)] +#[allow(dead_code)] +struct BufferedProxies { + pending: parking_lot::Mutex>, + in_flight: AtomicBool, +} + #[derive(Debug, Clone)] pub struct ErrorMessage { pub status: String, @@ -47,6 +80,7 @@ pub struct NotificationSystem { pub(super) is_running: bool, stats: EventStats, emergency_mode: RwLock, + proxies_buffer: Arc, } impl Default for NotificationSystem { @@ -63,6 +97,7 @@ impl NotificationSystem { is_running: false, stats: EventStats::default(), emergency_mode: RwLock::new(false), + proxies_buffer: Arc::new(BufferedProxies::default()), } } @@ -117,13 +152,78 @@ impl NotificationSystem { return; }; - if system.should_skip_event(&event) { - return; + let event_label = Self::describe_event(&event); + + match event { + FrontendEvent::ProxiesUpdated { payload } => { + logging!( + debug, + Type::Frontend, + "Queueing proxies-updated event for buffered emit: {}", + event_label + ); + system.enqueue_proxies_updated(payload); + } + other => { + logging!( + debug, + Type::Frontend, + "Queueing event for async emit: {}", + event_label + ); + + let (event_name, payload_result) = system.serialize_event(other); + let payload = match payload_result { + Ok(value) => value, + Err(err) => { + logging!( + warn, + Type::Frontend, + "Failed to serialize event {}: {}", + event_name, + err + ); + return; + } + }; + + logging!( + debug, + Type::Frontend, + "Dispatching async emit: {}", + event_name + ); + let _ = Self::emit_via_app(event_name, payload); + } + } + } + + fn enqueue_proxies_updated(&self, payload: serde_json::Value) { + let replaced = { + let mut slot = self.proxies_buffer.pending.lock(); + let had_pending = slot.is_some(); + *slot = Some(payload); + had_pending + }; + + if replaced { + logging!( + debug, + Type::Frontend, + "Replaced pending proxies-updated payload with latest snapshot" + ); } - if let Some(window) = super::handle::Handle::get_window() { - 
system.emit_to_window(&window, event); - thread::sleep(timing::EVENT_EMIT_DELAY); + if self + .proxies_buffer + .in_flight + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + let buffer = Arc::clone(&self.proxies_buffer); + async_runtime::spawn(async move { + Self::flush_proxies(buffer).await; + }); } } @@ -135,25 +235,95 @@ impl NotificationSystem { ) } - fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) { - let (event_name, payload) = self.serialize_event(event); - - let Ok(payload) = payload else { - self.stats.total_errors.fetch_add(1, Ordering::Relaxed); - return; - }; - - match window.emit(event_name, payload) { - Ok(_) => { - self.stats.total_sent.fetch_add(1, Ordering::Relaxed); + fn emit_via_app(event_name: &'static str, payload: serde_json::Value) -> Result<(), String> { + let app_handle = super::handle::Handle::app_handle().clone(); + let event_name = event_name.to_string(); + async_runtime::spawn(async move { + if let Err(err) = app_handle.emit_to("main", event_name.as_str(), payload) { + logging!( + warn, + Type::Frontend, + "emit_to failed for {}: {}", + event_name, + err + ); } - Err(e) => { - logging!(warn, Type::Frontend, "Event emit failed: {}", e); - self.handle_emit_error(); + }); + Ok(()) + } + + async fn flush_proxies(buffer: Arc) { + const EVENT_NAME: &str = "proxies-updated"; + + loop { + let payload_opt = { + let mut guard = buffer.pending.lock(); + guard.take() + }; + + let Some(payload) = payload_opt else { + buffer.in_flight.store(false, Ordering::Release); + + if buffer.pending.lock().is_some() + && buffer + .in_flight + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + continue; + } + + break; + }; + + logging!(debug, Type::Frontend, "Dispatching buffered proxies emit"); + let _guard = EMIT_SERIALIZER.lock().await; + if let Err(err) = Self::emit_via_app(EVENT_NAME, payload) { + logging!( + warn, + Type::Frontend, + "Buffered proxies emit failed: 
{}", + err + ); } } } + fn describe_event(event: &FrontendEvent) -> String { + match event { + FrontendEvent::RefreshClash => "RefreshClash".into(), + FrontendEvent::RefreshVerge => "RefreshVerge".into(), + FrontendEvent::RefreshProxy => "RefreshProxy".into(), + FrontendEvent::ProxiesUpdated { .. } => "ProxiesUpdated".into(), + FrontendEvent::NoticeMessage { status, .. } => { + format!("NoticeMessage({})", status).into() + } + FrontendEvent::ProfileChanged { current_profile_id } => { + format!("ProfileChanged({})", current_profile_id).into() + } + FrontendEvent::ProfileSwitchFinished { + profile_id, + task_id, + .. + } => format!( + "ProfileSwitchFinished(profile={}, task={})", + profile_id, task_id + ) + .into(), + FrontendEvent::TimerUpdated { profile_index } => { + format!("TimerUpdated({})", profile_index).into() + } + FrontendEvent::ProfileUpdateStarted { uid } => { + format!("ProfileUpdateStarted({})", uid).into() + } + FrontendEvent::ProfileUpdateCompleted { uid } => { + format!("ProfileUpdateCompleted({})", uid).into() + } + FrontendEvent::RustPanic { message, .. 
} => format!("RustPanic({})", message).into(), + } + } + + #[allow(dead_code)] fn serialize_event( &self, event: FrontendEvent, @@ -167,9 +337,25 @@ impl NotificationSystem { "verge://notice-message", serde_json::to_value((status, message)), ), + FrontendEvent::RefreshProxy => ("verge://refresh-proxy-config", Ok(json!("yes"))), + FrontendEvent::ProxiesUpdated { payload } => ("proxies-updated", Ok(payload)), FrontendEvent::ProfileChanged { current_profile_id } => { ("profile-changed", Ok(json!(current_profile_id))) } + FrontendEvent::ProfileSwitchFinished { + profile_id, + success, + notify, + task_id, + } => ( + "profile-switch-finished", + Ok(json!({ + "profileId": profile_id, + "success": success, + "notify": notify, + "taskId": task_id + })), + ), FrontendEvent::TimerUpdated { profile_index } => { ("verge://timer-updated", Ok(json!(profile_index))) } @@ -179,6 +365,10 @@ impl NotificationSystem { FrontendEvent::ProfileUpdateCompleted { uid } => { ("profile-update-completed", Ok(json!({ "uid": uid }))) } + FrontendEvent::RustPanic { message, location } => ( + "rust-panic", + Ok(json!({ "message": message, "location": location })), + ), } } @@ -204,10 +394,19 @@ impl NotificationSystem { } if let Some(sender) = &self.sender { - sender.send(event).is_ok() - } else { - false + if sender.send(event).is_err() { + logging!( + warn, + Type::Frontend, + "Failed to send event to worker thread" + ); + self.handle_emit_error(); + return false; + } + return true; } + + false } pub fn shutdown(&mut self) { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 5942ac15..6f3130fe 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -192,6 +192,7 @@ mod app_init { cmd::get_profiles, cmd::enhance_profiles, cmd::patch_profiles_config, + cmd::switch_profile, cmd::view_profile, cmd::patch_profile, cmd::create_profile, @@ -202,6 +203,8 @@ mod app_init { cmd::read_profile_file, cmd::save_profile_file, cmd::get_next_update_time, + cmd::get_profile_switch_status, + 
cmd::get_profile_switch_events, cmd::script_validate_notice, cmd::validate_script_file, cmd::create_local_backup, @@ -218,6 +221,7 @@ mod app_init { cmd::get_system_info, cmd::get_unlock_items, cmd::check_media_unlock, + cmd::frontend_log, ] } } @@ -356,6 +360,28 @@ pub fn run() { } } + std::panic::set_hook(Box::new(|info| { + let payload = info + .payload() + .downcast_ref::<&'static str>() + .map(|s| (*s).to_string()) + .or_else(|| info.payload().downcast_ref::().cloned()) + .unwrap_or_else(|| "Unknown panic".to_string()); + let location = info + .location() + .map(|loc| format!("{}:{}", loc.file(), loc.line())) + .unwrap_or_else(|| "unknown location".to_string()); + + logging!( + error, + Type::System, + "Rust panic captured: {} @ {}", + payload, + location + ); + handle::Handle::notify_rust_panic(payload.into(), location.into()); + })); + #[cfg(feature = "clippy")] let context = tauri::test::mock_context(tauri::test::noop_assets()); #[cfg(feature = "clippy")] diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index 044f6f1f..cc72f9c3 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -68,6 +68,13 @@ impl Draft> { }) } + /// 尝试获取最新只读视图,若当前持有写锁则返回 `None` + pub fn try_latest_ref(&self) -> Option>> { + self.inner + .try_read() + .map(|guard| RwLockReadGuard::map(guard, |inner| inner.1.as_ref().unwrap_or(&inner.0))) + } + /// 提交草稿,返回旧正式数据 pub fn apply(&self) -> Option> { let mut inner = self.inner.write(); diff --git a/src/components/home/current-proxy-card.tsx b/src/components/home/current-proxy-card.tsx index ceea82d7..ed74def8 100644 --- a/src/components/home/current-proxy-card.tsx +++ b/src/components/home/current-proxy-card.tsx @@ -100,10 +100,12 @@ export const CurrentProxyCard = () => { const { t } = useTranslation(); const navigate = useNavigate(); const theme = useTheme(); - const { proxies, clashConfig, refreshProxy, rules } = useAppData(); + const { proxies, proxyHydration, clashConfig, refreshProxy, 
rules } = + useAppData(); const { verge } = useVerge(); const { current: currentProfile } = useProfiles(); const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false; + const isLiveHydration = proxyHydration === "live"; const currentProfileId = currentProfile?.uid || null; const getProfileStorageKey = useCallback( @@ -715,7 +717,6 @@ export const CurrentProxyCard = () => { ); } } - refreshProxy(); if (sortType === 1) { setDelaySortRefresh((prev) => prev + 1); @@ -840,13 +841,24 @@ export const CurrentProxyCard = () => { iconColor={currentProxy ? "primary" : undefined} action={ + {!isLiveHydration && ( + + )} @@ -960,7 +972,7 @@ export const CurrentProxyCard = () => { value={state.selection.group} onChange={handleGroupChange} label={t("Group")} - disabled={isGlobalMode || isDirectMode} + disabled={isGlobalMode || isDirectMode || !isLiveHydration} > {state.proxyData.groups.map((group) => ( @@ -978,7 +990,7 @@ export const CurrentProxyCard = () => { value={state.selection.proxy} onChange={handleProxyChange} label={t("Proxy")} - disabled={isDirectMode} + disabled={isDirectMode || !isLiveHydration} renderValue={renderProxyValue} MenuProps={{ PaperProps: { diff --git a/src/components/proxy/provider-button.tsx b/src/components/proxy/provider-button.tsx index e22b856e..4d0f2c39 100644 --- a/src/components/proxy/provider-button.tsx +++ b/src/components/proxy/provider-button.tsx @@ -1,6 +1,7 @@ import { RefreshRounded, StorageOutlined } from "@mui/icons-material"; import { Box, + Chip, Button, Dialog, DialogActions, @@ -18,7 +19,7 @@ import { } from "@mui/material"; import { useLockFn } from "ahooks"; import dayjs from "dayjs"; -import { useState } from "react"; +import { useMemo, useState } from "react"; import { useTranslation } from "react-i18next"; import { updateProxyProvider } from "tauri-plugin-mihomo-api"; @@ -48,29 +49,61 @@ const parseExpire = (expire?: number) => { export const ProviderButton = () => { const { t } = useTranslation(); const [open, 
setOpen] = useState(false); - const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData(); + const { + proxyProviders, + proxyHydration, + refreshProxy, + refreshProxyProviders, + } = useAppData(); + + const isHydrating = proxyHydration !== "live"; const [updating, setUpdating] = useState>({}); // 检查是否有提供者 const hasProviders = Object.keys(proxyProviders || {}).length > 0; + // Hydration hint badge keeps users aware of sync state + const hydrationChip = useMemo(() => { + if (proxyHydration === "live") return null; + + return ( + + ); + }, [proxyHydration, t]); + // 更新单个代理提供者 const updateProvider = useLockFn(async (name: string) => { + if (isHydrating) { + showNotice("info", t("Proxy data is syncing, please wait")); + return; + } + try { // 设置更新状态 setUpdating((prev) => ({ ...prev, [name]: true })); - await updateProxyProvider(name); - - // 刷新数据 - await refreshProxy(); await refreshProxyProviders(); - - showNotice("success", `${name} 更新成功`); + await refreshProxy(); + showNotice( + "success", + t("Provider {{name}} updated successfully", { name }), + ); } catch (err: any) { showNotice( "error", - `${name} 更新失败: ${err?.message || err.toString()}`, + t("Provider {{name}} update failed: {{message}}", { + name, + message: err?.message || err.toString(), + }), ); } finally { // 清除更新状态 @@ -80,11 +113,16 @@ export const ProviderButton = () => { // 更新所有代理提供者 const updateAllProviders = useLockFn(async () => { + if (isHydrating) { + showNotice("info", t("Proxy data is syncing, please wait")); + return; + } + try { // 获取所有provider的名称 const allProviders = Object.keys(proxyProviders || {}); if (allProviders.length === 0) { - showNotice("info", "没有可更新的代理提供者"); + showNotice("info", t("No providers to update")); return; } @@ -110,54 +148,67 @@ export const ProviderButton = () => { } } - // 刷新数据 - await refreshProxy(); await refreshProxyProviders(); - - showNotice("success", "全部代理提供者更新成功"); + await refreshProxy(); + showNotice("success", t("All providers updated 
successfully")); } catch (err: any) { - showNotice("error", `更新失败: ${err?.message || err.toString()}`); + showNotice( + "error", + t("Failed to update providers: {{message}}", { + message: err?.message || err.toString(), + }), + ); } finally { // 清除所有更新状态 setUpdating({}); } }); - const handleClose = () => { - setOpen(false); - }; + const handleClose = () => setOpen(false); if (!hasProviders) return null; return ( <> - + + + {hydrationChip} + {t("Proxy Provider")} - - - + @@ -166,54 +217,63 @@ export const ProviderButton = () => { {Object.entries(proxyProviders || {}) .sort() .map(([key, item]) => { - const provider = item; - const time = dayjs(provider.updatedAt); + if (!item) return null; + + const time = dayjs(item.updatedAt); const isUpdating = updating[key]; - - // 订阅信息 - const sub = provider.subscriptionInfo; - const hasSubInfo = !!sub; - const upload = sub?.Upload || 0; - const download = sub?.Download || 0; - const total = sub?.Total || 0; - const expire = sub?.Expire || 0; - - // 流量使用进度 + const sub = item.subscriptionInfo; + const hasSubInfo = Boolean(sub); + const upload = sub?.Upload ?? 0; + const download = sub?.Download ?? 0; + const total = sub?.Total ?? 0; + const expire = sub?.Expire ?? 0; const progress = total > 0 ? Math.min( - Math.round(((download + upload) * 100) / total) + 1, 100, + Math.max(0, ((upload + download) / total) * 100), ) : 0; return ( { - const bgcolor = - mode === "light" ? "#ffffff" : "#24252f"; - const hoverColor = - mode === "light" - ? alpha(primary.main, 0.1) - : alpha(primary.main, 0.2); - - return { - backgroundColor: bgcolor, - "&:hover": { - backgroundColor: hoverColor, - }, - }; - }, - ]} + secondaryAction={ + + updateProvider(key)} + disabled={isUpdating || isHydrating} + sx={{ + animation: isUpdating + ? 
"spin 1s linear infinite" + : "none", + "@keyframes spin": { + "0%": { transform: "rotate(0deg)" }, + "100%": { transform: "rotate(360deg)" }, + }, + }} + title={t("Update Provider") as string} + > + + + + } + sx={{ + mb: 1, + borderRadius: 1, + border: "1px solid", + borderColor: alpha("#ccc", 0.4), + backgroundColor: alpha("#fff", 0.02), + }} > { display: "flex", justifyContent: "space-between", alignItems: "center", + gap: 1, }} > { title={key} sx={{ display: "flex", alignItems: "center" }} > - {key} + {key} - {provider.proxies.length} + {item.proxies.length} - {provider.vehicleType} + {item.vehicleType} @@ -252,72 +313,39 @@ export const ProviderButton = () => { } secondary={ - <> - {/* 订阅信息 */} - {hasSubInfo && ( - <> - - - {parseTraffic(upload + download)} /{" "} - {parseTraffic(total)} - - - {parseExpire(expire)} - - + hasSubInfo ? ( + <> + + + {parseTraffic(upload + download)} /{" "} + {parseTraffic(total)} + + + {parseExpire(expire)} + + - {/* 进度条 */} - 0 ? 1 : 0, - }} - /> - - )} - + 0 ? 1 : 0, + }} + /> + + ) : null } /> - - { - updateProvider(key); - }} - disabled={isUpdating} - sx={{ - animation: isUpdating - ? "spin 1s linear infinite" - : "none", - "@keyframes spin": { - "0%": { transform: "rotate(0deg)" }, - "100%": { transform: "rotate(360deg)" }, - }, - }} - title={t("Update Provider") as string} - > - - - ); })} diff --git a/src/components/proxy/proxy-groups.tsx b/src/components/proxy/proxy-groups.tsx index eec3f2e8..1a82f2c8 100644 --- a/src/components/proxy/proxy-groups.tsx +++ b/src/components/proxy/proxy-groups.tsx @@ -61,10 +61,17 @@ export const ProxyGroups = (props: Props) => { }>({ open: false, message: "" }); const { verge } = useVerge(); - const { proxies: proxiesData } = useAppData(); + const { + proxies: proxiesData, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + } = useAppData(); const groups = proxiesData?.groups; const availableGroups = useMemo(() => groups ?? 
[], [groups]); - + const showHydrationOverlay = isProxyRefreshPending; + const pendingProfileSwitch = proxyTargetProfileId !== proxyDisplayProfileId; const defaultRuleGroup = useMemo(() => { if (isChainMode && mode === "rule" && availableGroups.length > 0) { return availableGroups[0].name; @@ -76,6 +83,35 @@ export const ProxyGroups = (props: Props) => { () => selectedGroup ?? defaultRuleGroup, [selectedGroup, defaultRuleGroup], ); + const hydrationChip = useMemo(() => { + if (proxyHydration === "live") return null; + + const label = + proxyHydration === "snapshot" ? t("Snapshot data") : t("Syncing..."); + + return ( + + ); + }, [proxyHydration, t]); + + const overlayMessage = useMemo(() => { + if (!showHydrationOverlay) return null; + + if (pendingProfileSwitch) { + return t("Loading proxy data for the selected profile..."); + } + + if (proxyHydration === "snapshot") { + return t("Preparing proxy snapshot..."); + } + + return t("Syncing proxy data..."); + }, [showHydrationOverlay, pendingProfileSwitch, proxyHydration, t]); const { renderList, onProxies, onHeadState } = useRenderList( mode, @@ -93,7 +129,7 @@ export const ProxyGroups = (props: Props) => { [renderList], ); - // 统代理选择 + // 系统代理选择 const { handleProxyGroupChange } = useProxySelection({ onSuccess: () => { onProxies(); @@ -306,12 +342,7 @@ export const ProxyGroups = (props: Props) => { try { await Promise.race([ delayManager.checkListDelay(names, groupName, timeout), - delayGroup(groupName, url, timeout).then((result) => { - console.log( - `[ProxyGroups] getGroupProxyDelays返回结果数量:`, - Object.keys(result || {}).length, - ); - }), // 查询group delays 将清除fixed(不关注调用结果) + delayGroup(groupName, url, timeout), ]); console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`); } catch (error) { @@ -376,6 +407,11 @@ export const ProxyGroups = (props: Props) => { } if (isChainMode) { + const chainVirtuosoHeight = + mode === "rule" && proxyGroupNames.length > 0 + ? 
"calc(100% - 80px)" + : "calc(100% - 14px)"; + // 获取所有代理组 const proxyGroups = proxiesData?.groups || []; @@ -454,10 +490,7 @@ export const ProxyGroups = (props: Props) => { 0 - ? "calc(100% - 80px)" // 只有标题的高度 - : "calc(100% - 14px)", + height: chainVirtuosoHeight, }} totalCount={renderList.length} increaseViewportBy={{ top: 200, bottom: 200 }} @@ -548,7 +581,9 @@ export const ProxyGroups = (props: Props) => { {group.name} - {group.type} · {group.all.length} 节点 + {`${t("Group Type")}: ${group.type} · ${t("Proxy Count")}: ${ + Array.isArray(group.all) ? group.all.length : 0 + }`} @@ -556,7 +591,7 @@ export const ProxyGroups = (props: Props) => { {availableGroups.length === 0 && ( - 暂无可用代理组 + {t("Empty")} )} @@ -567,9 +602,29 @@ export const ProxyGroups = (props: Props) => { return (
- {/* 代理组导航栏 */} + {hydrationChip && ( + + {hydrationChip} + + )} {mode === "rule" && ( { )} /> + {showHydrationOverlay && overlayMessage && ( + + + + {overlayMessage} + + + + )}
); }; diff --git a/src/components/proxy/use-render-list.ts b/src/components/proxy/use-render-list.ts index 7a5949ae..1e6e0fd6 100644 --- a/src/components/proxy/use-render-list.ts +++ b/src/components/proxy/use-render-list.ts @@ -14,50 +14,13 @@ import { } from "./use-head-state"; import { useWindowWidth } from "./use-window-width"; -// 定义代理项接口 -interface IProxyItem { - name: string; - type: string; - udp: boolean; - xudp: boolean; - tfo: boolean; - mptcp: boolean; - smux: boolean; - history: { - time: string; - delay: number; - }[]; - provider?: string; - testUrl?: string; - [key: string]: any; // 添加索引签名以适应其他可能的属性 -} - -// 代理组类型 -type ProxyGroup = { - name: string; - type: string; - udp: boolean; - xudp: boolean; - tfo: boolean; - mptcp: boolean; - smux: boolean; - history: { - time: string; - delay: number; - }[]; - now: string; - all: IProxyItem[]; - hidden?: boolean; - icon?: string; - testUrl?: string; - provider?: string; -}; +type RenderGroup = IProxyGroupItem; export interface IRenderItem { // 组 | head | item | empty | item col type: 0 | 1 | 2 | 3 | 4; key: string; - group: ProxyGroup; + group: RenderGroup; proxy?: IProxyItem; col?: number; proxyCol?: IProxyItem[]; @@ -99,7 +62,7 @@ export const useRenderList = ( selectedGroup?: string | null, ) => { // 使用全局数据提供者 - const { proxies: proxiesData, refreshProxy } = useAppData(); + const { proxies: proxiesData, proxyHydration, refreshProxy } = useAppData(); const { verge } = useVerge(); const { width } = useWindowWidth(); const [headStates, setHeadState] = useHeadStateNew(); @@ -123,17 +86,29 @@ export const useRenderList = ( // 确保代理数据加载 useEffect(() => { - if (!proxiesData) return; + if (!proxiesData || proxyHydration !== "live") return; const { groups, proxies } = proxiesData; if ( (mode === "rule" && !groups.length) || (mode === "global" && proxies.length < 2) ) { - const handle = setTimeout(() => refreshProxy(), 500); + const handle = setTimeout(() => { + void refreshProxy().catch(() => {}); + }, 500); return 
() => clearTimeout(handle); } - }, [proxiesData, mode, refreshProxy]); + }, [proxiesData, proxyHydration, mode, refreshProxy]); + + useEffect(() => { + if (proxyHydration !== "snapshot") return; + + const handle = setTimeout(() => { + void refreshProxy().catch(() => {}); + }, 1800); + + return () => clearTimeout(handle); + }, [proxyHydration, refreshProxy]); // 链式代理模式节点自动计算延迟 useEffect(() => { @@ -147,7 +122,7 @@ export const useRenderList = ( // 设置组监听器,当有延迟更新时自动刷新 const groupListener = () => { console.log("[ChainMode] 延迟更新,刷新UI"); - refreshProxy(); + void refreshProxy().catch(() => {}); }; delayManager.setGroupListener("chain-mode", groupListener); @@ -188,9 +163,12 @@ export const useRenderList = ( // 链式代理模式下,显示代理组和其节点 if (isChainMode && runtimeConfig && mode === "rule") { // 使用正常的规则模式代理组 - const allGroups = proxiesData.groups.length - ? proxiesData.groups - : [proxiesData.global!]; + const chainGroups = proxiesData.groups ?? []; + const allGroups = chainGroups.length + ? chainGroups + : proxiesData.global + ? [proxiesData.global] + : []; // 如果选择了特定代理组,只显示该组的节点 if (selectedGroup) { @@ -282,7 +260,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: ProxyGroup = { + const virtualGroup: RenderGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -340,7 +318,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: ProxyGroup = { + const virtualGroup: RenderGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -380,12 +358,15 @@ export const useRenderList = ( // 正常模式的渲染逻辑 const useRule = mode === "rule" || mode === "script"; - const renderGroups = - useRule && proxiesData.groups.length - ? proxiesData.groups - : [proxiesData.global!]; + const renderGroups = (() => { + const groups = proxiesData.groups ?? []; + if (useRule && groups.length) { + return groups; + } + return proxiesData.global ? 
[proxiesData.global] : groups; + })(); - const retList = renderGroups.flatMap((group: ProxyGroup) => { + const retList = renderGroups.flatMap((group: RenderGroup) => { const headState = headStates[group.name] || DEFAULT_STATE; const ret: IRenderItem[] = [ { diff --git a/src/hooks/use-current-proxy.ts b/src/hooks/use-current-proxy.ts index 7d352326..0c7108ff 100644 --- a/src/hooks/use-current-proxy.ts +++ b/src/hooks/use-current-proxy.ts @@ -2,12 +2,6 @@ import { useMemo } from "react"; import { useAppData } from "@/providers/app-data-context"; -// 定义代理组类型 -interface ProxyGroup { - name: string; - now: string; -} - // 获取当前代理节点信息的自定义Hook export const useCurrentProxy = () => { // 从AppDataProvider获取数据 @@ -37,15 +31,15 @@ export const useCurrentProxy = () => { "自动选择", ]; const primaryGroup = - groups.find((group: ProxyGroup) => + groups.find((group) => primaryKeywords.some((keyword) => group.name.toLowerCase().includes(keyword.toLowerCase()), ), - ) || groups.filter((g: ProxyGroup) => g.name !== "GLOBAL")[0]; + ) || groups.find((group) => group.name !== "GLOBAL"); if (primaryGroup) { primaryGroupName = primaryGroup.name; - currentName = primaryGroup.now; + currentName = primaryGroup.now ?? 
currentName; } } diff --git a/src/hooks/use-profiles.ts b/src/hooks/use-profiles.ts index fdb73462..c412ec30 100644 --- a/src/hooks/use-profiles.ts +++ b/src/hooks/use-profiles.ts @@ -5,33 +5,54 @@ import { getProfiles, patchProfile, patchProfilesConfig, + calcuProxies, } from "@/services/cmds"; -import { calcuProxies } from "@/services/cmds"; +import { + useProfileStore, + selectEffectiveProfiles, + selectIsHydrating, + selectLastResult, +} from "@/stores/profile-store"; export const useProfiles = () => { + const profilesFromStore = useProfileStore(selectEffectiveProfiles); + const storeHydrating = useProfileStore(selectIsHydrating); + const lastResult = useProfileStore(selectLastResult); + const commitProfileSnapshot = useProfileStore( + (state) => state.commitHydrated, + ); + const { - data: profiles, + data: swrProfiles, mutate: mutateProfiles, error, isValidating, } = useSWR("getProfiles", getProfiles, { revalidateOnFocus: false, revalidateOnReconnect: false, - dedupingInterval: 500, // 减少去重时间,提高响应性 + dedupingInterval: 500, errorRetryCount: 3, errorRetryInterval: 1000, - refreshInterval: 0, // 完全由手动控制 - onError: (error) => { - console.error("[useProfiles] SWR错误:", error); + refreshInterval: 0, + onError: (err) => { + console.error("[useProfiles] SWR错误:", err); }, onSuccess: (data) => { + commitProfileSnapshot(data); console.log( - "[useProfiles] 配置数据更新成功,配置数量:", + "[useProfiles] 配置数据更新成功,配置数量", data?.items?.length || 0, ); }, }); + const rawProfiles = profilesFromStore ?? swrProfiles; + const profiles = (rawProfiles ?? 
{ + current: null, + items: [], + }) as IProfilesConfig; + const hasProfiles = rawProfiles != null; + const patchProfiles = async ( value: Partial, signal?: AbortSignal, @@ -49,32 +70,30 @@ export const useProfiles = () => { await mutateProfiles(); return success; - } catch (error) { - if (error instanceof DOMException && error.name === "AbortError") { - throw error; + } catch (err) { + if (err instanceof DOMException && err.name === "AbortError") { + throw err; } await mutateProfiles(); - throw error; + throw err; } }; const patchCurrent = async (value: Partial) => { - if (profiles?.current) { - await patchProfile(profiles.current, value); - mutateProfiles(); + if (!hasProfiles || !profiles.current) { + return; } + await patchProfile(profiles.current, value); + mutateProfiles(); }; - // 根据selected的节点选择 const activateSelected = async () => { try { console.log("[ActivateSelected] 开始处理代理选择"); - const [proxiesData, profileData] = await Promise.all([ - calcuProxies(), - getProfiles(), - ]); + const proxiesData = await calcuProxies(); + const profileData = hasProfiles ? 
profiles : null; if (!profileData || !proxiesData) { console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理"); @@ -90,7 +109,6 @@ export const useProfiles = () => { return; } - // 检查是否有saved的代理选择 const { selected = [] } = current; if (selected.length === 0) { console.log("[ActivateSelected] 当前profile无保存的代理选择,跳过"); @@ -98,7 +116,7 @@ export const useProfiles = () => { } console.log( - `[ActivateSelected] 当前profile有 ${selected.length} 个代理选择配置`, + `[ActivateSelected] 当前profile有${selected.length} 个代理选择配置`, ); const selectedMap = Object.fromEntries( @@ -115,7 +133,6 @@ export const useProfiles = () => { "LoadBalance", ]); - // 处理所有代理组 [global, ...groups].forEach((group) => { if (!group) { return; @@ -150,7 +167,7 @@ export const useProfiles = () => { if (!existsInGroup) { console.warn( - `[ActivateSelected] 保存的代理 ${savedProxy} 不存在于代理组 ${name}`, + `[ActivateSelected] 保存的代理${savedProxy} 不存在于代理组${name}`, ); hasChange = true; newSelected.push({ name, now: now ?? savedProxy }); @@ -173,7 +190,7 @@ export const useProfiles = () => { return; } - console.log(`[ActivateSelected] 完成代理切换,保存新的选择配置`); + console.log("[ActivateSelected] 完成代理切换,保存新的选择配置"); try { await patchProfile(profileData.current!, { selected: newSelected }); @@ -195,14 +212,18 @@ export const useProfiles = () => { return { profiles, - current: profiles?.items?.find((p) => p && p.uid === profiles.current), + hasProfiles, + current: hasProfiles + ? (profiles.items?.find((p) => p && p.uid === profiles.current) ?? 
null) + : null, activateSelected, patchProfiles, patchCurrent, mutateProfiles, - // 新增故障检测状态 - isLoading: isValidating, + isLoading: isValidating || storeHydrating, + isHydrating: storeHydrating, + lastResult, error, - isStale: !profiles && !error && !isValidating, // 检测是否处于异常状态 + isStale: !hasProfiles && !error && !isValidating, }; }; diff --git a/src/pages/_layout/useLayoutEvents.ts b/src/pages/_layout/useLayoutEvents.ts index c26084a3..e3c9ecbc 100644 --- a/src/pages/_layout/useLayoutEvents.ts +++ b/src/pages/_layout/useLayoutEvents.ts @@ -1,11 +1,9 @@ import { listen } from "@tauri-apps/api/event"; import { getCurrentWebviewWindow } from "@tauri-apps/api/webviewWindow"; import { useEffect } from "react"; -import { mutate } from "swr"; import { useListen } from "@/hooks/use-listen"; -import { getAxios } from "@/services/api"; - +import { refreshClashData, refreshVergeData } from "@/services/refresh"; export const useLayoutEvents = ( handleNotice: (payload: [string, string]) => void, ) => { @@ -37,32 +35,32 @@ export const useLayoutEvents = ( .catch((error) => console.error("[事件监听] 注册失败", error)); }; + register( + addListener("verge://notice-message", ({ payload }) => + handleNotice(payload as [string, string]), + ), + ); + register( addListener("verge://refresh-clash-config", async () => { - await getAxios(true); - mutate("getProxies"); - mutate("getVersion"); - mutate("getClashConfig"); - mutate("getProxyProviders"); + try { + await refreshClashData(); + } catch (error) { + console.error("[事件监听] 刷新 Clash 配置失败", error); + } }), ); register( addListener("verge://refresh-verge-config", () => { - mutate("getVergeConfig"); - mutate("getSystemProxy"); - mutate("getAutotemProxy"); - mutate("getRunningMode"); - mutate("isServiceAvailable"); + try { + refreshVergeData(); + } catch (error) { + console.error("[事件监听] 刷新 Verge 配置失败", error); + } }), ); - register( - addListener("verge://notice-message", ({ payload }) => - handleNotice(payload as [string, string]), - ), - ); 
- const appWindow = getCurrentWebviewWindow(); register( (async () => { diff --git a/src/pages/profiles.tsx b/src/pages/profiles.tsx index 597ec4ec..0d29fe34 100644 --- a/src/pages/profiles.tsx +++ b/src/pages/profiles.tsx @@ -25,16 +25,23 @@ import { } from "@mui/icons-material"; import { LoadingButton } from "@mui/lab"; import { Box, Button, Divider, Grid, IconButton, Stack } from "@mui/material"; +import { invoke } from "@tauri-apps/api/core"; import { listen, TauriEvent } from "@tauri-apps/api/event"; import { readText } from "@tauri-apps/plugin-clipboard-manager"; import { readTextFile } from "@tauri-apps/plugin-fs"; import { useLockFn } from "ahooks"; import { throttle } from "lodash-es"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { + useCallback, + useEffect, + useMemo, + useReducer, + useRef, + useState, +} from "react"; import { useTranslation } from "react-i18next"; import { useLocation } from "react-router"; import useSWR, { mutate } from "swr"; -import { closeAllConnections } from "tauri-plugin-mihomo-api"; import { BasePage, DialogRef } from "@/components/base"; import { BaseStyledTextField } from "@/components/base/base-styled-text-field"; @@ -47,6 +54,7 @@ import { import { ConfigViewer } from "@/components/setting/mods/config-viewer"; import { useListen } from "@/hooks/use-listen"; import { useProfiles } from "@/hooks/use-profiles"; +import { useAppData } from "@/providers/app-data-context"; import { createProfile, deleteProfile, @@ -57,11 +65,16 @@ import { importProfile, reorderProfile, updateProfile, + switchProfileCommand, + type ProfileSwitchStatus, + type SwitchTaskStatus, } from "@/services/cmds"; import { showNotice } from "@/services/noticeService"; +import { refreshClashData } from "@/services/refresh"; import { useSetLoadingCache, useThemeMode } from "@/services/states"; +import { AsyncEventQueue, afterPaint } from "@/utils/asyncQueue"; -// 记录profile切换状态 +// Record profile switch state const 
debugProfileSwitch = (action: string, profile: string, extra?: any) => { const timestamp = new Date().toISOString().substring(11, 23); console.log( @@ -70,33 +83,80 @@ const debugProfileSwitch = (action: string, profile: string, extra?: any) => { ); }; -// 检查请求是否已过期 -const isRequestOutdated = ( - currentSequence: number, - requestSequenceRef: any, - profile: string, -) => { - if (currentSequence !== requestSequenceRef.current) { - debugProfileSwitch( - "REQUEST_OUTDATED", - profile, - `当前序列号: ${currentSequence}, 最新序列号: ${requestSequenceRef.current}`, - ); - return true; - } - return false; +type RustPanicPayload = { + message: string; + location: string; }; -// 检查是否被中断 -const isOperationAborted = ( - abortController: AbortController, - profile: string, -) => { - if (abortController.signal.aborted) { - debugProfileSwitch("OPERATION_ABORTED", profile); - return true; +type SwitchTaskMeta = { profileId: string; notify: boolean }; + +const collectSwitchingProfileIds = ( + status: ProfileSwitchStatus | null, +): string[] => { + if (!status) return []; + const ids = new Set(); + if (status.active) { + ids.add(status.active.profileId); + } + status.queue.forEach((task) => ids.add(task.profileId)); + return Array.from(ids); +}; + +type ManualActivatingAction = + | { type: "reset" } + | { type: "set"; value: string[] } + | { type: "add"; ids: string[] } + | { type: "remove"; id: string } + | { type: "filterAllowed"; allowed: Set }; + +const manualActivatingReducer = ( + state: string[], + action: ManualActivatingAction, +): string[] => { + switch (action.type) { + case "reset": + return state.length > 0 ? 
[] : state; + case "set": { + const unique = Array.from( + new Set(action.value.filter((id) => typeof id === "string" && id)), + ); + if ( + unique.length === state.length && + unique.every((id, index) => id === state[index]) + ) { + return state; + } + return unique; + } + case "add": { + const incoming = action.ids.filter((id) => typeof id === "string" && id); + if (incoming.length === 0) { + return state; + } + const next = new Set(state); + let changed = false; + incoming.forEach((id) => { + const before = next.size; + next.add(id); + if (next.size !== before) { + changed = true; + } + }); + return changed ? Array.from(next) : state; + } + case "remove": { + if (!state.includes(action.id)) { + return state; + } + return state.filter((id) => id !== action.id); + } + case "filterAllowed": { + const next = state.filter((id) => action.allowed.has(id)); + return next.length === state.length ? state : next; + } + default: + return state; } - return false; }; const normalizeProfileUrl = (value?: string) => { @@ -117,7 +177,7 @@ const normalizeProfileUrl = (value?: string) => { } catch { const schemeNormalized = trimmed.replace( /^([a-z]+):\/\//i, - (match, scheme: string) => `${scheme.toLowerCase()}://`, + (_match, scheme: string) => `${scheme.toLowerCase()}://`, ); return schemeNormalized.replace(/\/+$/, ""); } @@ -159,7 +219,7 @@ const createImportLandingVerifier = ( if (currentCount > baselineCount) { console.log( - `[导入验证] 配置数量已增加: ${baselineCount} -> ${currentCount}`, + `[Import Verify] Configuration count increased: ${baselineCount} -> ${currentCount}`, ); return true; } @@ -177,7 +237,9 @@ const createImportLandingVerifier = ( } if (!hadBaselineProfile) { - console.log("[导入验证] 检测到新的订阅记录,判定为导入成功"); + console.log( + "[Import Verify] Detected new profile record; treating as success", + ); return true; } @@ -186,13 +248,15 @@ const createImportLandingVerifier = ( if (currentUpdated > baselineUpdated) { console.log( - `[导入验证] 订阅更新时间已更新 ${baselineUpdated} -> 
${currentUpdated}`, + `[Import Verify] Profile timestamp updated ${baselineUpdated} -> ${currentUpdated}`, ); return true; } if (currentSignature !== baselineSignature) { - console.log("[导入验证] 订阅详情发生变化,判定为导入成功"); + console.log( + "[Import Verify] Profile details changed; treating as success", + ); return true; } @@ -205,14 +269,110 @@ const createImportLandingVerifier = ( }; }; +const isDev = import.meta.env.DEV; + const ProfilePage = () => { + // Serialize profile switch events so state transitions stay deterministic. + const switchEventQueue = useMemo(() => new AsyncEventQueue(), []); + // Stage follow-up effects (hydration, refresh) to run sequentially after switch completion. + const postSwitchEffectQueue = useMemo(() => new AsyncEventQueue(), []); + const mountedRef = useRef(false); + const { t } = useTranslation(); const location = useLocation(); + const logToBackend = useCallback( + ( + level: "debug" | "info" | "warn" | "error", + message: string, + context?: Record, + ) => { + const payload: Record = { + level, + message, + }; + if (context !== undefined) { + payload.context = context; + } + invoke("frontend_log", { payload }).catch(() => {}); + }, + [], + ); const { addListener } = useListen(); + const { switchStatus } = useAppData(); const [url, setUrl] = useState(""); const [disabled, setDisabled] = useState(false); - const [activatings, setActivatings] = useState([]); + const [manualActivatings, dispatchManualActivatings] = useReducer( + manualActivatingReducer, + [], + ); + const taskMetaRef = useRef>(new Map()); + const lastResultAtRef = useRef(0); + const initialLastResultSyncRef = useRef(true); + + useEffect(() => { + mountedRef.current = true; + return () => { + mountedRef.current = false; + switchEventQueue.clear(); + postSwitchEffectQueue.clear(); + if (isDev) { + console.debug("[ProfileSwitch] component unmounted, queues cleared"); + } + }; + }, [postSwitchEffectQueue, switchEventQueue]); + useEffect(() => { + const handleError = (event: 
ErrorEvent) => { + logToBackend("error", "[ProfileSwitch] window error captured", { + message: event.message, + filename: event.filename, + lineno: event.lineno, + colno: event.colno, + stack: event.error?.stack, + }); + console.error( + "[ProfileSwitch] window error captured", + event.message, + event.error, + ); + }; + const handleRejection = (event: PromiseRejectionEvent) => { + let reasonSummary: string; + if (typeof event.reason === "object") { + try { + reasonSummary = JSON.stringify(event.reason); + } catch (error) { + reasonSummary = `[unserializable reason: ${String(error)}]`; + } + } else { + reasonSummary = String(event.reason); + } + logToBackend("error", "[ProfileSwitch] unhandled rejection captured", { + reason: reasonSummary, + }); + console.error( + "[ProfileSwitch] unhandled rejection captured", + event.reason, + ); + }; + window.addEventListener("error", handleError); + window.addEventListener("unhandledrejection", handleRejection); + return () => { + window.removeEventListener("error", handleError); + window.removeEventListener("unhandledrejection", handleRejection); + }; + }, [logToBackend]); const [loading, setLoading] = useState(false); + const postSwitchGenerationRef = useRef(0); + const switchingProfileId = switchStatus?.active?.profileId ?? null; + const switchActivatingIds = useMemo( + () => collectSwitchingProfileIds(switchStatus ?? 
null), + [switchStatus], + ); + const activatings = useMemo(() => { + const merged = new Set(manualActivatings); + switchActivatingIds.forEach((id) => merged.add(id)); + return Array.from(merged); + }, [manualActivatings, switchActivatingIds]); // Batch selection states const [batchMode, setBatchMode] = useState(false); @@ -220,57 +380,6 @@ const ProfilePage = () => { () => new Set(), ); - // 防止重复切换 - const switchingProfileRef = useRef(null); - - // 支持中断当前切换操作 - const abortControllerRef = useRef(null); - - // 只处理最新的切换请求 - const requestSequenceRef = useRef(0); - - // 待处理请求跟踪,取消排队的请求 - const pendingRequestRef = useRef | null>(null); - - // 处理profile切换中断 - const handleProfileInterrupt = useCallback( - (previousSwitching: string, newProfile: string) => { - debugProfileSwitch( - "INTERRUPT_PREVIOUS", - previousSwitching, - `被 ${newProfile} 中断`, - ); - - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - debugProfileSwitch("ABORT_CONTROLLER_TRIGGERED", previousSwitching); - } - - if (pendingRequestRef.current) { - debugProfileSwitch("CANCEL_PENDING_REQUEST", previousSwitching); - } - - setActivatings((prev) => prev.filter((id) => id !== previousSwitching)); - showNotice( - "info", - `${t("Profile switch interrupted by new selection")}: ${previousSwitching} → ${newProfile}`, - 3000, - ); - }, - [t], - ); - - // 清理切换状态 - const cleanupSwitchState = useCallback( - (profile: string, sequence: number) => { - setActivatings((prev) => prev.filter((id) => id !== profile)); - switchingProfileRef.current = null; - abortControllerRef.current = null; - pendingRequestRef.current = null; - debugProfileSwitch("SWITCH_END", profile, `序列号: ${sequence}`); - }, - [], - ); const sensors = useSensors( useSensor(PointerSensor), useSensor(KeyboardSensor, { @@ -282,11 +391,32 @@ const ProfilePage = () => { const { profiles = {}, activateSelected, - patchProfiles, mutateProfiles, error, isStale, } = useProfiles(); + const activateSelectedRef = useRef(activateSelected); + 
const mutateProfilesRef = useRef(mutateProfiles); + const profileMutateScheduledRef = useRef(false); + const mutateLogsRef = useRef<(() => Promise | void) | null>(null); + const tRef = useRef(t); + const showNoticeRef = useRef(showNotice); + const refreshClashDataRef = useRef(refreshClashData); + + useEffect(() => { + activateSelectedRef.current = activateSelected; + }, [activateSelected]); + + useEffect(() => { + mutateProfilesRef.current = mutateProfiles; + }, [mutateProfiles]); + + useEffect(() => { + tRef.current = t; + }, [t]); + + showNoticeRef.current = showNotice; + refreshClashDataRef.current = refreshClashData; useEffect(() => { const handleFileDrop = async () => { @@ -327,28 +457,28 @@ const ProfilePage = () => { }; }, [addListener, mutateProfiles, t]); - // 添加紧急恢复功能 + // Add emergency recovery capability const onEmergencyRefresh = useLockFn(async () => { - console.log("[紧急刷新] 开始强制刷新所有数据"); + console.log("[Emergency Refresh] Starting forced refresh of all data"); try { - // 清除所有SWR缓存 + // Clear all SWR caches await mutate(() => true, undefined, { revalidate: false }); - // 强制重新获取配置数据 + // Force fetching profile data await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // 等待状态稳定后增强配置 + // Wait for state to stabilize before enhancing the profile await new Promise((resolve) => setTimeout(resolve, 500)); await onEnhance(false); - showNotice("success", "数据已强制刷新", 2000); + showNotice("success", "Data forcibly refreshed", 2000); } catch (error: any) { - console.error("[紧急刷新] 失败:", error); - showNotice("error", `紧急刷新失败: ${error.message}`, 4000); + console.error("[Emergency Refresh] Failed:", error); + showNotice("error", `Emergency refresh failed: ${error.message}`, 4000); } }); @@ -356,6 +486,156 @@ const ProfilePage = () => { "getRuntimeLogs", getRuntimeLogs, ); + useEffect(() => { + mutateLogsRef.current = mutateLogs; + }, [mutateLogs]); + + useEffect(() => { + activateSelectedRef.current = activateSelected; + }, 
[activateSelected]); + + useEffect(() => { + mutateProfilesRef.current = mutateProfiles; + }, [mutateProfiles]); + + const scheduleProfileMutate = useCallback(() => { + if (profileMutateScheduledRef.current) return; + if (!mountedRef.current) return; + profileMutateScheduledRef.current = true; + requestAnimationFrame(() => { + profileMutateScheduledRef.current = false; + const mutateProfilesFn = mutateProfilesRef.current; + if (mutateProfilesFn) { + void mutateProfilesFn(); + if (isDev) { + console.debug( + "[ProfileSwitch] mutateProfiles executed from schedule", + ); + } + } + }); + }, []); + + useEffect(() => { + if (!switchStatus) { + taskMetaRef.current.clear(); + dispatchManualActivatings({ type: "reset" }); + return; + } + + const trackedProfiles = new Set(); + const registerTask = (task: SwitchTaskStatus | null | undefined) => { + if (!task) return; + taskMetaRef.current.set(task.taskId, { + profileId: task.profileId, + notify: task.notify, + }); + trackedProfiles.add(task.profileId); + }; + + registerTask(switchStatus.active ?? null); + switchStatus.queue.forEach((task) => registerTask(task)); + + dispatchManualActivatings({ + type: "filterAllowed", + allowed: trackedProfiles, + }); + + const lastResult = switchStatus.lastResult ?? null; + if (initialLastResultSyncRef.current) { + initialLastResultSyncRef.current = false; + if (lastResult) { + lastResultAtRef.current = lastResult.finishedAt; + } + } + + if (lastResult && lastResult.finishedAt !== lastResultAtRef.current) { + lastResultAtRef.current = lastResult.finishedAt; + const { profileId, success, finishedAt, errorDetail, cancelled } = + lastResult; + const isCancelled = Boolean(cancelled); + const meta = taskMetaRef.current.get(lastResult.taskId); + const notifySuccess = meta?.notify ?? 
true; + taskMetaRef.current.delete(lastResult.taskId); + + debugProfileSwitch("STATUS_RESULT", profileId, { + success, + finishedAt, + notifySuccess, + cancelled: isCancelled, + }); + + switchEventQueue.enqueue(() => { + if (!mountedRef.current) return; + + dispatchManualActivatings({ type: "remove", id: profileId }); + + const eventGeneration = postSwitchGenerationRef.current; + + postSwitchEffectQueue.enqueue(async () => { + if (!mountedRef.current) return; + if (postSwitchGenerationRef.current !== eventGeneration) { + return; + } + + logToBackend( + success || isCancelled ? "info" : "warn", + "[ProfileSwitch] status result received", + { + profileId, + success, + cancelled: isCancelled, + finishedAt, + }, + ); + + scheduleProfileMutate(); + + if (success) { + if (notifySuccess) { + await afterPaint(); + showNoticeRef.current?.( + "success", + tRef.current("Profile Switched"), + 1000, + ); + } + + const operations: Promise[] = []; + const mutateLogs = mutateLogsRef.current; + if (mutateLogs) { + operations.push(Promise.resolve(mutateLogs())); + } + const activateSelected = activateSelectedRef.current; + if (activateSelected) { + operations.push(Promise.resolve(activateSelected())); + } + const refreshFn = refreshClashDataRef.current; + if (refreshFn) { + operations.push(Promise.resolve(refreshFn())); + } + + if (operations.length > 0) { + void Promise.resolve().then(() => Promise.allSettled(operations)); + } + } else if (!isCancelled) { + await afterPaint(); + showNoticeRef.current?.( + "error", + errorDetail ?? 
tRef.current("Profile switch failed"), + ); + } + }); + }); + } + }, [ + dispatchManualActivatings, + logToBackend, + postSwitchEffectQueue, + scheduleProfileMutate, + switchEventQueue, + switchStatus, + ]); const viewerRef = useRef(null); const configRef = useRef(null); @@ -375,7 +655,7 @@ const ProfilePage = () => { const onImport = async () => { if (!url) return; - // 校验url是否为http/https + // Validate that the URL uses http/https if (!/^https?:\/\//i.test(url)) { showNotice("error", t("Invalid Profile URL")); return; @@ -405,7 +685,10 @@ const ProfilePage = () => { ); } } catch (verifyErr) { - console.warn("[导入验证] 获取配置状态失败:", verifyErr); + console.warn( + "[Import Verify] Failed to fetch profile state:", + verifyErr, + ); break; } } @@ -414,33 +697,33 @@ const ProfilePage = () => { }; try { - // 尝试正常导入 + // Attempt standard import await importProfile(url); await handleImportSuccess("Profile Imported Successfully"); return; } catch (initialErr) { - console.warn("[订阅导入] 首次导入失败:", initialErr); + console.warn("[Profile Import] Initial import failed:", initialErr); const alreadyImported = await waitForImportLanding(); if (alreadyImported) { console.warn( - "[订阅导入] 接口返回失败,但检测到订阅已导入,跳过回退导入流程", + "[Profile Import] API reported failure, but profile already imported; skipping rollback", ); await handleImportSuccess("Profile Imported Successfully"); return; } - // 首次导入失败且未检测到数据变更,尝试使用自身代理 + // Initial import failed without data change; try built-in proxy showNotice("info", t("Import failed, retrying with Clash proxy...")); try { - // 使用自身代理尝试导入 + // Attempt import using built-in proxy await importProfile(url, { with_proxy: false, self_proxy: true, }); await handleImportSuccess("Profile Imported with Clash proxy"); } catch (retryErr: any) { - // 回退导入也失败 + // Rollback import also failed const retryErrmsg = retryErr?.message || retryErr.toString(); showNotice( "error", @@ -453,7 +736,9 @@ const ProfilePage = () => { } }; - // 强化的刷新策略 + const currentProfileId = profiles.current 
?? null; + + // Enhanced refresh strategy const performRobustRefresh = async ( importVerifier: ImportLandingVerifier, ) => { @@ -464,43 +749,50 @@ const ProfilePage = () => { while (retryCount < maxRetries) { try { - console.log(`[导入刷新] 第${retryCount + 1}次尝试刷新配置数据`); + console.log( + `[Import Refresh] Attempt ${retryCount + 1} to refresh profile data`, + ); - // 强制刷新,绕过所有缓存 + // Force refresh and bypass caches await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // 等待状态稳定 + // Wait for state to stabilize await new Promise((resolve) => setTimeout(resolve, baseDelay * (retryCount + 1)), ); - // 验证刷新是否成功 + // Verify whether refresh succeeded const currentProfiles = await getProfiles(); const currentCount = currentProfiles?.items?.length || 0; if (currentCount > baselineCount) { console.log( - `[导入刷新] 配置刷新成功,配置数量 ${baselineCount} -> ${currentCount}`, + `[Import Refresh] Profile refresh succeeded; count ${baselineCount} -> ${currentCount}`, ); await onEnhance(false); return; } if (hasLanding(currentProfiles)) { - console.log("[导入刷新] 检测到订阅内容更新,判定刷新成功"); + console.log( + "[Import Refresh] Detected profile update; treating as success", + ); await onEnhance(false); return; } console.warn( - `[导入刷新] 配置数量未增加 (${currentCount}), 继续重试...`, + `[Import Refresh] Profile count unchanged (${currentCount}), retrying...`, ); retryCount++; } catch (error) { - console.error(`[导入刷新] 第${retryCount + 1}次刷新失败:`, error); + console.error( + `[Import Refresh] Attempt ${retryCount + 1} failed:`, + error, + ); retryCount++; await new Promise((resolve) => setTimeout(resolve, baseDelay * retryCount), @@ -508,10 +800,12 @@ const ProfilePage = () => { } } - // 所有重试失败后的最后尝试 - console.warn(`[导入刷新] 常规刷新失败,尝试清除缓存重新获取`); + // Final attempt after all retries fail + console.warn( + `[Import Refresh] Regular refresh failed; clearing cache and retrying`, + ); try { - // 清除SWR缓存并重新获取 + // Clear SWR cache and refetch await mutate("getProfiles", getProfiles(), { revalidate: true }); 
await onEnhance(false); showNotice( @@ -520,7 +814,10 @@ const ProfilePage = () => { 3000, ); } catch (finalError) { - console.error(`[导入刷新] 最终刷新尝试失败:`, finalError); + console.error( + `[Import Refresh] Final refresh attempt failed:`, + finalError, + ); showNotice( "error", t("Profile imported successfully, please restart if not visible"), @@ -531,209 +828,108 @@ const ProfilePage = () => { const onDragEnd = async (event: DragEndEvent) => { const { active, over } = event; - if (over) { - if (active.id !== over.id) { - await reorderProfile(active.id.toString(), over.id.toString()); - mutateProfiles(); - } + if (over && active.id !== over.id) { + await reorderProfile(active.id.toString(), over.id.toString()); + mutateProfiles(); } }; - const executeBackgroundTasks = useCallback( - async ( - profile: string, - sequence: number, - abortController: AbortController, - ) => { - try { - if ( - sequence === requestSequenceRef.current && - switchingProfileRef.current === profile && - !abortController.signal.aborted - ) { - await activateSelected(); - console.log(`[Profile] 后台处理完成,序列号: ${sequence}`); - } else { - debugProfileSwitch( - "BACKGROUND_TASK_SKIPPED", - profile, - `序列号过期或被中断: ${sequence} vs ${requestSequenceRef.current}`, - ); - } - } catch (err: any) { - console.warn("Failed to activate selected proxies:", err); - } - }, - [activateSelected], - ); + const requestSwitch = useCallback( + (targetProfile: string, notifySuccess: boolean) => { + const nextGeneration = postSwitchGenerationRef.current + 1; + postSwitchGenerationRef.current = nextGeneration; + postSwitchEffectQueue.clear(); - const activateProfile = useCallback( - async (profile: string, notifySuccess: boolean) => { - if (profiles.current === profile && !notifySuccess) { - console.log( - `[Profile] 目标profile ${profile} 已经是当前配置,跳过切换`, - ); - return; - } - - const currentSequence = ++requestSequenceRef.current; - debugProfileSwitch("NEW_REQUEST", profile, `序列号: ${currentSequence}`); - - // 处理中断逻辑 - const 
previousSwitching = switchingProfileRef.current; - if (previousSwitching && previousSwitching !== profile) { - handleProfileInterrupt(previousSwitching, profile); - } - - // 防止重复切换同一个profile - if (switchingProfileRef.current === profile) { - debugProfileSwitch("DUPLICATE_SWITCH_BLOCKED", profile); - return; - } - - // 初始化切换状态 - switchingProfileRef.current = profile; - debugProfileSwitch("SWITCH_START", profile, `序列号: ${currentSequence}`); - - const currentAbortController = new AbortController(); - abortControllerRef.current = currentAbortController; - - setActivatings((prev) => { - if (prev.includes(profile)) return prev; - return [...prev, profile]; + debugProfileSwitch("REQUEST_SWITCH", targetProfile, { + notifySuccess, + generation: nextGeneration, }); - try { - console.log( - `[Profile] 开始切换到: ${profile},序列号: ${currentSequence}`, - ); + logToBackend("info", "[ProfileSwitch] request switch", { + targetProfile, + notifySuccess, + generation: nextGeneration, + }); - // 检查请求有效性 - if ( - isRequestOutdated(currentSequence, requestSequenceRef, profile) || - isOperationAborted(currentAbortController, profile) - ) { - return; - } + dispatchManualActivatings({ type: "add", ids: [targetProfile] }); - // 执行切换请求 - const requestPromise = patchProfiles( - { current: profile }, - currentAbortController.signal, - ); - pendingRequestRef.current = requestPromise; - - const success = await requestPromise; - - if (pendingRequestRef.current === requestPromise) { - pendingRequestRef.current = null; - } - - // 再次检查有效性 - if ( - isRequestOutdated(currentSequence, requestSequenceRef, profile) || - isOperationAborted(currentAbortController, profile) - ) { - return; - } - - // 完成切换 - await mutateLogs(); - closeAllConnections(); - - if (notifySuccess && success) { - showNotice("success", t("Profile Switched"), 1000); - } - - console.log( - `[Profile] 切换到 ${profile} 完成,序列号: ${currentSequence},开始后台处理`, - ); - - // 延迟执行后台任务 - setTimeout( - () => - executeBackgroundTasks( - profile, - 
currentSequence, - currentAbortController, - ), - 50, - ); - } catch (err: any) { - if (pendingRequestRef.current) { - pendingRequestRef.current = null; - } - - // 检查是否因为中断或过期而出错 - if ( - isOperationAborted(currentAbortController, profile) || - isRequestOutdated(currentSequence, requestSequenceRef, profile) - ) { - return; - } - - console.error(`[Profile] 切换失败:`, err); - showNotice("error", err?.message || err.toString(), 4000); - } finally { - // 只有当前profile仍然是正在切换的profile且序列号匹配时才清理状态 - if ( - switchingProfileRef.current === profile && - currentSequence === requestSequenceRef.current - ) { - cleanupSwitchState(profile, currentSequence); - } else { - debugProfileSwitch( - "CLEANUP_SKIPPED", - profile, - `序列号不匹配或已被接管: ${currentSequence} vs ${requestSequenceRef.current}`, + void (async () => { + try { + const accepted = await switchProfileCommand( + targetProfile, + notifySuccess, ); + if (!accepted) { + throw new Error(tRef.current("Profile switch failed")); + } + } catch (error: any) { + const message = + error?.message || error?.toString?.() || String(error); + logToBackend("error", "[ProfileSwitch] switch command failed", { + profileId: targetProfile, + message, + }); + dispatchManualActivatings({ type: "remove", id: targetProfile }); + scheduleProfileMutate(); + await afterPaint(); + showNoticeRef.current?.("error", message); } - } + })(); }, [ - profiles, - patchProfiles, - mutateLogs, - t, - executeBackgroundTasks, - handleProfileInterrupt, - cleanupSwitchState, + dispatchManualActivatings, + logToBackend, + postSwitchEffectQueue, + scheduleProfileMutate, ], ); - const onSelect = async (current: string, force: boolean) => { - // 阻止重复点击或已激活的profile - if (switchingProfileRef.current === current) { - debugProfileSwitch("DUPLICATE_CLICK_IGNORED", current); - return; - } - if (!force && current === profiles.current) { - debugProfileSwitch("ALREADY_CURRENT_IGNORED", current); - return; - } - - await activateProfile(current, true); - }; + const onSelect = 
useCallback( + (targetProfile: string, force: boolean) => { + if (!force && targetProfile === currentProfileId) { + debugProfileSwitch("ALREADY_CURRENT_IGNORED", targetProfile); + return; + } + requestSwitch(targetProfile, true); + }, + [currentProfileId, requestSwitch], + ); useEffect(() => { - (async () => { - if (current) { - mutateProfiles(); - await activateProfile(current, false); - } - })(); - }, [current, activateProfile, mutateProfiles]); + if (!current) return; + if (current === currentProfileId) return; + if (switchActivatingIds.includes(current)) return; + requestSwitch(current, false); + }, [current, currentProfileId, requestSwitch, switchActivatingIds]); + + useEffect(() => { + let mounted = true; + const panicListener = listen("rust-panic", (event) => { + if (!mounted) return; + const payload = event.payload; + if (!payload) return; + showNotice( + "error", + `Rust panic: ${payload.message} @ ${payload.location}`, + ); + console.error("Rust panic reported from backend:", payload); + }); + return () => { + mounted = false; + panicListener.then((unlisten) => unlisten()).catch(() => {}); + }; + }, [t]); const onEnhance = useLockFn(async (notifySuccess: boolean) => { - if (switchingProfileRef.current) { + if (switchingProfileId) { console.log( - `[Profile] 有profile正在切换中(${switchingProfileRef.current}),跳过enhance操作`, + `[Profile] A profile is currently switching (${switchingProfileId}); skipping enhance operation`, ); return; } const currentProfiles = currentActivatings(); - setActivatings((prev) => [...new Set([...prev, ...currentProfiles])]); + dispatchManualActivatings({ type: "add", ids: currentProfiles }); try { await enhanceProfiles(); @@ -744,17 +940,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err.message || err.toString(), 3000); } finally { - // 保留正在切换的profile,清除其他状态 - setActivatings((prev) => - prev.filter((id) => id === switchingProfileRef.current), - ); + dispatchManualActivatings({ type: "reset" }); } }); const 
onDelete = useLockFn(async (uid: string) => { const current = profiles.current === uid; try { - setActivatings([...(current ? currentActivatings() : []), uid]); + dispatchManualActivatings({ + type: "set", + value: [...new Set([...(current ? currentActivatings() : []), uid])], + }); await deleteProfile(uid); mutateProfiles(); mutateLogs(); @@ -764,11 +960,11 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - setActivatings([]); + dispatchManualActivatings({ type: "reset" }); } }); - // 更新所有订阅 + // Update all profiles const setLoadingCache = useSetLoadingCache(); const onUpdateAll = useLockFn(async () => { const throttleMutate = throttle(mutateProfiles, 2000, { @@ -779,7 +975,7 @@ const ProfilePage = () => { await updateProfile(uid); throttleMutate(); } catch (err: any) { - console.error(`更新订阅 ${uid} 失败:`, err); + console.error(`Failed to update profile ${uid}:`, err); } finally { setLoadingCache((cache) => ({ ...cache, [uid]: false })); } @@ -787,7 +983,7 @@ const ProfilePage = () => { return new Promise((resolve) => { setLoadingCache((cache) => { - // 获取没有正在更新的订阅 + // Gather profiles that are not updating const items = profileItems.filter( (e) => e.type === "remote" && !cache[e.uid], ); @@ -841,11 +1037,11 @@ const ProfilePage = () => { const getSelectionState = () => { if (selectedProfiles.size === 0) { - return "none"; // 无选择 + return "none"; // no selection } else if (selectedProfiles.size === profileItems.length) { - return "all"; // 全选 + return "all"; // all selected } else { - return "partial"; // 部分选择 + return "partial"; // partially selected } }; @@ -859,7 +1055,7 @@ const ProfilePage = () => { ? 
[profiles.current] : []; - setActivatings((prev) => [...new Set([...prev, ...currentActivating])]); + dispatchManualActivatings({ type: "add", ids: currentActivating }); // Delete all selected profiles for (const uid of selectedProfiles) { @@ -882,17 +1078,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - setActivatings([]); + dispatchManualActivatings({ type: "reset" }); } }); const mode = useThemeMode(); - const islight = mode === "light" ? true : false; + const islight = mode === "light"; const dividercolor = islight ? "rgba(0, 0, 0, 0.06)" : "rgba(255, 255, 255, 0.06)"; - // 监听后端配置变更 + // Observe configuration changes from backend useEffect(() => { let unlistenPromise: Promise<() => void> | undefined; let lastProfileId: string | null = null; @@ -906,29 +1102,29 @@ const ProfilePage = () => { const newProfileId = event.payload; const now = Date.now(); - console.log(`[Profile] 收到配置变更事件: ${newProfileId}`); + console.log(`[Profile] Received profile-change event: ${newProfileId}`); if ( lastProfileId === newProfileId && now - lastUpdateTime < debounceDelay ) { - console.log(`[Profile] 重复事件被防抖,跳过`); + console.log(`[Profile] Duplicate event throttled; skipping`); return; } lastProfileId = newProfileId; lastUpdateTime = now; - console.log(`[Profile] 执行配置数据刷新`); + console.log(`[Profile] Performing profile data refresh`); if (refreshTimer !== null) { window.clearTimeout(refreshTimer); } - // 使用异步调度避免阻塞事件处理 + // Use async scheduling to avoid blocking event handling refreshTimer = window.setTimeout(() => { mutateProfiles().catch((error) => { - console.error("[Profile] 配置数据刷新失败:", error); + console.error("[Profile] Profile data refresh failed:", error); }); refreshTimer = null; }, 0); @@ -945,16 +1141,6 @@ const ProfilePage = () => { }; }, [mutateProfiles]); - // 组件卸载时清理中断控制器 - useEffect(() => { - return () => { - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - 
debugProfileSwitch("COMPONENT_UNMOUNT_CLEANUP", "all"); - } - }; - }, []); - return ( { - {/* 故障检测和紧急恢复按钮 */} + {/* Fault detection and emergency recovery button */} {(error || isStale) && ( { ref={viewerRef} onChange={async (isActivating) => { mutateProfiles(); - // 只有更改当前激活的配置时才触发全局重新加载 + // Only trigger global reload when the active profile changes if (isActivating) { await onEnhance(false); } diff --git a/src/providers/app-data-context.ts b/src/providers/app-data-context.ts index 7b7244ab..6bf02313 100644 --- a/src/providers/app-data-context.ts +++ b/src/providers/app-data-context.ts @@ -6,8 +6,15 @@ import { RuleProvider, } from "tauri-plugin-mihomo-api"; +import { ProxiesView, type ProfileSwitchStatus } from "@/services/cmds"; + export interface AppDataContextType { - proxies: any; + proxies: ProxiesView | null; + proxyHydration: "none" | "snapshot" | "live"; + proxyTargetProfileId: string | null; + proxyDisplayProfileId: string | null; + isProxyRefreshPending: boolean; + switchStatus: ProfileSwitchStatus | null; clashConfig: BaseConfig; rules: Rule[]; sysproxy: any; diff --git a/src/providers/app-data-provider.tsx b/src/providers/app-data-provider.tsx index c71528c4..9c97c61f 100644 --- a/src/providers/app-data-provider.tsx +++ b/src/providers/app-data-provider.tsx @@ -1,6 +1,6 @@ import { listen } from "@tauri-apps/api/event"; -import React, { useCallback, useEffect, useMemo } from "react"; -import useSWR from "swr"; +import React, { useCallback, useEffect, useMemo, useRef } from "react"; +import useSWR, { mutate as globalMutate } from "swr"; import { getBaseConfig, getRuleProviders, @@ -9,31 +9,53 @@ import { import { useVerge } from "@/hooks/use-verge"; import { - calcuProxies, calcuProxyProviders, getAppUptime, + getProfileSwitchStatus, + getProfileSwitchEvents, + getProfiles as fetchProfilesConfig, getRunningMode, + readProfileFile, getSystemProxy, + type ProxiesView, + type ProfileSwitchStatus, + type SwitchResultStatus, } from "@/services/cmds"; 
-import { SWR_DEFAULTS, SWR_REALTIME, SWR_SLOW_POLL } from "@/services/config"; +import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config"; +import { useProfileStore } from "@/stores/profile-store"; +import { + applyLiveProxyPayload, + fetchLiveProxies, + type ProxiesUpdatedPayload, + useProxyStore, +} from "@/stores/proxy-store"; +import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot"; import { AppDataContext, AppDataContextType } from "./app-data-context"; -// 全局数据提供者组件 +// Global app data provider export const AppDataProvider = ({ children, }: { children: React.ReactNode; }) => { const { verge } = useVerge(); - - const { data: proxiesData, mutate: refreshProxy } = useSWR( - "getProxies", - calcuProxies, - { - ...SWR_REALTIME, - onError: (err) => console.warn("[DataProvider] Proxy fetch failed:", err), - }, + const applyProfileSwitchResult = useProfileStore( + (state) => state.applySwitchResult, + ); + const commitProfileSnapshot = useProfileStore( + (state) => state.commitHydrated, + ); + const setSwitchEventSeq = useProfileStore((state) => state.setLastEventSeq); + const proxyView = useProxyStore((state) => state.data); + const proxyHydration = useProxyStore((state) => state.hydration); + const proxyProfileId = useProxyStore((state) => state.lastProfileId); + const pendingProxyProfileId = useProxyStore( + (state) => state.pendingProfileId, + ); + const setProxySnapshot = useProxyStore((state) => state.setSnapshot); + const clearPendingProxyProfile = useProxyStore( + (state) => state.clearPendingProfile, ); const { data: clashConfig, mutate: refreshClashConfig } = useSWR( @@ -60,25 +82,259 @@ export const AppDataProvider = ({ SWR_DEFAULTS, ); - useEffect(() => { - let lastProfileId: string | null = null; - let lastUpdateTime = 0; - const refreshThrottle = 800; + const { data: switchStatus, mutate: mutateSwitchStatus } = + useSWR( + "getProfileSwitchStatus", + getProfileSwitchStatus, + { + refreshInterval: (status) => + status && 
(status.isSwitching || (status.queue?.length ?? 0) > 0) + ? 400 + : 4000, + dedupingInterval: 200, + }, + ); - let isUnmounted = false; - const scheduledTimeouts = new Set(); + const isUnmountedRef = useRef(false); + // Keep track of pending timers so we can cancel them on unmount and avoid stray updates. + const scheduledTimeoutsRef = useRef>(new Set()); + // Shared metadata to dedupe switch events coming from both polling and subscriptions. + const switchMetaRef = useRef<{ + pendingProfileId: string | null; + lastResultTaskId: number | null; + }>({ + pendingProfileId: null, + lastResultTaskId: null, + }); + const switchEventSeqRef = useRef(0); + const profileChangeMetaRef = useRef<{ + lastProfileId: string | null; + lastEventTs: number; + }>({ + lastProfileId: null, + lastEventTs: 0, + }); + const lastClashRefreshAtRef = useRef(0); + const PROFILE_EVENT_DEDUP_MS = 400; + const CLASH_REFRESH_DEDUP_MS = 300; + + // Thin wrapper around setTimeout that no-ops once the provider unmounts. + const scheduleTimeout = useCallback( + (callback: () => void | Promise, delay: number) => { + if (isUnmountedRef.current) return -1; + + const timeoutId = window.setTimeout(() => { + scheduledTimeoutsRef.current.delete(timeoutId); + if (!isUnmountedRef.current) { + void callback(); + } + }, delay); + + scheduledTimeoutsRef.current.add(timeoutId); + return timeoutId; + }, + [], + ); + + const clearAllTimeouts = useCallback(() => { + scheduledTimeoutsRef.current.forEach((timeoutId) => + clearTimeout(timeoutId), + ); + scheduledTimeoutsRef.current.clear(); + }, []); + + // Delay live proxy refreshes slightly so we don't hammer Mihomo while a switch is still applying. 
+ const queueProxyRefresh = useCallback( + (reason: string, delay = 1500) => { + scheduleTimeout(() => { + fetchLiveProxies().catch((error) => + console.warn( + `[DataProvider] Proxy refresh failed (${reason}, fallback):`, + error, + ), + ); + }, delay); + }, + [scheduleTimeout], + ); + // Prime the proxy store with the static selections from the profile YAML before live data arrives. + const seedProxySnapshot = useCallback( + async (profileId: string) => { + if (!profileId) return; + + try { + const yamlContent = await readProfileFile(profileId); + const snapshot = createProxySnapshotFromProfile(yamlContent); + if (!snapshot) return; + + setProxySnapshot(snapshot, profileId); + } catch (error) { + console.warn( + "[DataProvider] Failed to seed proxy snapshot from profile:", + error, + ); + } + }, + [setProxySnapshot], + ); + + const handleSwitchResult = useCallback( + (result: SwitchResultStatus) => { + // Ignore duplicate notifications for the same switch execution. + const meta = switchMetaRef.current; + if (result.taskId === meta.lastResultTaskId) { + return; + } + meta.lastResultTaskId = result.taskId; + + // Optimistically update the SWR cache so the UI shows the new profile immediately. + void globalMutate( + "getProfiles", + (current?: IProfilesConfig | null) => { + if (!current || !result.success) { + return current; + } + if (current.current === result.profileId) { + return current; + } + return { + ...current, + current: result.profileId, + }; + }, + false, + ); + + applyProfileSwitchResult(result); + if (!result.success) { + clearPendingProxyProfile(); + } + + if (result.success && result.cancelled !== true) { + // Once the backend settles, refresh all dependent data in the background. 
+ scheduleTimeout(() => { + void Promise.allSettled([ + fetchProfilesConfig().then((data) => { + commitProfileSnapshot(data); + globalMutate("getProfiles", data, false); + }), + fetchLiveProxies(), + refreshProxyProviders(), + refreshRules(), + refreshRuleProviders(), + ]).catch((error) => { + console.warn( + "[DataProvider] Background refresh after profile switch failed:", + error, + ); + }); + }, 100); + } + + void mutateSwitchStatus((current) => { + if (!current) { + return current; + } + const filteredQueue = current.queue.filter( + (task) => task.taskId !== result.taskId, + ); + const active = + current.active && current.active.taskId === result.taskId + ? null + : current.active; + const isSwitching = filteredQueue.length > 0; + return { + ...current, + active, + queue: filteredQueue, + isSwitching, + lastResult: result, + }; + }, false); + }, + [ + scheduleTimeout, + refreshProxyProviders, + refreshRules, + refreshRuleProviders, + mutateSwitchStatus, + applyProfileSwitchResult, + commitProfileSnapshot, + clearPendingProxyProfile, + ], + ); + + useEffect(() => { + isUnmountedRef.current = false; + return () => { + isUnmountedRef.current = true; + clearAllTimeouts(); + }; + }, [clearAllTimeouts]); + + useEffect(() => { + if (!switchStatus) { + return; + } + + const meta = switchMetaRef.current; + const nextTarget = + switchStatus.active?.profileId ?? + (switchStatus.queue.length > 0 ? switchStatus.queue[0].profileId : null); + + if (nextTarget && nextTarget !== meta.pendingProfileId) { + meta.pendingProfileId = nextTarget; + void seedProxySnapshot(nextTarget); + } else if (!nextTarget) { + meta.pendingProfileId = null; + } + + const lastResult = switchStatus.lastResult ?? 
null; + if (lastResult) { + handleSwitchResult(lastResult); + } + }, [switchStatus, seedProxySnapshot, handleSwitchResult]); + + useEffect(() => { + let disposed = false; + + const pollEvents = async () => { + if (disposed) { + return; + } + try { + const events = await getProfileSwitchEvents(switchEventSeqRef.current); + if (events.length > 0) { + switchEventSeqRef.current = events[events.length - 1].sequence; + setSwitchEventSeq(switchEventSeqRef.current); + events.forEach((event) => handleSwitchResult(event.result)); + } + } catch (error) { + console.warn("[DataProvider] Failed to poll switch events:", error); + } finally { + if (!disposed) { + const nextDelay = + switchStatus && + (switchStatus.isSwitching || (switchStatus.queue?.length ?? 0) > 0) + ? 250 + : 1000; + scheduleTimeout(pollEvents, nextDelay); + } + } + }; + + scheduleTimeout(pollEvents, 0); + + return () => { + disposed = true; + }; + }, [scheduleTimeout, handleSwitchResult, switchStatus, setSwitchEventSeq]); + + useEffect(() => { const cleanupFns: Array<() => void> = []; const registerCleanup = (fn: () => void) => { - if (isUnmounted) { - try { - fn(); - } catch (error) { - console.error("[DataProvider] Immediate cleanup failed:", error); - } - } else { - cleanupFns.push(fn); - } + cleanupFns.push(fn); }; const addWindowListener = (eventName: string, handler: EventListener) => { @@ -87,140 +343,319 @@ export const AppDataProvider = ({ return () => window.removeEventListener(eventName, handler); }; - const scheduleTimeout = ( - callback: () => void | Promise, - delay: number, + const runProfileChangedPipeline = ( + profileId: string | null, + source: "tauri" | "window", ) => { - if (isUnmounted) return -1; - - const timeoutId = window.setTimeout(() => { - scheduledTimeouts.delete(timeoutId); - if (!isUnmounted) { - void callback(); - } - }, delay); - - scheduledTimeouts.add(timeoutId); - return timeoutId; - }; - - const clearAllTimeouts = () => { - scheduledTimeouts.forEach((timeoutId) => 
clearTimeout(timeoutId)); - scheduledTimeouts.clear(); - }; - - const handleProfileChanged = (event: { payload: string }) => { - const newProfileId = event.payload; const now = Date.now(); + const meta = profileChangeMetaRef.current; if ( - lastProfileId === newProfileId && - now - lastUpdateTime < refreshThrottle + meta.lastProfileId === profileId && + now - meta.lastEventTs < PROFILE_EVENT_DEDUP_MS ) { return; } - lastProfileId = newProfileId; - lastUpdateTime = now; + meta.lastProfileId = profileId; + meta.lastEventTs = now; - scheduleTimeout(() => { - refreshRules().catch((error) => - console.warn("[DataProvider] Rules refresh failed:", error), - ); - refreshRuleProviders().catch((error) => - console.warn("[DataProvider] Rule providers refresh failed:", error), - ); - }, 200); - }; - - const handleRefreshClash = () => { - const now = Date.now(); - if (now - lastUpdateTime <= refreshThrottle) return; - - lastUpdateTime = now; - scheduleTimeout(() => { - refreshProxy().catch((error) => - console.error("[DataProvider] Proxy refresh failed:", error), - ); - }, 200); - }; - - const handleRefreshProxy = () => { - const now = Date.now(); - if (now - lastUpdateTime <= refreshThrottle) return; - - lastUpdateTime = now; - scheduleTimeout(() => { - refreshProxy().catch((error) => - console.warn("[DataProvider] Proxy refresh failed:", error), - ); - }, 200); - }; - - const initializeListeners = async () => { - try { - const unlistenProfile = await listen( - "profile-changed", - handleProfileChanged, - ); - registerCleanup(unlistenProfile); - } catch (error) { - console.error("[AppDataProvider] 监听 Profile 事件失败:", error); + if (profileId) { + void seedProxySnapshot(profileId); } - try { - const unlistenClash = await listen( - "verge://refresh-clash-config", - handleRefreshClash, + queueProxyRefresh(`profile-changed-${source}`, 500); + + scheduleTimeout(() => { + void fetchProfilesConfig() + .then((data) => { + commitProfileSnapshot(data); + globalMutate("getProfiles", data, 
false); + }) + .catch((error) => + console.warn( + "[AppDataProvider] Failed to refresh profiles after profile change:", + error, + ), + ); + void refreshProxyProviders().catch((error) => + console.warn( + "[AppDataProvider] Proxy providers refresh failed after profile change:", + error, + ), ); - const unlistenProxy = await listen( - "verge://refresh-proxy-config", - handleRefreshProxy, + void refreshRules().catch((error) => + console.warn( + "[AppDataProvider] Rules refresh failed after profile change:", + error, + ), ); + void refreshRuleProviders().catch((error) => + console.warn( + "[AppDataProvider] Rule providers refresh failed after profile change:", + error, + ), + ); + }, 200); + }; - registerCleanup(() => { - unlistenClash(); - unlistenProxy(); - }); - } catch (error) { - console.warn("[AppDataProvider] 设置 Tauri 事件监听器失败:", error); + const handleProfileChanged = (event: { payload: string }) => { + runProfileChangedPipeline(event.payload ?? null, "tauri"); + }; - const fallbackHandlers: Array<[string, EventListener]> = [ - ["verge://refresh-clash-config", handleRefreshClash], - ["verge://refresh-proxy-config", handleRefreshProxy], - ]; + const runRefreshClashPipeline = (source: "tauri" | "window") => { + const now = Date.now(); + if (now - lastClashRefreshAtRef.current < CLASH_REFRESH_DEDUP_MS) { + return; + } - fallbackHandlers.forEach(([eventName, handler]) => { - registerCleanup(addWindowListener(eventName, handler)); - }); + lastClashRefreshAtRef.current = now; + + scheduleTimeout(() => { + void refreshClashConfig().catch((error) => + console.warn( + "[AppDataProvider] Clash config refresh failed after backend update:", + error, + ), + ); + void refreshRules().catch((error) => + console.warn( + "[AppDataProvider] Rules refresh failed after backend update:", + error, + ), + ); + void refreshRuleProviders().catch((error) => + console.warn( + "[AppDataProvider] Rule providers refresh failed after backend update:", + error, + ), + ); + void 
refreshProxyProviders().catch((error) => + console.warn( + "[AppDataProvider] Proxy providers refresh failed after backend update:", + error, + ), + ); + }, 0); + + queueProxyRefresh(`refresh-clash-config-${source}`, 400); + }; + + const handleProfileUpdateCompleted = (_: { payload: { uid: string } }) => { + queueProxyRefresh("profile-update-completed", 3000); + if (!isUnmountedRef.current) { + scheduleTimeout(() => { + void refreshProxyProviders().catch((error) => + console.warn( + "[DataProvider] Proxy providers refresh failed after profile update completed:", + error, + ), + ); + }, 0); } }; - void initializeListeners(); + const isProxiesPayload = ( + value: unknown, + ): value is ProxiesUpdatedPayload => { + if (!value || typeof value !== "object") { + return false; + } + const candidate = value as Partial; + return candidate.proxies !== undefined && candidate.proxies !== null; + }; + + const handleProxiesUpdatedPayload = ( + rawPayload: unknown, + source: "tauri" | "window", + ) => { + if (!isProxiesPayload(rawPayload)) { + console.warn( + `[AppDataProvider] Ignored ${source} proxies-updated payload`, + rawPayload, + ); + queueProxyRefresh(`proxies-updated-${source}-invalid`, 500); + return; + } + + try { + applyLiveProxyPayload(rawPayload); + } catch (error) { + console.warn( + `[AppDataProvider] Failed to apply ${source} proxies-updated payload`, + error, + ); + queueProxyRefresh(`proxies-updated-${source}-apply-failed`, 500); + } + }; + + listen<{ uid: string }>( + "profile-update-completed", + handleProfileUpdateCompleted, + ) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach profile update listeners:", + error, + ), + ); + + listen("profile-changed", handleProfileChanged) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach profile-changed listener:", + error, + ), + ); + + listen("proxies-updated", (event) => { + handleProxiesUpdatedPayload(event.payload, 
"tauri"); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach proxies-updated listener:", + error, + ), + ); + + listen("verge://refresh-clash-config", () => { + runRefreshClashPipeline("tauri"); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach refresh-clash-config listener:", + error, + ), + ); + + listen("verge://refresh-proxy-config", () => { + queueProxyRefresh("refresh-proxy-config-tauri", 500); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach refresh-proxy-config listener:", + error, + ), + ); + + const fallbackHandlers: Array<[string, EventListener]> = [ + [ + "profile-update-completed", + ((event: Event) => { + const payload = (event as CustomEvent<{ uid: string }>).detail ?? { + uid: "", + }; + handleProfileUpdateCompleted({ payload }); + }) as EventListener, + ], + [ + "profile-changed", + ((event: Event) => { + const payload = (event as CustomEvent).detail ?? null; + runProfileChangedPipeline(payload, "window"); + }) as EventListener, + ], + [ + "proxies-updated", + ((event: Event) => { + const payload = (event as CustomEvent).detail; + handleProxiesUpdatedPayload(payload, "window"); + }) as EventListener, + ], + [ + "verge://refresh-clash-config", + (() => { + runRefreshClashPipeline("window"); + }) as EventListener, + ], + [ + "verge://refresh-proxy-config", + (() => { + queueProxyRefresh("refresh-proxy-config-window", 500); + }) as EventListener, + ], + ]; + + fallbackHandlers.forEach(([eventName, handler]) => { + registerCleanup(addWindowListener(eventName, handler)); + }); return () => { - isUnmounted = true; - clearAllTimeouts(); - - const errors: Error[] = []; - cleanupFns.splice(0).forEach((fn) => { + cleanupFns.forEach((fn) => { try { fn(); } catch (error) { - errors.push( - error instanceof Error ? 
error : new Error(String(error)), - ); + console.error("[AppDataProvider] cleanup error:", error); } }); - - if (errors.length > 0) { - console.error( - `[DataProvider] ${errors.length} errors during cleanup:`, - errors, - ); - } }; - }, [refreshProxy, refreshRules, refreshRuleProviders]); + }, [ + commitProfileSnapshot, + queueProxyRefresh, + refreshClashConfig, + refreshProxyProviders, + refreshRuleProviders, + refreshRules, + scheduleTimeout, + seedProxySnapshot, + ]); + + const switchTargetProfileId = + switchStatus?.active?.profileId ?? + (switchStatus && switchStatus.queue.length > 0 + ? switchStatus.queue[0].profileId + : null); + + const proxyTargetProfileId = + switchTargetProfileId ?? pendingProxyProfileId ?? proxyProfileId ?? null; + const displayProxyStateRef = useRef<{ + view: ProxiesView | null; + profileId: string | null; + }>({ + view: proxyView, + profileId: proxyTargetProfileId, + }); + + const currentDisplay = displayProxyStateRef.current; + + if (!proxyView) { + if ( + currentDisplay.view !== null || + currentDisplay.profileId !== proxyTargetProfileId + ) { + displayProxyStateRef.current = { + view: null, + profileId: proxyTargetProfileId, + }; + } + } else if (proxyHydration === "live") { + if ( + currentDisplay.view !== proxyView || + currentDisplay.profileId !== proxyTargetProfileId + ) { + displayProxyStateRef.current = { + view: proxyView, + profileId: proxyTargetProfileId, + }; + } + } else if (!currentDisplay.view) { + displayProxyStateRef.current = { + view: proxyView, + profileId: proxyTargetProfileId, + }; + } + const displayProxyState = displayProxyStateRef.current; + const proxyDisplayProfileId = displayProxyState.profileId; + const proxiesForRender = displayProxyState.view ?? proxyView; + const isProxyRefreshPending = + (switchStatus?.isSwitching ?? 
false) || + proxyHydration !== "live" || + proxyTargetProfileId !== proxyDisplayProfileId; const { data: sysproxy, mutate: refreshSysproxy } = useSWR( "getSystemProxy", @@ -240,10 +675,10 @@ export const AppDataProvider = ({ errorRetryCount: 1, }); - // 提供统一的刷新方法 + // Provide unified refresh method const refreshAll = useCallback(async () => { await Promise.all([ - refreshProxy(), + fetchLiveProxies(), refreshClashConfig(), refreshRules(), refreshSysproxy(), @@ -251,7 +686,6 @@ export const AppDataProvider = ({ refreshRuleProviders(), ]); }, [ - refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -259,22 +693,22 @@ export const AppDataProvider = ({ refreshRuleProviders, ]); - // 聚合所有数据 + // Aggregate data into context value const value = useMemo(() => { - // 计算系统代理地址 + // Compute the system proxy address const calculateSystemProxyAddress = () => { if (!verge || !clashConfig) return "-"; const isPacMode = verge.proxy_auto_config ?? false; if (isPacMode) { - // PAC模式:显示我们期望设置的代理地址 + // PAC mode: display the desired proxy address const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; return `${proxyHost}:${proxyPort}`; } else { - // HTTP代理模式:优先使用系统地址,但如果格式不正确则使用期望地址 + // HTTP proxy mode: prefer system address, fallback to desired address if invalid const systemServer = sysproxy?.server; if ( systemServer && @@ -283,7 +717,7 @@ export const AppDataProvider = ({ ) { return systemServer; } else { - // 系统地址无效,返回期望的代理地址 + // System address invalid: fallback to desired proxy address const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; @@ -293,22 +727,27 @@ export const AppDataProvider = ({ }; return { - // 数据 - proxies: proxiesData, + // Data + proxies: proxiesForRender, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + switchStatus: switchStatus ?? 
null, clashConfig, rules: rulesData?.rules || [], sysproxy, runningMode, uptime: uptimeData || 0, - // 提供者数据 + // Provider data proxyProviders: proxyProviders || {}, ruleProviders: ruleProviders?.providers || {}, systemProxyAddress: calculateSystemProxyAddress(), - // 刷新方法 - refreshProxy, + // Refresh helpers + refreshProxy: fetchLiveProxies, refreshClashConfig, refreshRules, refreshSysproxy, @@ -317,7 +756,12 @@ export const AppDataProvider = ({ refreshAll, } as AppDataContextType; }, [ - proxiesData, + proxiesForRender, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + switchStatus, clashConfig, rulesData, sysproxy, @@ -326,7 +770,6 @@ export const AppDataProvider = ({ proxyProviders, ruleProviders, verge, - refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, diff --git a/src/services/cmds.ts b/src/services/cmds.ts index e1e686bd..09894418 100644 --- a/src/services/cmds.ts +++ b/src/services/cmds.ts @@ -4,6 +4,52 @@ import { getProxies, getProxyProviders } from "tauri-plugin-mihomo-api"; import { showNotice } from "@/services/noticeService"; +export type ProxyProviderRecord = Record< + string, + IProxyProviderItem | undefined +>; + +export interface SwitchTaskStatus { + taskId: number; + profileId: string; + notify: boolean; + stage?: number | null; + queued: boolean; +} + +export interface SwitchResultStatus { + taskId: number; + profileId: string; + success: boolean; + cancelled?: boolean; + finishedAt: number; + errorStage?: string | null; + errorDetail?: string | null; +} + +export interface ProfileSwitchStatus { + isSwitching: boolean; + active?: SwitchTaskStatus | null; + queue: SwitchTaskStatus[]; + cleanupProfiles: string[]; + lastResult?: SwitchResultStatus | null; + lastUpdated: number; +} + +export interface SwitchResultEvent { + sequence: number; + result: SwitchResultStatus; +} + +// Persist the last proxy provider payload so UI can render while waiting on Mihomo. 
+let cachedProxyProviders: ProxyProviderRecord | null = null; + +export const getCachedProxyProviders = () => cachedProxyProviders; + +export const setCachedProxyProviders = (record: ProxyProviderRecord | null) => { + cachedProxyProviders = record; +}; + export async function copyClashEnv() { return invoke("copy_clash_env"); } @@ -20,6 +66,14 @@ export async function patchProfilesConfig(profiles: IProfilesConfig) { return invoke("patch_profiles_config", { profiles }); } +// Triggers the async state-machine driven switch flow on the backend. +export async function switchProfileCommand( + profileIndex: string, + notifySuccess: boolean, +) { + return invoke("switch_profile", { profileIndex, notifySuccess }); +} + export async function createProfile( item: Partial, fileData?: string | null, @@ -113,27 +167,29 @@ export async function syncTrayProxySelection() { return invoke("sync_tray_proxy_selection"); } -export async function calcuProxies(): Promise<{ +export interface ProxiesView { global: IProxyGroupItem; direct: IProxyItem; groups: IProxyGroupItem[]; records: Record; proxies: IProxyItem[]; -}> { - const [proxyResponse, providerResponse] = await Promise.all([ - getProxies(), - calcuProxyProviders(), - ]); +} +export function buildProxyView( + proxyResponse: Awaited>, + providerRecord?: ProxyProviderRecord | null, +): ProxiesView { const proxyRecord = proxyResponse.proxies; - const providerRecord = providerResponse; // provider name map - const providerMap = Object.fromEntries( - Object.entries(providerRecord).flatMap(([provider, item]) => - item!.proxies.map((p) => [p.name, { ...p, provider }]), - ), - ); + const providerMap = providerRecord + ? 
Object.fromEntries( + Object.entries(providerRecord).flatMap(([provider, item]) => { + if (!item) return []; + return item.proxies.map((p) => [p.name, { ...p, provider }]); + }), + ) + : {}; // compatible with proxy-providers const generateItem = (name: string) => { @@ -207,16 +263,56 @@ export async function calcuProxies(): Promise<{ }; } +export async function calcuProxies(): Promise { + const proxyResponse = await getProxies(); + + let providerRecord = cachedProxyProviders; + if (!providerRecord) { + try { + providerRecord = await calcuProxyProviders(); + } catch (error) { + console.warn("[calcuProxies] 代理提供者加载失败:", error); + } + } + + return buildProxyView(proxyResponse, providerRecord); +} + export async function calcuProxyProviders() { const providers = await getProxyProviders(); - return Object.fromEntries( - Object.entries(providers.providers) - .sort() - .filter( - ([_, item]) => - item?.vehicleType === "HTTP" || item?.vehicleType === "File", - ), - ); + const mappedEntries = Object.entries(providers.providers) + .sort() + .filter( + ([, item]) => + item?.vehicleType === "HTTP" || item?.vehicleType === "File", + ) + .map(([name, item]) => { + if (!item) return [name, undefined] as const; + + const subscriptionInfo = + item.subscriptionInfo && typeof item.subscriptionInfo === "object" + ? { + Upload: item.subscriptionInfo.Upload ?? 0, + Download: item.subscriptionInfo.Download ?? 0, + Total: item.subscriptionInfo.Total ?? 0, + Expire: item.subscriptionInfo.Expire ?? 0, + } + : undefined; + + const normalized: IProxyProviderItem = { + name: item.name, + type: item.type, + proxies: item.proxies ?? [], + updatedAt: item.updatedAt ?? "", + vehicleType: item.vehicleType ?? 
"", + subscriptionInfo, + }; + return [name, normalized] as const; + }); + + const mapped = Object.fromEntries(mappedEntries) as ProxyProviderRecord; + cachedProxyProviders = mapped; + return mapped; } export async function getClashLogs() { @@ -555,3 +651,13 @@ export const isAdmin = async () => { export async function getNextUpdateTime(uid: string) { return invoke("get_next_update_time", { uid }); } + +export async function getProfileSwitchStatus() { + return invoke("get_profile_switch_status"); +} + +export async function getProfileSwitchEvents(afterSequence: number) { + return invoke("get_profile_switch_events", { + afterSequence, + }); +} diff --git a/src/services/noticeService.ts b/src/services/noticeService.ts index 0a3505da..7275dd45 100644 --- a/src/services/noticeService.ts +++ b/src/services/noticeService.ts @@ -14,10 +14,20 @@ let nextId = 0; let notices: NoticeItem[] = []; const listeners: Set = new Set(); -function notifyListeners() { +function flushListeners() { listeners.forEach((listener) => listener([...notices])); // Pass a copy } +let notifyScheduled = false; +function scheduleNotify() { + if (notifyScheduled) return; + notifyScheduled = true; + requestAnimationFrame(() => { + notifyScheduled = false; + flushListeners(); + }); +} + // Shows a notification. export function showNotice( @@ -44,7 +54,7 @@ export function showNotice( } notices = [...notices, newNotice]; - notifyListeners(); + scheduleNotify(); return id; } @@ -56,7 +66,7 @@ export function hideNotice(id: number) { clearTimeout(notice.timerId); // Clear timeout if manually closed } notices = notices.filter((n) => n.id !== id); - notifyListeners(); + scheduleNotify(); } // Subscribes a listener function to notice state changes. 
@@ -77,5 +87,5 @@ export function clearAllNotices() { if (n.timerId) clearTimeout(n.timerId); }); notices = []; - notifyListeners(); + scheduleNotify(); } diff --git a/src/services/refresh.ts b/src/services/refresh.ts new file mode 100644 index 00000000..6150680d --- /dev/null +++ b/src/services/refresh.ts @@ -0,0 +1,24 @@ +import { mutate } from "swr"; + +import { getAxios } from "@/services/api"; + +export const refreshClashData = async () => { + try { + await getAxios(true); + } catch (error) { + console.warn("[Refresh] getAxios failed during clash refresh:", error); + } + + mutate("getProxies"); + mutate("getVersion"); + mutate("getClashConfig"); + mutate("getProxyProviders"); +}; + +export const refreshVergeData = () => { + mutate("getVergeConfig"); + mutate("getSystemProxy"); + mutate("getAutotemProxy"); + mutate("getRunningMode"); + mutate("isServiceAvailable"); +}; diff --git a/src/stores/profile-store.ts b/src/stores/profile-store.ts new file mode 100644 index 00000000..446b6226 --- /dev/null +++ b/src/stores/profile-store.ts @@ -0,0 +1,59 @@ +import { create } from "zustand"; + +import type { SwitchResultStatus } from "@/services/cmds"; + +interface ProfileStoreState { + data: IProfilesConfig | null; + optimisticCurrent: string | null; + isHydrating: boolean; + lastEventSeq: number; + lastResult: SwitchResultStatus | null; + applySwitchResult: (result: SwitchResultStatus) => void; + commitHydrated: (data: IProfilesConfig) => void; + setLastEventSeq: (sequence: number) => void; +} + +export const useProfileStore = create((set) => ({ + data: null, + optimisticCurrent: null, + isHydrating: false, + lastEventSeq: 0, + lastResult: null, + applySwitchResult(result) { + // Record the optimistic switch outcome so the UI reflects the desired profile immediately. + set((state) => ({ + lastResult: result, + optimisticCurrent: result.success ? result.profileId : null, + isHydrating: result.success ? 
true : state.isHydrating, + })); + }, + commitHydrated(data) { + set({ + data, + optimisticCurrent: null, + isHydrating: false, + }); + }, + setLastEventSeq(sequence) { + set({ lastEventSeq: sequence }); + }, +})); + +export const selectEffectiveProfiles = (state: ProfileStoreState) => { + if (!state.data) { + return null; + } + // Prefer the optimistic selection while hydration is pending. + const current = state.optimisticCurrent ?? state.data.current; + if ( + state.optimisticCurrent && + state.optimisticCurrent !== state.data.current + ) { + return { ...state.data, current } as IProfilesConfig; + } + return state.data; +}; + +export const selectIsHydrating = (state: ProfileStoreState) => + state.isHydrating; +export const selectLastResult = (state: ProfileStoreState) => state.lastResult; diff --git a/src/stores/proxy-store.ts b/src/stores/proxy-store.ts new file mode 100644 index 00000000..b8c87ef8 --- /dev/null +++ b/src/stores/proxy-store.ts @@ -0,0 +1,298 @@ +import type { getProxies } from "tauri-plugin-mihomo-api"; +import { create } from "zustand"; + +import { + ProxiesView, + ProxyProviderRecord, + buildProxyView, + calcuProxies, + getCachedProxyProviders, + setCachedProxyProviders, +} from "@/services/cmds"; +import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue"; + +type ProxyHydration = "none" | "snapshot" | "live"; +type RawProxiesResponse = Awaited>; + +export interface ProxiesUpdatedPayload { + proxies: RawProxiesResponse; + providers?: Record | null; + emittedAt?: number; + profileId?: string | null; +} + +interface ProxyStoreState { + data: ProxiesView | null; + hydration: ProxyHydration; + lastUpdated: number | null; + lastProfileId: string | null; + liveFetchRequestId: number; + lastAppliedFetchId: number; + pendingProfileId: string | null; + pendingSnapshotFetchId: number | null; + setSnapshot: (snapshot: ProxiesView, profileId: string) => void; + setLive: (payload: ProxiesUpdatedPayload) => void; + startLiveFetch: () => number; + 
completeLiveFetch: (requestId: number, view: ProxiesView) => void; + clearPendingProfile: () => void; + reset: () => void; +} + +const normalizeProviderPayload = ( + raw: ProxiesUpdatedPayload["providers"], +): ProxyProviderRecord | null => { + if (!raw || typeof raw !== "object") return null; + + const rawRecord = raw as Record; + const source = + rawRecord.providers && typeof rawRecord.providers === "object" + ? (rawRecord.providers as Record) + : rawRecord; + + const entries = Object.entries(source) + .sort(([a], [b]) => a.localeCompare(b)) + .filter(([, value]) => { + if (!value || typeof value !== "object") { + return false; + } + const vehicleType = value.vehicleType; + return vehicleType === "HTTP" || vehicleType === "File"; + }) + .map(([name, value]) => { + const normalized: IProxyProviderItem = { + name: value.name ?? name, + type: value.type ?? "", + proxies: Array.isArray(value.proxies) ? value.proxies : [], + updatedAt: value.updatedAt ?? "", + vehicleType: value.vehicleType ?? "", + subscriptionInfo: + value.subscriptionInfo && typeof value.subscriptionInfo === "object" + ? { + Upload: Number(value.subscriptionInfo.Upload ?? 0), + Download: Number(value.subscriptionInfo.Download ?? 0), + Total: Number(value.subscriptionInfo.Total ?? 0), + Expire: Number(value.subscriptionInfo.Expire ?? 
0), + } + : undefined, + }; + + return [name, normalized] as const; + }); + + return Object.fromEntries(entries) as ProxyProviderRecord; +}; + +export const useProxyStore = create((set, get) => ({ + data: null, + hydration: "none", + lastUpdated: null, + lastProfileId: null, + liveFetchRequestId: 0, + lastAppliedFetchId: 0, + pendingProfileId: null, + pendingSnapshotFetchId: null, + setSnapshot(snapshot, profileId) { + const stateBefore = get(); + + set((state) => ({ + data: snapshot, + hydration: "snapshot", + lastUpdated: null, + pendingProfileId: profileId, + pendingSnapshotFetchId: state.liveFetchRequestId, + })); + + const hasLiveHydration = + stateBefore.hydration === "live" && + stateBefore.lastProfileId === profileId; + + if (profileId && !hasLiveHydration) { + void fetchLiveProxies().catch((error) => { + console.warn( + "[ProxyStore] Failed to bootstrap live proxies from snapshot:", + error, + ); + scheduleBootstrapLiveFetch(800); + }); + } + }, + setLive(payload) { + const state = get(); + const emittedAt = payload.emittedAt ?? Date.now(); + + if ( + state.hydration === "live" && + state.lastUpdated !== null && + emittedAt <= state.lastUpdated + ) { + return; + } + + const providersRecord = + normalizeProviderPayload(payload.providers) ?? getCachedProxyProviders(); + + if (providersRecord) { + setCachedProxyProviders(providersRecord); + } + + const view = buildProxyView(payload.proxies, providersRecord); + const nextProfileId = payload.profileId ?? state.lastProfileId; + + set((current) => ({ + data: view, + hydration: "live", + lastUpdated: emittedAt, + lastProfileId: nextProfileId ?? 
null, + lastAppliedFetchId: current.liveFetchRequestId, + pendingProfileId: null, + pendingSnapshotFetchId: null, + })); + }, + startLiveFetch() { + let nextRequestId = 0; + set((state) => { + nextRequestId = state.liveFetchRequestId + 1; + return { + liveFetchRequestId: nextRequestId, + }; + }); + return nextRequestId; + }, + completeLiveFetch(requestId, view) { + const state = get(); + if (requestId <= state.lastAppliedFetchId) { + return; + } + + const shouldAdoptPending = + state.pendingProfileId !== null && + requestId >= (state.pendingSnapshotFetchId ?? 0); + + set({ + data: view, + hydration: "live", + lastUpdated: Date.now(), + lastProfileId: shouldAdoptPending + ? state.pendingProfileId + : state.lastProfileId, + lastAppliedFetchId: requestId, + pendingProfileId: shouldAdoptPending ? null : state.pendingProfileId, + pendingSnapshotFetchId: shouldAdoptPending + ? null + : state.pendingSnapshotFetchId, + }); + }, + clearPendingProfile() { + set({ + pendingProfileId: null, + pendingSnapshotFetchId: null, + }); + }, + reset() { + set({ + data: null, + hydration: "none", + lastUpdated: null, + lastProfileId: null, + liveFetchRequestId: 0, + lastAppliedFetchId: 0, + pendingProfileId: null, + pendingSnapshotFetchId: null, + }); + scheduleBootstrapLiveFetch(200); + }, +})); + +const liveApplyQueue = new AsyncEventQueue(); +let pendingLivePayload: ProxiesUpdatedPayload | null = null; +let liveApplyScheduled = false; + +const scheduleLiveApply = () => { + if (liveApplyScheduled) return; + liveApplyScheduled = true; + + const dispatch = () => { + liveApplyScheduled = false; + const payload = pendingLivePayload; + pendingLivePayload = null; + if (!payload) return; + + liveApplyQueue.enqueue(async () => { + await nextTick(); + useProxyStore.getState().setLive(payload); + }); + }; + + if ( + typeof window !== "undefined" && + typeof window.requestAnimationFrame === "function" + ) { + window.requestAnimationFrame(dispatch); + } else { + setTimeout(dispatch, 16); + } +}; 
+ +export const applyLiveProxyPayload = (payload: ProxiesUpdatedPayload) => { + pendingLivePayload = payload; + scheduleLiveApply(); +}; + +export const fetchLiveProxies = async () => { + const requestId = useProxyStore.getState().startLiveFetch(); + const view = await calcuProxies(); + useProxyStore.getState().completeLiveFetch(requestId, view); +}; + +const MAX_BOOTSTRAP_ATTEMPTS = 5; +const BOOTSTRAP_BASE_DELAY_MS = 600; +let bootstrapAttempts = 0; +let bootstrapTimer: number | null = null; + +function attemptBootstrapLiveFetch() { + const state = useProxyStore.getState(); + if (state.hydration === "live") { + bootstrapAttempts = 0; + return; + } + + if (bootstrapAttempts >= MAX_BOOTSTRAP_ATTEMPTS) { + return; + } + + const attemptNumber = ++bootstrapAttempts; + + void fetchLiveProxies() + .then(() => { + bootstrapAttempts = 0; + }) + .catch((error) => { + console.warn( + `[ProxyStore] Bootstrap live fetch attempt ${attemptNumber} failed:`, + error, + ); + if (attemptNumber < MAX_BOOTSTRAP_ATTEMPTS) { + scheduleBootstrapLiveFetch(BOOTSTRAP_BASE_DELAY_MS * attemptNumber); + } + }); +} + +function scheduleBootstrapLiveFetch(delay = 0) { + if (typeof window === "undefined") { + return; + } + + if (bootstrapTimer !== null) { + window.clearTimeout(bootstrapTimer); + bootstrapTimer = null; + } + + bootstrapTimer = window.setTimeout(() => { + bootstrapTimer = null; + attemptBootstrapLiveFetch(); + }, delay); +} + +if (typeof window !== "undefined") { + void nextTick().then(() => scheduleBootstrapLiveFetch(0)); +} diff --git a/src/utils/asyncQueue.ts b/src/utils/asyncQueue.ts new file mode 100644 index 00000000..927faa83 --- /dev/null +++ b/src/utils/asyncQueue.ts @@ -0,0 +1,31 @@ +export class AsyncEventQueue { + private tail: Promise = Promise.resolve(); + + enqueue(task: () => Promise | void) { + this.tail = this.tail + .then(async () => { + await task(); + }) + .catch((error) => { + console.error("AsyncEventQueue task failed", error); + }); + } + + clear() { + 
this.tail = Promise.resolve(); + } +} + +export const nextTick = () => + new Promise((resolve) => { + if (typeof queueMicrotask === "function") { + queueMicrotask(resolve); + } else { + Promise.resolve().then(() => resolve()); + } + }); + +export const afterPaint = () => + new Promise((resolve) => { + requestAnimationFrame(() => resolve()); + }); diff --git a/src/utils/proxy-snapshot.ts b/src/utils/proxy-snapshot.ts new file mode 100644 index 00000000..2e451db2 --- /dev/null +++ b/src/utils/proxy-snapshot.ts @@ -0,0 +1,205 @@ +import yaml from "js-yaml"; + +const createProxyItem = ( + name: string, + partial: Partial = {}, +): IProxyItem => ({ + name, + type: partial.type ?? "unknown", + udp: partial.udp ?? false, + xudp: partial.xudp ?? false, + tfo: partial.tfo ?? false, + mptcp: partial.mptcp ?? false, + smux: partial.smux ?? false, + history: [], + provider: partial.provider, + testUrl: partial.testUrl, + hidden: partial.hidden, + icon: partial.icon, + fixed: partial.fixed, +}); + +const createGroupItem = ( + name: string, + all: IProxyItem[], + partial: Partial = {}, +): IProxyGroupItem => { + const rest = { ...partial } as Partial; + delete (rest as Partial).all; + const base = createProxyItem(name, rest); + return { + ...base, + all, + now: partial.now ?? base.now, + }; +}; + +const ensureProxyItem = ( + map: Map, + name: string, + source?: Partial, +) => { + const key = String(name); + if (map.has(key)) return map.get(key)!; + const item = createProxyItem(key, source); + map.set(key, item); + return item; +}; + +const parseProxyEntry = (entry: any): IProxyItem | null => { + if (!entry || typeof entry !== "object") return null; + const name = entry.name || entry.uid || entry.id; + if (!name) return null; + return createProxyItem(String(name), { + type: entry.type ? 
String(entry.type) : undefined, + udp: Boolean(entry.udp), + xudp: Boolean(entry.xudp), + tfo: Boolean(entry.tfo), + mptcp: Boolean(entry.mptcp), + smux: Boolean(entry.smux), + testUrl: entry.test_url || entry.testUrl, + }); +}; + +const isNonEmptyString = (value: unknown): value is string => + typeof value === "string" && value.trim().length > 0; + +const parseProxyGroup = ( + entry: any, + proxyMap: Map, +): IProxyGroupItem | null => { + if (!entry || typeof entry !== "object") return null; + const name = entry.name; + if (!name) return null; + + const rawProxies: unknown[] = Array.isArray(entry.proxies) + ? entry.proxies + : []; + + const proxyRefs: string[] = rawProxies + .filter(isNonEmptyString) + .map((item) => item.trim()); + + const uniqueNames: string[] = Array.from(new Set(proxyRefs)); + + const all = uniqueNames.map((proxyName) => + ensureProxyItem(proxyMap, proxyName), + ); + + return createGroupItem(String(name), all, { + type: entry.type ? String(entry.type) : "Selector", + provider: entry.provider, + testUrl: entry.testUrl || entry.test_url, + now: typeof entry.now === "string" ? 
entry.now : undefined, + }); +}; + +const mapRecords = ( + proxies: Map, + groups: IProxyGroupItem[], + extra: IProxyItem[] = [], +): Record => { + const result: Record = {}; + proxies.forEach((item, key) => { + result[key] = item; + }); + groups.forEach((group) => { + result[group.name] = group as unknown as IProxyItem; + }); + extra.forEach((item) => { + result[item.name] = item; + }); + return result; +}; + +export const createProxySnapshotFromProfile = ( + yamlContent: string, +): { + global: IProxyGroupItem; + direct: IProxyItem; + groups: IProxyGroupItem[]; + records: Record; + proxies: IProxyItem[]; +} | null => { + let parsed: any; + try { + parsed = yaml.load(yamlContent); + } catch (error) { + console.warn("[ProxySnapshot] Failed to parse YAML:", error); + return null; + } + + if (!parsed || typeof parsed !== "object") { + return null; + } + + const proxyMap = new Map(); + + if (Array.isArray((parsed as any).proxies)) { + for (const entry of (parsed as any).proxies) { + const item = parseProxyEntry(entry); + if (item) { + proxyMap.set(item.name, item); + } + } + } + + const proxyProviders = (parsed as any)["proxy-providers"]; + if (proxyProviders && typeof proxyProviders === "object") { + for (const key of Object.keys(proxyProviders)) { + const provider = proxyProviders[key]; + if (provider && Array.isArray(provider.proxies)) { + provider.proxies + .filter( + (proxyName: unknown): proxyName is string => + typeof proxyName === "string", + ) + .forEach((proxyName: string) => ensureProxyItem(proxyMap, proxyName)); + } + } + } + + const groups: IProxyGroupItem[] = []; + if (Array.isArray((parsed as any)["proxy-groups"])) { + for (const entry of (parsed as any)["proxy-groups"]) { + const groupItem = parseProxyGroup(entry, proxyMap); + if (groupItem) { + groups.push(groupItem); + } + } + } + + const direct = createProxyItem("DIRECT", { type: "Direct" }); + const reject = createProxyItem("REJECT", { type: "Reject" }); + + ensureProxyItem(proxyMap, direct.name, 
direct); + ensureProxyItem(proxyMap, reject.name, reject); + + let global = groups.find((group) => group.name === "GLOBAL"); + if (!global) { + const globalRefs = groups.flatMap((group) => + group.all.map((proxy) => proxy.name), + ); + const unique = Array.from(new Set(globalRefs)); + const all = unique.map((name) => ensureProxyItem(proxyMap, name)); + global = createGroupItem("GLOBAL", all, { + type: "Selector", + hidden: true, + }); + groups.unshift(global); + } + + const proxies = Array.from(proxyMap.values()).filter( + (item) => !groups.some((group) => group.name === item.name), + ); + + const records = mapRecords(proxyMap, groups, [direct, reject]); + + return { + global, + direct, + groups, + records, + proxies, + }; +}; From c27ad3fdcbeae23562f45537aebb23aab54f6ff3 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 17:32:54 +0800 Subject: [PATCH 29/70] feat: add log opening functionality in tray menu and update localization --- UPDATELOG.md | 1 + src-tauri/src/cmd/app.rs | 14 ++++++++++++++ src-tauri/src/core/tray/menu_def.rs | 2 ++ src-tauri/src/core/tray/mod.rs | 30 ++++++++++++++++++++++++++++- src-tauri/src/lib.rs | 2 ++ src-tauri/src/utils/dirs.rs | 20 ++++++++++++++++++- src/locales/en.json | 4 +++- src/locales/zh.json | 4 +++- 8 files changed, 73 insertions(+), 4 deletions(-) diff --git a/UPDATELOG.md b/UPDATELOG.md index 51236c73..b37eca6e 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -48,6 +48,7 @@ - 允许独立控制订阅自动更新 - 托盘 `更多` 中新增 `关闭所有连接` 按钮 - 新增左侧菜单栏的排序功能(右键点击左侧菜单栏) +- 托盘 `打开目录` 中新增 `应用日志` 和 `内核日志`
diff --git a/src-tauri/src/cmd/app.rs b/src-tauri/src/cmd/app.rs index 4daa150f..11a9207f 100644 --- a/src-tauri/src/cmd/app.rs +++ b/src-tauri/src/cmd/app.rs @@ -41,6 +41,20 @@ pub fn open_web_url(url: String) -> CmdResult<()> { open::that(url.as_str()).stringify_err() } +// TODO 后续可以为前端提供接口,当前作为托盘菜单使用 +/// 打开 Verge 最新日志 +#[tauri::command] +pub async fn open_app_log() -> CmdResult<()> { + open::that(dirs::app_latest_log().stringify_err()?).stringify_err() +} + +// TODO 后续可以为前端提供接口,当前作为托盘菜单使用 +/// 打开 Clash 最新日志 +#[tauri::command] +pub async fn open_core_log() -> CmdResult<()> { + open::that(dirs::clash_latest_log().stringify_err()?).stringify_err() +} + /// 打开/关闭开发者工具 #[tauri::command] pub fn open_devtools(app_handle: AppHandle) { diff --git a/src-tauri/src/core/tray/menu_def.rs b/src-tauri/src/core/tray/menu_def.rs index c2acbe25..10fe3835 100644 --- a/src-tauri/src/core/tray/menu_def.rs +++ b/src-tauri/src/core/tray/menu_def.rs @@ -39,6 +39,8 @@ define_menu! { core_dir => CORE_DIR, "tray_core_dir", "Core Dir", logs_dir => LOGS_DIR, "tray_logs_dir", "Logs Dir", open_dir => OPEN_DIR, "tray_open_dir", "Open Dir", + app_log => APP_LOG, "tray_app_log", "Open App Log", + core_log => CORE_LOG, "tray_core_log", "Open Core Log", restart_clash => RESTART_CLASH, "tray_restart_clash", "Restart Clash Core", restart_app => RESTART_APP, "tray_restart_app", "Restart App", verge_version => VERGE_VERSION, "tray_verge_version", "Verge Version", diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 6b4eef42..a2b15ff6 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -1034,12 +1034,34 @@ async fn create_tray_menu( None::<&str>, )?; + let open_app_log = &MenuItem::with_id( + app_handle, + MenuIds::APP_LOG, + &texts.app_log, + true, + None::<&str>, + )?; + + let open_core_log = &MenuItem::with_id( + app_handle, + MenuIds::CORE_LOG, + &texts.core_log, + true, + None::<&str>, + )?; + let open_dir = 
&Submenu::with_id_and_items( app_handle, MenuIds::OPEN_DIR, &texts.open_dir, true, - &[open_app_dir, open_core_dir, open_logs_dir], + &[ + open_app_dir, + open_core_dir, + open_logs_dir, + open_app_log, + open_core_log, + ], )?; let restart_clash = &MenuItem::with_id( @@ -1169,6 +1191,12 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { MenuIds::LOGS_DIR => { let _ = cmd::open_logs_dir().await; } + MenuIds::APP_LOG => { + let _ = cmd::open_app_log().await; + } + MenuIds::CORE_LOG => { + let _ = cmd::open_core_log().await; + } MenuIds::RESTART_CLASH => feat::restart_clash_core().await, MenuIds::RESTART_APP => feat::restart_app().await, MenuIds::LIGHTWEIGHT_MODE => { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 6f3130fe..84838962 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -141,6 +141,8 @@ mod app_init { cmd::open_logs_dir, cmd::open_web_url, cmd::open_core_dir, + cmd::open_app_log, + cmd::open_core_log, cmd::get_portable_flag, cmd::get_network_interfaces, cmd::get_system_hostname, diff --git a/src-tauri/src/utils/dirs.rs b/src-tauri/src/utils/dirs.rs index 3730be82..cfae29fe 100644 --- a/src-tauri/src/utils/dirs.rs +++ b/src-tauri/src/utils/dirs.rs @@ -1,4 +1,8 @@ -use crate::{core::handle, logging, utils::logging::Type}; +use crate::{ + core::{CoreManager, handle, manager::RunningMode}, + logging, + utils::logging::Type, +}; use anyhow::Result; use async_trait::async_trait; use once_cell::sync::OnceCell; @@ -122,6 +126,11 @@ pub fn app_logs_dir() -> Result { Ok(app_home_dir()?.join("logs")) } +// latest verge log +pub fn app_latest_log() -> Result { + Ok(app_logs_dir()?.join("latest.log")) +} + /// local backups dir pub fn local_backup_dir() -> Result { let dir = app_home_dir()?.join(BACKUP_DIR); @@ -167,6 +176,15 @@ pub fn service_log_dir() -> Result { Ok(log_dir) } +pub fn clash_latest_log() -> Result { + match *CoreManager::global().get_running_mode() { + RunningMode::Service => 
Ok(service_log_dir()?.join("service_latest.log")), + RunningMode::Sidecar | RunningMode::NotRunning => { + Ok(sidecar_log_dir()?.join("sidecar_latest.log")) + } + } +} + pub fn path_to_str(path: &PathBuf) -> Result<&str> { let path_str = path .as_os_str() diff --git a/src/locales/en.json b/src/locales/en.json index 28befbd2..e1519a2d 100644 --- a/src/locales/en.json +++ b/src/locales/en.json @@ -713,5 +713,7 @@ "Allow Auto Update": "Allow Auto Update", "Menu reorder mode": "Menu reorder mode", "Unlock menu order": "Unlock menu order", - "Lock menu order": "Lock menu order" + "Lock menu order": "Lock menu order", + "Open App Log": "Open App Log", + "Open Core Log": "Open Core Log" } diff --git a/src/locales/zh.json b/src/locales/zh.json index 1ecd9919..e17e33f9 100644 --- a/src/locales/zh.json +++ b/src/locales/zh.json @@ -713,5 +713,7 @@ "Allow Auto Update": "允许自动更新", "Menu reorder mode": "菜单排序模式", "Unlock menu order": "解锁菜单排序", - "Lock menu order": "锁定菜单排序" + "Lock menu order": "锁定菜单排序", + "Open App Log": "应用日志", + "Open Core Log": "内核日志" } From 928f226d10e7aeced3999b321fc881dc67727841 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 18:02:24 +0800 Subject: [PATCH 30/70] fix: update clash_verge_service_ipc version to 2.0.21 --- src-tauri/Cargo.lock | 20 ++++++++++---------- src-tauri/Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index bbe55504..73a8a381 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -147,7 +147,7 @@ dependencies = [ "objc2-foundation 0.3.2", "parking_lot 0.12.5", "percent-encoding", - "windows-sys 0.52.0", + "windows-sys 0.59.0", "wl-clipboard-rs", "x11rb", ] @@ -1183,8 +1183,8 @@ dependencies = [ [[package]] name = "clash_verge_service_ipc" -version = "2.0.20" -source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#c0b6e99da27e7956047d42aee104f5c33083c970" +version = "2.0.21" 
+source = "git+https://github.com/clash-verge-rev/clash-verge-service-ipc#1e34c648e48f8580208ff777686092e0a94b8025" dependencies = [ "anyhow", "compact_str", @@ -1935,7 +1935,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -2191,7 +2191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4456,7 +4456,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -5790,7 +5790,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6283,7 +6283,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6296,7 +6296,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7799,7 +7799,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index bd44656e..1583013a 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -88,7 +88,7 @@ tauri-plugin-mihomo = { git = "https://github.com/clash-verge-rev/tauri-plugin-m clash_verge_logger = { git = "https://github.com/clash-verge-rev/clash-verge-logger" } async-trait = "0.1.89" smartstring = { version = "1.0.1", features = ["serde"] } -clash_verge_service_ipc = { version = "2.0.20", features = [ +clash_verge_service_ipc = { version = "2.0.21", features = [ "client", ], git = 
"https://github.com/clash-verge-rev/clash-verge-service-ipc" } From a869dbb441de8d1fb537e20772aee18c58ff5eff Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 18:11:04 +0800 Subject: [PATCH 31/70] Revert "refactor: profile switch (#5197)" This reverts commit c2dcd867228aae3f2fa0855226244cc701ba4f43. --- UPDATELOG.md | 2 - src-tauri/src/cmd/frontend.rs | 48 - src-tauri/src/cmd/mod.rs | 3 - src-tauri/src/cmd/profile.rs | 704 +++++++++------ src-tauri/src/cmd/profile_switch/driver.rs | 683 -------------- src-tauri/src/cmd/profile_switch/mod.rs | 34 - src-tauri/src/cmd/profile_switch/state.rs | 353 -------- .../src/cmd/profile_switch/validation.rs | 113 --- src-tauri/src/cmd/profile_switch/workflow.rs | 385 -------- .../cmd/profile_switch/workflow/cleanup.rs | 65 -- .../workflow/state_machine/context.rs | 178 ---- .../workflow/state_machine/core.rs | 284 ------ .../workflow/state_machine/mod.rs | 11 - .../workflow/state_machine/stages.rs | 597 ------------- src-tauri/src/core/handle.rs | 131 +-- src-tauri/src/core/manager/config.rs | 237 +---- src-tauri/src/core/notification.rs | 269 +----- src-tauri/src/lib.rs | 26 - src-tauri/src/utils/draft.rs | 7 - src/components/home/current-proxy-card.tsx | 22 +- src/components/proxy/provider-button.tsx | 310 +++---- src/components/proxy/proxy-groups.tsx | 122 +-- src/components/proxy/use-render-list.ts | 89 +- src/hooks/use-current-proxy.ts | 12 +- src/hooks/use-profiles.ts | 77 +- src/pages/_layout/useLayoutEvents.ts | 36 +- src/pages/profiles.tsx | 844 +++++++----------- src/providers/app-data-context.ts | 9 +- src/providers/app-data-provider.tsx | 719 +++------------ src/services/cmds.ts | 146 +-- src/services/noticeService.ts | 18 +- src/services/refresh.ts | 24 - src/stores/profile-store.ts | 59 -- src/stores/proxy-store.ts | 298 ------- src/utils/asyncQueue.ts | 31 - src/utils/proxy-snapshot.ts | 205 ----- 36 files changed, 1257 insertions(+), 5894 deletions(-) 
delete mode 100644 src-tauri/src/cmd/frontend.rs delete mode 100644 src-tauri/src/cmd/profile_switch/driver.rs delete mode 100644 src-tauri/src/cmd/profile_switch/mod.rs delete mode 100644 src-tauri/src/cmd/profile_switch/state.rs delete mode 100644 src-tauri/src/cmd/profile_switch/validation.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/cleanup.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs delete mode 100644 src/services/refresh.ts delete mode 100644 src/stores/profile-store.ts delete mode 100644 src/stores/proxy-store.ts delete mode 100644 src/utils/asyncQueue.ts delete mode 100644 src/utils/proxy-snapshot.ts diff --git a/UPDATELOG.md b/UPDATELOG.md index b37eca6e..071d8ac1 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -30,7 +30,6 @@ - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 -- 修复切换订阅卡死
✨ 新增功能 @@ -78,7 +77,6 @@ - 优化首页当前节点对MATCH规则的支持 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 -- 重构订阅切换,保证代理页面的及时刷新 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题
diff --git a/src-tauri/src/cmd/frontend.rs b/src-tauri/src/cmd/frontend.rs deleted file mode 100644 index 8559c589..00000000 --- a/src-tauri/src/cmd/frontend.rs +++ /dev/null @@ -1,48 +0,0 @@ -use super::CmdResult; -use crate::{logging, utils::logging::Type}; -use serde::Deserialize; - -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct FrontendLogPayload { - pub level: Option, - pub message: String, - pub context: Option, -} - -#[tauri::command] -pub fn frontend_log(payload: FrontendLogPayload) -> CmdResult<()> { - let level = payload.level.as_deref().unwrap_or("info"); - match level { - "trace" | "debug" => logging!( - debug, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - "warn" => logging!( - warn, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - "error" => logging!( - error, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - _ => logging!( - info, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - } - - if let Some(context) = payload.context { - logging!(info, Type::Frontend, "[frontend] context: {}", context); - } - - Ok(()) -} diff --git a/src-tauri/src/cmd/mod.rs b/src-tauri/src/cmd/mod.rs index 2cf76898..6c748687 100644 --- a/src-tauri/src/cmd/mod.rs +++ b/src-tauri/src/cmd/mod.rs @@ -7,12 +7,10 @@ pub type CmdResult = Result; pub mod app; pub mod backup; pub mod clash; -pub mod frontend; pub mod lightweight; pub mod media_unlock_checker; pub mod network; pub mod profile; -mod profile_switch; pub mod proxy; pub mod runtime; pub mod save_profile; @@ -27,7 +25,6 @@ pub mod webdav; pub use app::*; pub use backup::*; pub use clash::*; -pub use frontend::*; pub use lightweight::*; pub use media_unlock_checker::*; pub use network::*; diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 37cdd2e9..15177936 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -1,4 +1,5 @@ -use super::{CmdResult, 
StringifyErr, profile_switch}; +use super::CmdResult; +use super::StringifyErr; use crate::{ config::{ Config, IProfiles, PrfItem, PrfOption, @@ -8,191 +9,77 @@ use crate::{ }, profiles_append_item_safe, }, - core::{CoreManager, handle, timer::Timer}, - feat, logging, ret_err, + core::{CoreManager, handle, timer::Timer, tray::Tray}, + feat, logging, + process::AsyncHandler, + ret_err, utils::{dirs, help, logging::Type}, }; -use once_cell::sync::Lazy; -use parking_lot::RwLock; use smartstring::alias::String; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::time::Duration; -use crate::cmd::profile_switch::{ProfileSwitchStatus, SwitchResultEvent}; +// 全局请求序列号跟踪,用于避免队列化执行 +static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0); -#[derive(Clone)] -struct CachedProfiles { - snapshot: IProfiles, - captured_at: Instant, -} +static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false); -static PROFILES_CACHE: Lazy>> = Lazy::new(|| RwLock::new(None)); - -#[derive(Default)] -struct SnapshotMetrics { - fast_hits: AtomicU64, - cache_hits: AtomicU64, - blocking_hits: AtomicU64, - refresh_scheduled: AtomicU64, - last_log_ms: AtomicU64, -} - -static SNAPSHOT_METRICS: Lazy = Lazy::new(SnapshotMetrics::default); - -/// Store the latest snapshot so cache consumers can reuse it without hitting the lock again. -fn update_profiles_cache(snapshot: &IProfiles) { - *PROFILES_CACHE.write() = Some(CachedProfiles { - snapshot: snapshot.clone(), - captured_at: Instant::now(), - }); -} - -/// Return the cached snapshot and how old it is, if present. -fn cached_profiles_snapshot() -> Option<(IProfiles, u128)> { - PROFILES_CACHE.read().as_ref().map(|entry| { - ( - entry.snapshot.clone(), - entry.captured_at.elapsed().as_millis(), - ) - }) -} - -/// Return the latest profiles snapshot, preferring cached data so UI requests never block. 
#[tauri::command] pub async fn get_profiles() -> CmdResult { - let started_at = Instant::now(); + // 策略1: 尝试快速获取latest数据 + let latest_result = tokio::time::timeout(Duration::from_millis(500), async { + let profiles = Config::profiles().await; + let latest = profiles.latest_ref(); + IProfiles { + current: latest.current.clone(), + items: latest.items.clone(), + } + }) + .await; - // Resolve snapshots in three tiers so UI reads never stall on a mutex: - // 1) try a non-blocking read, 2) fall back to the last cached copy while a - // writer holds the lock, 3) block and refresh the cache as a final resort. - if let Some(snapshot) = read_profiles_snapshot_nonblocking().await { - let item_count = snapshot - .items - .as_ref() - .map(|items| items.len()) - .unwrap_or(0); - update_profiles_cache(&snapshot); - SNAPSHOT_METRICS.fast_hits.fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Snapshot served (path=fast, items={}, elapsed={}ms)", - item_count, - started_at.elapsed().as_millis() - ); - maybe_log_snapshot_metrics(); - return Ok(snapshot); + match latest_result { + Ok(profiles) => { + logging!(info, Type::Cmd, "快速获取配置列表成功"); + return Ok(profiles); + } + Err(_) => { + logging!(warn, Type::Cmd, "快速获取配置超时(500ms)"); + } } - if let Some((cached, age_ms)) = cached_profiles_snapshot() { - SNAPSHOT_METRICS.cache_hits.fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Served cached snapshot while lock busy (age={}ms)", - age_ms - ); - schedule_profiles_snapshot_refresh(); - maybe_log_snapshot_metrics(); - return Ok(cached); + // 策略2: 如果快速获取失败,尝试获取data() + let data_result = tokio::time::timeout(Duration::from_secs(2), async { + let profiles = Config::profiles().await; + let data = profiles.latest_ref(); + IProfiles { + current: data.current.clone(), + items: data.items.clone(), + } + }) + .await; + + match data_result { + Ok(profiles) => { + logging!(info, Type::Cmd, "获取draft配置列表成功"); + return Ok(profiles); + } + 
Err(join_err) => { + logging!( + error, + Type::Cmd, + "获取draft配置任务失败或超时: {}", + join_err + ); + } } - let snapshot = read_profiles_snapshot_blocking().await; - let item_count = snapshot - .items - .as_ref() - .map(|items| items.len()) - .unwrap_or(0); - update_profiles_cache(&snapshot); - SNAPSHOT_METRICS - .blocking_hits - .fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Snapshot served (path=blocking, items={}, elapsed={}ms)", - item_count, - started_at.elapsed().as_millis() - ); - maybe_log_snapshot_metrics(); - Ok(snapshot) + // 策略3: fallback,尝试重新创建配置 + logging!(warn, Type::Cmd, "所有获取配置策略都失败,尝试fallback"); + + Ok(IProfiles::new().await) } -/// Try to grab the latest profile data without waiting for the writer. -async fn read_profiles_snapshot_nonblocking() -> Option { - let profiles = Config::profiles().await; - profiles.try_latest_ref().map(|guard| (**guard).clone()) -} - -/// Fall back to a blocking read when we absolutely must have fresh data. -async fn read_profiles_snapshot_blocking() -> IProfiles { - let profiles = Config::profiles().await; - let guard = profiles.latest_ref(); - (**guard).clone() -} - -/// Schedule a background cache refresh once the exclusive lock becomes available again. -fn schedule_profiles_snapshot_refresh() { - crate::process::AsyncHandler::spawn(|| async { - // Once the lock is released we refresh the cached snapshot so the next - // request observes the latest data instead of the stale fallback. 
- SNAPSHOT_METRICS - .refresh_scheduled - .fetch_add(1, Ordering::Relaxed); - let snapshot = read_profiles_snapshot_blocking().await; - update_profiles_cache(&snapshot); - logging!( - debug, - Type::Cmd, - "[Profiles] Cache refreshed after busy snapshot" - ); - }); -} - -fn maybe_log_snapshot_metrics() { - const LOG_INTERVAL_MS: u64 = 5_000; - let now_ms = current_millis(); - let last_ms = SNAPSHOT_METRICS.last_log_ms.load(Ordering::Relaxed); - if now_ms.saturating_sub(last_ms) < LOG_INTERVAL_MS { - return; - } - - if SNAPSHOT_METRICS - .last_log_ms - .compare_exchange(last_ms, now_ms, Ordering::SeqCst, Ordering::Relaxed) - .is_err() - { - return; - } - - let fast = SNAPSHOT_METRICS.fast_hits.swap(0, Ordering::SeqCst); - let cache = SNAPSHOT_METRICS.cache_hits.swap(0, Ordering::SeqCst); - let blocking = SNAPSHOT_METRICS.blocking_hits.swap(0, Ordering::SeqCst); - let refresh = SNAPSHOT_METRICS.refresh_scheduled.swap(0, Ordering::SeqCst); - - if fast == 0 && cache == 0 && blocking == 0 && refresh == 0 { - return; - } - - logging!( - debug, - Type::Cmd, - "[Profiles][Metrics] 5s window => fast={}, cache={}, blocking={}, refresh_jobs={}", - fast, - cache, - blocking, - refresh - ); -} - -fn current_millis() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 -} - -/// Run the optional enhancement pipeline and refresh Clash when it completes. +/// 增强配置文件 #[tauri::command] pub async fn enhance_profiles() -> CmdResult { match feat::enhance_profiles().await { @@ -206,106 +93,79 @@ pub async fn enhance_profiles() -> CmdResult { Ok(()) } -/// Download a profile from the given URL and persist it to the local catalog. 
+/// 导入配置文件 #[tauri::command] pub async fn import_profile(url: std::string::String, option: Option) -> CmdResult { - logging!(info, Type::Cmd, "[Profile Import] Begin: {}", url); + logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url); - // Rely on PrfItem::from_url internal timeout/retry logic instead of wrapping with tokio::time::timeout + // 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹 let item = match PrfItem::from_url(&url, None, None, option).await { Ok(it) => { - logging!( - info, - Type::Cmd, - "[Profile Import] Download complete; saving configuration" - ); + logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置"); it } Err(e) => { - logging!(error, Type::Cmd, "[Profile Import] Download failed: {}", e); - return Err(format!("Profile import failed: {}", e).into()); + logging!(error, Type::Cmd, "[导入订阅] 下载失败: {}", e); + return Err(format!("导入订阅失败: {}", e).into()); } }; match profiles_append_item_safe(item.clone()).await { Ok(_) => match profiles_save_file_safe().await { Ok(_) => { - logging!( - info, - Type::Cmd, - "[Profile Import] Configuration file saved successfully" - ); + logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功"); } Err(e) => { - logging!( - error, - Type::Cmd, - "[Profile Import] Failed to save configuration file: {}", - e - ); + logging!(error, Type::Cmd, "[导入订阅] 保存配置文件失败: {}", e); } }, Err(e) => { - logging!( - error, - Type::Cmd, - "[Profile Import] Failed to persist configuration: {}", - e - ); - return Err(format!("Profile import failed: {}", e).into()); + logging!(error, Type::Cmd, "[导入订阅] 保存配置失败: {}", e); + return Err(format!("导入订阅失败: {}", e).into()); } } - // Immediately emit a configuration change notification + // 立即发送配置变更通知 if let Some(uid) = &item.uid { - logging!( - info, - Type::Cmd, - "[Profile Import] Emitting configuration change event: {}", - uid - ); + logging!(info, Type::Cmd, "[导入订阅] 发送配置变更通知: {}", uid); handle::Handle::notify_profile_changed(uid.clone()); } - // Save configuration asynchronously and emit a global 
notification + // 异步保存配置文件并发送全局通知 let uid_clone = item.uid.clone(); if let Some(uid) = uid_clone { - // Delay notification to ensure the file is fully written + // 延迟发送,确保文件已完全写入 tokio::time::sleep(Duration::from_millis(100)).await; handle::Handle::notify_profile_changed(uid); } - logging!(info, Type::Cmd, "[Profile Import] Completed: {}", url); + logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url); Ok(()) } -/// Move a profile in the list relative to another entry. +/// 调整profile的顺序 #[tauri::command] pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { match profiles_reorder_safe(active_id, over_id).await { Ok(_) => { - log::info!(target: "app", "Reordered profiles"); + log::info!(target: "app", "重新排序配置文件"); Ok(()) } Err(err) => { - log::error!(target: "app", "Failed to reorder profiles: {}", err); - Err(format!("Failed to reorder profiles: {}", err).into()) + log::error!(target: "app", "重新排序配置文件失败: {}", err); + Err(format!("重新排序配置文件失败: {}", err).into()) } } } -/// Create a new profile entry and optionally write its backing file. +/// 创建新的profile +/// 创建一个新的配置文件 #[tauri::command] pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResult { match profiles_append_item_with_filedata_safe(item.clone(), file_data).await { Ok(_) => { - // Emit configuration change notification + // 发送配置变更通知 if let Some(uid) = &item.uid { - logging!( - info, - Type::Cmd, - "[Profile Create] Emitting configuration change event: {}", - uid - ); + logging!(info, Type::Cmd, "[创建订阅] 发送配置变更通知: {}", uid); handle::Handle::notify_profile_changed(uid.clone()); } Ok(()) @@ -317,7 +177,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu } } -/// Force-refresh a profile from its remote source, if available. 
+/// 更新配置文件 #[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { match feat::update_profile(index, option, Some(true)).await { @@ -329,11 +189,11 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu } } -/// Remove a profile and refresh the running configuration if necessary. +/// 删除配置文件 #[tauri::command] pub async fn delete_profile(index: String) -> CmdResult { println!("delete_profile: {}", index); - // Use send-safe helper function + // 使用Send-safe helper函数 let should_update = profiles_delete_item_safe(index.clone()) .await .stringify_err()?; @@ -343,13 +203,8 @@ pub async fn delete_profile(index: String) -> CmdResult { match CoreManager::global().update_config().await { Ok(_) => { handle::Handle::refresh_clash(); - // Emit configuration change notification - logging!( - info, - Type::Cmd, - "[Profile Delete] Emitting configuration change event: {}", - index - ); + // 发送配置变更通知 + logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index); handle::Handle::notify_profile_changed(index); } Err(e) => { @@ -361,28 +216,361 @@ pub async fn delete_profile(index: String) -> CmdResult { Ok(()) } -/// Apply partial profile list updates through the switching workflow. 
+/// 验证新配置文件的语法 +async fn validate_new_profile(new_profile: &String) -> Result<(), ()> { + logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); + + // 获取目标配置文件路径 + let config_file_result = { + let profiles_config = Config::profiles().await; + let profiles_data = profiles_config.latest_ref(); + match profiles_data.get_item(new_profile) { + Ok(item) => { + if let Some(file) = &item.file { + let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); + path.ok() + } else { + None + } + } + Err(e) => { + logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); + None + } + } + }; + + // 如果获取到文件路径,检查YAML语法 + if let Some(file_path) = config_file_result { + if !file_path.exists() { + logging!( + error, + Type::Cmd, + "目标配置文件不存在: {}", + file_path.display() + ); + handle::Handle::notice_message( + "config_validate::file_not_found", + format!("{}", file_path.display()), + ); + return Err(()); + } + + // 超时保护 + let file_read_result = tokio::time::timeout( + Duration::from_secs(5), + tokio::fs::read_to_string(&file_path), + ) + .await; + + match file_read_result { + Ok(Ok(content)) => { + let yaml_parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml_ng::from_str::(&content) + }) + .await; + + match yaml_parse_result { + Ok(Ok(_)) => { + logging!(info, Type::Cmd, "目标配置文件语法正确"); + Ok(()) + } + Ok(Err(err)) => { + let error_msg = format!(" {err}"); + logging!( + error, + Type::Cmd, + "目标配置文件存在YAML语法错误:{}", + error_msg + ); + handle::Handle::notice_message( + "config_validate::yaml_syntax_error", + error_msg.clone(), + ); + Err(()) + } + Err(join_err) => { + let error_msg = format!("YAML解析任务失败: {join_err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::yaml_parse_error", + error_msg.clone(), + ); + Err(()) + } + } + } + Ok(Err(err)) => { + let error_msg = format!("无法读取目标配置文件: {err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_error", + 
error_msg.clone(), + ); + Err(()) + } + Err(_) => { + let error_msg = "读取配置文件超时(5秒)".to_string(); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_timeout", + error_msg.clone(), + ); + Err(()) + } + } + } else { + Ok(()) + } +} + +/// 执行配置更新并处理结果 +async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> { + logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); + let restore_profiles = IProfiles { + current: Some(prev_profile), + items: None, + }; + Config::profiles() + .await + .draft_mut() + .patch_config(restore_profiles) + .stringify_err()?; + Config::profiles().await.apply(); + crate::process::AsyncHandler::spawn(|| async move { + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); + } + }); + logging!(info, Type::Cmd, "成功恢复到之前的配置"); + Ok(()) +} + +async fn handle_success(current_sequence: u64, current_value: Option) -> CmdResult { + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", + current_sequence, + latest_sequence + ); + Config::profiles().await.discard(); + return Ok(false); + } + + logging!( + info, + Type::Cmd, + "配置更新成功,序列号: {}", + current_sequence + ); + Config::profiles().await.apply(); + handle::Handle::refresh_clash(); + + if let Err(e) = Tray::global().update_tooltip().await { + log::warn!(target: "app", "异步更新托盘提示失败: {e}"); + } + + if let Err(e) = Tray::global().update_menu().await { + log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); + } + + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存配置文件失败: {e}"); + } + + if let Some(current) = ¤t_value { + logging!( + info, + Type::Cmd, + "向前端发送配置变更事件: {}, 序列号: {}", + current, + current_sequence + ); + handle::Handle::notify_profile_changed(current.clone()); + } + + CURRENT_SWITCHING_PROFILE.store(false, 
Ordering::SeqCst); + Ok(true) +} + +async fn handle_validation_failure( + error_msg: String, + current_profile: Option, +) -> CmdResult { + logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::error", error_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_update_error(e: E, current_sequence: u64) -> CmdResult { + logging!( + warn, + Type::Cmd, + "更新过程发生错误: {}, 序列号: {}", + e, + current_sequence + ); + Config::profiles().await.discard(); + handle::Handle::notice_message("config_validate::boot_error", e.to_string()); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_timeout(current_profile: Option, current_sequence: u64) -> CmdResult { + let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; + logging!( + error, + Type::Cmd, + "{}, 序列号: {}", + timeout_msg, + current_sequence + ); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::timeout", timeout_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn perform_config_update( + current_sequence: u64, + current_value: Option, + current_profile: Option, +) -> CmdResult { + logging!( + info, + Type::Cmd, + "开始内核配置更新,序列号: {}", + current_sequence + ); + let update_result = tokio::time::timeout( + Duration::from_secs(30), + CoreManager::global().update_config(), + ) + .await; + + match update_result { + Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await, + Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await, + Ok(Err(e)) => handle_update_error(e, current_sequence).await, + Err(_) => handle_timeout(current_profile, 
current_sequence).await, + } +} + +/// 修改profiles的配置 #[tauri::command] pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { - profile_switch::patch_profiles_config(profiles).await + if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) { + logging!(info, Type::Cmd, "当前正在切换配置,放弃请求"); + return Ok(false); + } + CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst); + + // 为当前请求分配序列号 + let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1; + let target_profile = profiles.current.clone(); + + logging!( + info, + Type::Cmd, + "开始修改配置文件,请求序列号: {}, 目标profile: {:?}", + current_sequence, + target_profile + ); + + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "获取锁后发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + return Ok(false); + } + + // 保存当前配置,以便在验证失败时恢复 + let current_profile = Config::profiles().await.latest_ref().current.clone(); + logging!(info, Type::Cmd, "当前配置: {:?}", current_profile); + + // 如果要切换配置,先检查目标配置文件是否有语法错误 + if let Some(new_profile) = profiles.current.as_ref() + && current_profile.as_ref() != Some(new_profile) + && validate_new_profile(new_profile).await.is_err() + { + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + return Ok(false); + } + + // 检查请求有效性 + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "在核心操作前发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + return Ok(false); + } + + // 更新profiles配置 + logging!( + info, + Type::Cmd, + "正在更新配置草稿,序列号: {}", + current_sequence + ); + + let current_value = profiles.current.clone(); + + let _ = Config::profiles().await.draft_mut().patch_config(profiles); + + // 在调用内核前再次验证请求有效性 + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + 
info, + Type::Cmd, + "在内核交互前发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + Config::profiles().await.discard(); + return Ok(false); + } + + perform_config_update(current_sequence, current_value, current_profile).await } -/// Switch to the provided profile index and wait for completion before returning. +/// 根据profile name修改profiles #[tauri::command] pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult { - profile_switch::patch_profiles_config_by_profile_index(profile_index).await + logging!(info, Type::Cmd, "切换配置到: {}", profile_index); + + let profiles = IProfiles { + current: Some(profile_index), + items: None, + }; + patch_profiles_config(profiles).await } -/// Enqueue a profile switch request and optionally notify on success. -#[tauri::command] -pub async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { - profile_switch::switch_profile(profile_index, notify_success).await -} - -/// Update a specific profile item and refresh timers if its schedule changed. +/// 修改某个profile item的 #[tauri::command] pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { - // Check for update_interval changes before saving + // 保存修改前检查是否有更新 update_interval let profiles = Config::profiles().await; let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) { let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval); @@ -401,19 +589,15 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { .await .stringify_err()?; - // If the interval or auto-update flag changes, refresh the timer asynchronously + // 如果更新间隔或允许自动更新变更,异步刷新定时器 if should_refresh_timer { let index_clone = index.clone(); crate::process::AsyncHandler::spawn(move || async move { - logging!( - info, - Type::Timer, - "Timer interval changed; refreshing timer..." 
- ); + logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器..."); if let Err(e) = crate::core::Timer::global().refresh().await { - logging!(error, Type::Timer, "Failed to refresh timer: {}", e); + logging!(error, Type::Timer, "刷新定时器失败: {}", e); } else { - // After refreshing successfully, emit a custom event without triggering a reload + // 刷新成功后发送自定义事件,不触发配置重载 crate::core::handle::Handle::notify_timer_updated(index_clone); } }); @@ -422,7 +606,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { Ok(()) } -/// Open the profile file in the system viewer. +/// 查看配置文件 #[tauri::command] pub async fn view_profile(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -444,7 +628,7 @@ pub async fn view_profile(index: String) -> CmdResult { help::open_file(path).stringify_err() } -/// Return the raw YAML contents for the given profile file. +/// 读取配置文件内容 #[tauri::command] pub async fn read_profile_file(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -454,22 +638,10 @@ pub async fn read_profile_file(index: String) -> CmdResult { Ok(data) } -/// Report the scheduled refresh timestamp (if any) for the profile timer. +/// 获取下一次更新时间 #[tauri::command] pub async fn get_next_update_time(uid: String) -> CmdResult> { let timer = Timer::global(); let next_time = timer.get_next_update_time(&uid).await; Ok(next_time) } - -/// Return the latest driver snapshot describing active and queued switch tasks. -#[tauri::command] -pub async fn get_profile_switch_status() -> CmdResult { - profile_switch::get_switch_status() -} - -/// Fetch switch result events newer than the provided sequence number. 
-#[tauri::command] -pub async fn get_profile_switch_events(after_sequence: u64) -> CmdResult> { - profile_switch::get_switch_events(after_sequence) -} diff --git a/src-tauri/src/cmd/profile_switch/driver.rs b/src-tauri/src/cmd/profile_switch/driver.rs deleted file mode 100644 index 8815458e..00000000 --- a/src-tauri/src/cmd/profile_switch/driver.rs +++ /dev/null @@ -1,683 +0,0 @@ -use super::{ - CmdResult, - state::{ - ProfileSwitchStatus, SwitchCancellation, SwitchManager, SwitchRequest, SwitchResultStatus, - SwitchTaskStatus, current_millis, manager, - }, - workflow::{self, SwitchPanicInfo, SwitchStage}, -}; -use crate::{logging, utils::logging::Type}; -use futures::FutureExt; -use once_cell::sync::OnceCell; -use smartstring::alias::String as SmartString; -use std::{ - collections::{HashMap, VecDeque}, - panic::AssertUnwindSafe, - time::Duration, -}; -use tokio::{ - sync::{ - Mutex as AsyncMutex, - mpsc::{self, error::TrySendError}, - oneshot, - }, - time::{self, MissedTickBehavior}, -}; - -// Single shared queue so profile switches are executed sequentially and can -// collapse redundant requests for the same profile. -const SWITCH_QUEUE_CAPACITY: usize = 32; -static SWITCH_QUEUE: OnceCell> = OnceCell::new(); - -type CompletionRegistry = AsyncMutex>>; - -static SWITCH_COMPLETION_WAITERS: OnceCell = OnceCell::new(); - -/// Global map of task id -> completion channel sender used when callers await the result. -fn completion_waiters() -> &'static CompletionRegistry { - SWITCH_COMPLETION_WAITERS.get_or_init(|| AsyncMutex::new(HashMap::new())) -} - -/// Register a oneshot sender so `switch_profile_and_wait` can be notified when its task finishes. 
-async fn register_completion_waiter(task_id: u64) -> oneshot::Receiver { - let (sender, receiver) = oneshot::channel(); - let mut guard = completion_waiters().lock().await; - if guard.insert(task_id, sender).is_some() { - logging!( - warn, - Type::Cmd, - "Replacing existing completion waiter for task {}", - task_id - ); - } - receiver -} - -/// Remove an outstanding completion waiter; used when enqueue fails or succeeds immediately. -async fn remove_completion_waiter(task_id: u64) -> Option> { - completion_waiters().lock().await.remove(&task_id) -} - -/// Fire-and-forget notify helper so we do not block the driver loop. -fn notify_completion_waiter(task_id: u64, result: SwitchResultStatus) { - tokio::spawn(async move { - let sender = completion_waiters().lock().await.remove(&task_id); - if let Some(sender) = sender { - let _ = sender.send(result); - } - }); -} - -const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5); -const WATCHDOG_TICK: Duration = Duration::from_millis(500); - -// Mutable snapshot of the driver's world; all mutations happen on the driver task. -#[derive(Debug, Default)] -struct SwitchDriverState { - active: Option, - queue: VecDeque, - latest_tokens: HashMap, - cleanup_profiles: HashMap>, - last_result: Option, -} - -// Messages passed through SWITCH_QUEUE so the driver can react to events in order. 
-#[derive(Debug)] -enum SwitchDriverMessage { - Request { - request: SwitchRequest, - respond_to: oneshot::Sender, - }, - Completion { - request: SwitchRequest, - outcome: SwitchJobOutcome, - }, - CleanupDone { - profile: SmartString, - }, -} - -#[derive(Debug)] -enum SwitchJobOutcome { - Completed { - success: bool, - cleanup: workflow::CleanupHandle, - }, - Panicked { - info: SwitchPanicInfo, - cleanup: workflow::CleanupHandle, - }, -} - -pub(super) async fn switch_profile( - profile_index: impl Into, - notify_success: bool, -) -> CmdResult { - switch_profile_impl(profile_index.into(), notify_success, false).await -} - -pub(super) async fn switch_profile_and_wait( - profile_index: impl Into, - notify_success: bool, -) -> CmdResult { - switch_profile_impl(profile_index.into(), notify_success, true).await -} - -async fn switch_profile_impl( - profile_index: SmartString, - notify_success: bool, - wait_for_completion: bool, -) -> CmdResult { - // wait_for_completion is used by CLI flows that must block until the switch finishes. 
- let manager = manager(); - let sender = switch_driver_sender(); - - let request = SwitchRequest::new( - manager.next_task_id(), - profile_index.clone(), - notify_success, - ); - - logging!( - info, - Type::Cmd, - "Queue profile switch task {} -> {} (notify={})", - request.task_id(), - profile_index, - notify_success - ); - - let task_id = request.task_id(); - let mut completion_rx = if wait_for_completion { - Some(register_completion_waiter(task_id).await) - } else { - None - }; - - let (tx, rx) = oneshot::channel(); - - let enqueue_result = match sender.try_send(SwitchDriverMessage::Request { - request, - respond_to: tx, - }) { - Ok(_) => match rx.await { - Ok(result) => Ok(result), - Err(err) => { - logging!( - error, - Type::Cmd, - "Failed to receive enqueue result for profile {}: {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - }, - Err(TrySendError::Full(msg)) => { - logging!( - warn, - Type::Cmd, - "Profile switch queue is full; waiting for space: {}", - profile_index - ); - match sender.send(msg).await { - Ok(_) => match rx.await { - Ok(result) => Ok(result), - Err(err) => { - logging!( - error, - Type::Cmd, - "Failed to receive enqueue result after wait for {}: {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - }, - Err(err) => { - logging!( - error, - Type::Cmd, - "Profile switch queue closed while waiting ({}): {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - } - } - Err(TrySendError::Closed(_)) => { - logging!( - error, - Type::Cmd, - "Profile switch queue is closed, cannot enqueue: {}", - profile_index - ); - Err("switch profile queue unavailable".into()) - } - }; - - let accepted = match enqueue_result { - Ok(result) => result, - Err(err) => { - if completion_rx.is_some() { - remove_completion_waiter(task_id).await; - } - return Err(err); - } - }; - - if !accepted { - if completion_rx.is_some() { - 
remove_completion_waiter(task_id).await; - } - return Ok(false); - } - - if let Some(rx_completion) = completion_rx.take() { - match rx_completion.await { - Ok(status) => Ok(status.success), - Err(err) => { - logging!( - error, - Type::Cmd, - "Switch task {} completion channel dropped: {}", - task_id, - err - ); - Err("profile switch completion unavailable".into()) - } - } - } else { - Ok(true) - } -} - -fn switch_driver_sender() -> &'static mpsc::Sender { - SWITCH_QUEUE.get_or_init(|| { - let (tx, rx) = mpsc::channel::(SWITCH_QUEUE_CAPACITY); - let driver_tx = tx.clone(); - tokio::spawn(async move { - let manager = manager(); - let driver = SwitchDriver::new(manager, driver_tx); - driver.run(rx).await; - }); - tx - }) -} - -struct SwitchDriver { - manager: &'static SwitchManager, - sender: mpsc::Sender, - state: SwitchDriverState, -} - -impl SwitchDriver { - fn new(manager: &'static SwitchManager, sender: mpsc::Sender) -> Self { - let state = SwitchDriverState::default(); - manager.set_status(state.snapshot(manager)); - Self { - manager, - sender, - state, - } - } - - async fn run(mut self, mut rx: mpsc::Receiver) { - while let Some(message) = rx.recv().await { - match message { - SwitchDriverMessage::Request { - request, - respond_to, - } => { - self.handle_enqueue(request, respond_to); - } - SwitchDriverMessage::Completion { request, outcome } => { - self.handle_completion(request, outcome); - } - SwitchDriverMessage::CleanupDone { profile } => { - self.handle_cleanup_done(profile); - } - } - } - } - - fn handle_enqueue(&mut self, request: SwitchRequest, respond_to: oneshot::Sender) { - // Each new request supersedes older ones for the same profile to avoid thrashing the core. 
- let mut responder = Some(respond_to); - let accepted = true; - let profile_key = request.profile_id().clone(); - let cleanup_pending = - self.state.active.is_none() && !self.state.cleanup_profiles.is_empty(); - - if cleanup_pending && self.state.cleanup_profiles.contains_key(&profile_key) { - logging!( - debug, - Type::Cmd, - "Cleanup running for {}; queueing switch task {} -> {} to run afterwards", - profile_key, - request.task_id(), - profile_key - ); - if let Some(previous) = self - .state - .latest_tokens - .insert(profile_key.clone(), request.cancel_token().clone()) - { - previous.cancel(); - } - self.state - .queue - .retain(|queued| queued.profile_id() != &profile_key); - self.state.queue.push_back(request); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - self.publish_status(); - return; - } - - if cleanup_pending { - logging!( - debug, - Type::Cmd, - "Cleanup running for {} profile(s); queueing task {} -> {} to run after cleanup without clearing existing requests", - self.state.cleanup_profiles.len(), - request.task_id(), - profile_key - ); - } - - if let Some(previous) = self - .state - .latest_tokens - .insert(profile_key.clone(), request.cancel_token().clone()) - { - previous.cancel(); - } - - if let Some(active) = self.state.active.as_mut() - && active.profile_id() == &profile_key - { - active.cancel_token().cancel(); - active.merge_notify(request.notify()); - self.state - .queue - .retain(|queued| queued.profile_id() != &profile_key); - self.state.queue.push_front(request.clone()); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - self.publish_status(); - return; - } - - if let Some(active) = self.state.active.as_ref() { - logging!( - debug, - Type::Cmd, - "Cancelling active switch task {} (profile={}) in favour of task {} -> {}", - active.task_id(), - active.profile_id(), - request.task_id(), - profile_key - ); - active.cancel_token().cancel(); - } - - self.state - .queue - 
.retain(|queued| queued.profile_id() != &profile_key); - - self.state.queue.push_back(request.clone()); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - - self.start_next_job(); - self.publish_status(); - } - - fn handle_completion(&mut self, request: SwitchRequest, outcome: SwitchJobOutcome) { - // Translate the workflow result into an event the frontend can understand. - let result_record = match &outcome { - SwitchJobOutcome::Completed { success, .. } => { - logging!( - info, - Type::Cmd, - "Switch task {} completed (success={})", - request.task_id(), - success - ); - if *success { - SwitchResultStatus::success(request.task_id(), request.profile_id()) - } else { - SwitchResultStatus::failed(request.task_id(), request.profile_id(), None, None) - } - } - SwitchJobOutcome::Panicked { info, .. } => { - logging!( - error, - Type::Cmd, - "Switch task {} panicked at stage {:?}: {}", - request.task_id(), - info.stage, - info.detail - ); - SwitchResultStatus::failed( - request.task_id(), - request.profile_id(), - Some(format!("{:?}", info.stage)), - Some(info.detail.clone()), - ) - } - }; - - if let Some(active) = self.state.active.as_ref() - && active.task_id() == request.task_id() - { - self.state.active = None; - } - - if let Some(latest) = self.state.latest_tokens.get(request.profile_id()) - && latest.same_token(request.cancel_token()) - { - self.state.latest_tokens.remove(request.profile_id()); - } - - let cleanup = match outcome { - SwitchJobOutcome::Completed { cleanup, .. } => cleanup, - SwitchJobOutcome::Panicked { cleanup, .. 
} => cleanup, - }; - - self.track_cleanup(request.profile_id().clone(), cleanup); - - let event_record = result_record.clone(); - self.state.last_result = Some(result_record); - notify_completion_waiter(request.task_id(), event_record.clone()); - self.manager.push_event(event_record); - self.start_next_job(); - self.publish_status(); - } - - fn handle_cleanup_done(&mut self, profile: SmartString) { - if let Some(handle) = self.state.cleanup_profiles.remove(&profile) { - handle.abort(); - } - self.start_next_job(); - self.publish_status(); - } - - fn start_next_job(&mut self) { - if self.state.active.is_some() || !self.state.cleanup_profiles.is_empty() { - self.publish_status(); - return; - } - - while let Some(request) = self.state.queue.pop_front() { - if request.cancel_token().is_cancelled() { - self.discard_request(request); - continue; - } - - self.state.active = Some(request.clone()); - self.start_switch_job(request); - break; - } - - self.publish_status(); - } - - fn track_cleanup(&mut self, profile: SmartString, cleanup: workflow::CleanupHandle) { - if let Some(existing) = self.state.cleanup_profiles.remove(&profile) { - existing.abort(); - } - - let driver_tx = self.sender.clone(); - let profile_clone = profile.clone(); - let handle = tokio::spawn(async move { - let profile_label = profile_clone.clone(); - if let Err(err) = cleanup.await { - logging!( - warn, - Type::Cmd, - "Cleanup task for profile {} failed: {}", - profile_label.as_str(), - err - ); - } - if let Err(err) = driver_tx - .send(SwitchDriverMessage::CleanupDone { - profile: profile_clone, - }) - .await - { - logging!( - error, - Type::Cmd, - "Failed to push cleanup completion for profile {}: {}", - profile_label.as_str(), - err - ); - } - }); - self.state.cleanup_profiles.insert(profile, handle); - } - - fn start_switch_job(&self, request: SwitchRequest) { - // Run the workflow in a background task while the driver keeps processing messages. 
- let driver_tx = self.sender.clone(); - let manager = self.manager; - - let completion_request = request.clone(); - let heartbeat = request.heartbeat().clone(); - let cancel_token = request.cancel_token().clone(); - let task_id = request.task_id(); - let profile_label = request.profile_id().clone(); - - tokio::spawn(async move { - let mut watchdog_interval = time::interval(WATCHDOG_TICK); - watchdog_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); - - let workflow_fut = - AssertUnwindSafe(workflow::run_switch_job(manager, request)).catch_unwind(); - tokio::pin!(workflow_fut); - - let job_result = loop { - tokio::select! { - res = workflow_fut.as_mut() => { - break match res { - Ok(Ok(result)) => SwitchJobOutcome::Completed { - success: result.success, - cleanup: result.cleanup, - }, - Ok(Err(error)) => SwitchJobOutcome::Panicked { - info: error.info, - cleanup: error.cleanup, - }, - Err(payload) => { - let info = SwitchPanicInfo::driver_task( - workflow::describe_panic_payload(payload.as_ref()), - ); - let cleanup = workflow::schedule_post_switch_failure( - profile_label.clone(), - completion_request.notify(), - completion_request.task_id(), - ); - SwitchJobOutcome::Panicked { info, cleanup } - } - }; - } - _ = watchdog_interval.tick() => { - if cancel_token.is_cancelled() { - continue; - } - let elapsed = heartbeat.elapsed(); - if elapsed > WATCHDOG_TIMEOUT { - let stage = SwitchStage::from_code(heartbeat.stage_code()) - .unwrap_or(SwitchStage::Workflow); - logging!( - warn, - Type::Cmd, - "Switch task {} watchdog timeout (profile={} stage={:?}, elapsed={:?}); cancelling", - task_id, - profile_label.as_str(), - stage, - elapsed - ); - cancel_token.cancel(); - } - } - } - }; - - let request_for_error = completion_request.clone(); - - if let Err(err) = driver_tx - .send(SwitchDriverMessage::Completion { - request: completion_request, - outcome: job_result, - }) - .await - { - logging!( - error, - Type::Cmd, - "Failed to push switch completion to driver: 
{}", - err - ); - notify_completion_waiter( - request_for_error.task_id(), - SwitchResultStatus::failed( - request_for_error.task_id(), - request_for_error.profile_id(), - Some("driver".to_string()), - Some(format!("completion dispatch failed: {}", err)), - ), - ); - } - }); - } - - /// Mark a request as failed because a newer request superseded it. - fn discard_request(&mut self, request: SwitchRequest) { - let key = request.profile_id().clone(); - let should_remove = self - .state - .latest_tokens - .get(&key) - .map(|latest| latest.same_token(request.cancel_token())) - .unwrap_or(false); - - if should_remove { - self.state.latest_tokens.remove(&key); - } - - if !request.cancel_token().is_cancelled() { - request.cancel_token().cancel(); - } - - let event = SwitchResultStatus::cancelled( - request.task_id(), - request.profile_id(), - Some("request superseded".to_string()), - ); - - self.state.last_result = Some(event.clone()); - notify_completion_waiter(request.task_id(), event.clone()); - self.manager.push_event(event); - } - - fn publish_status(&self) { - self.manager.set_status(self.state.snapshot(self.manager)); - } -} - -impl SwitchDriverState { - /// Lightweight struct suitable for sharing across the command boundary. 
- fn snapshot(&self, manager: &SwitchManager) -> ProfileSwitchStatus { - let active = self - .active - .as_ref() - .map(|req| SwitchTaskStatus::from_request(req, false)); - let queue = self - .queue - .iter() - .map(|req| SwitchTaskStatus::from_request(req, true)) - .collect::>(); - let cleanup_profiles = self - .cleanup_profiles - .keys() - .map(|key| key.to_string()) - .collect::>(); - - ProfileSwitchStatus { - is_switching: manager.is_switching(), - active, - queue, - cleanup_profiles, - last_result: self.last_result.clone(), - last_updated: current_millis(), - } - } -} diff --git a/src-tauri/src/cmd/profile_switch/mod.rs b/src-tauri/src/cmd/profile_switch/mod.rs deleted file mode 100644 index 0729c68d..00000000 --- a/src-tauri/src/cmd/profile_switch/mod.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Profile switch orchestration: plumbing between the public tauri commands, -// the async driver queue, validation helpers, and the state machine workflow. -mod driver; -mod state; -mod validation; -mod workflow; - -pub use state::{ProfileSwitchStatus, SwitchResultEvent}; - -use smartstring::alias::String; - -use super::CmdResult; - -pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult { - workflow::patch_profiles_config(profiles).await -} - -pub(super) async fn patch_profiles_config_by_profile_index( - profile_index: String, -) -> CmdResult { - driver::switch_profile_and_wait(profile_index, false).await -} - -pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { - driver::switch_profile(profile_index, notify_success).await -} - -pub(super) fn get_switch_status() -> CmdResult { - Ok(state::manager().status_snapshot()) -} - -pub(super) fn get_switch_events(after_sequence: u64) -> CmdResult> { - Ok(state::manager().events_after(after_sequence)) -} diff --git a/src-tauri/src/cmd/profile_switch/state.rs b/src-tauri/src/cmd/profile_switch/state.rs deleted file mode 100644 index 1bb52d6b..00000000 --- 
a/src-tauri/src/cmd/profile_switch/state.rs +++ /dev/null @@ -1,353 +0,0 @@ -use once_cell::sync::OnceCell; -use parking_lot::RwLock; -use serde::Serialize; -use smartstring::alias::String as SmartString; -use std::collections::VecDeque; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::sync::{Mutex, Notify}; - -pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30); -pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5); - -static SWITCH_MANAGER: OnceCell = OnceCell::new(); - -pub(super) fn manager() -> &'static SwitchManager { - SWITCH_MANAGER.get_or_init(SwitchManager::default) -} - -#[derive(Debug)] -// Central coordination point shared between the driver and workflow state machine. -pub(super) struct SwitchManager { - core_mutex: Mutex<()>, - request_sequence: AtomicU64, - switching: AtomicBool, - task_sequence: AtomicU64, - status: RwLock, - event_sequence: AtomicU64, - recent_events: RwLock>, -} - -impl Default for SwitchManager { - fn default() -> Self { - Self { - core_mutex: Mutex::new(()), - request_sequence: AtomicU64::new(0), - switching: AtomicBool::new(false), - task_sequence: AtomicU64::new(0), - status: RwLock::new(ProfileSwitchStatus::default()), - event_sequence: AtomicU64::new(0), - recent_events: RwLock::new(VecDeque::with_capacity(32)), - } - } -} - -impl SwitchManager { - pub(super) fn core_mutex(&self) -> &Mutex<()> { - &self.core_mutex - } - - // Monotonic identifiers so logs can correlate enqueue/finish pairs. - pub(super) fn next_task_id(&self) -> u64 { - self.task_sequence.fetch_add(1, Ordering::SeqCst) + 1 - } - - /// Sequence id assigned to each enqueue request so we can spot stale work. 
- pub(super) fn next_request_sequence(&self) -> u64 { - self.request_sequence.fetch_add(1, Ordering::SeqCst) + 1 - } - - pub(super) fn latest_request_sequence(&self) -> u64 { - self.request_sequence.load(Ordering::SeqCst) - } - - pub(super) fn begin_switch(&'static self) -> SwitchScope<'static> { - self.switching.store(true, Ordering::SeqCst); - SwitchScope { manager: self } - } - - pub(super) fn is_switching(&self) -> bool { - self.switching.load(Ordering::SeqCst) - } - - pub(super) fn set_status(&self, status: ProfileSwitchStatus) { - *self.status.write() = status; - } - - pub(super) fn status_snapshot(&self) -> ProfileSwitchStatus { - self.status.read().clone() - } - pub(super) fn push_event(&self, result: SwitchResultStatus) { - const MAX_EVENTS: usize = 64; - let sequence = self.event_sequence.fetch_add(1, Ordering::SeqCst) + 1; - let mut guard = self.recent_events.write(); - if guard.len() == MAX_EVENTS { - guard.pop_front(); - } - guard.push_back(SwitchResultEvent { sequence, result }); - } - - pub(super) fn events_after(&self, sequence: u64) -> Vec { - self.recent_events - .read() - .iter() - .filter(|event| event.sequence > sequence) - .cloned() - .collect() - } -} - -pub(super) struct SwitchScope<'a> { - manager: &'a SwitchManager, -} - -impl Drop for SwitchScope<'_> { - fn drop(&mut self) { - self.manager.switching.store(false, Ordering::SeqCst); - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchCancellation { - flag: Arc, - notify: Arc, -} - -impl SwitchCancellation { - pub(super) fn new() -> Self { - Self { - flag: Arc::new(AtomicBool::new(false)), - notify: Arc::new(Notify::new()), - } - } - - pub(super) fn cancel(&self) { - self.flag.store(true, Ordering::SeqCst); - self.notify.notify_waiters(); - } - - /// True if another request already cancelled this job. 
- pub(super) fn is_cancelled(&self) -> bool { - self.flag.load(Ordering::SeqCst) - } - - pub(super) fn same_token(&self, other: &SwitchCancellation) -> bool { - Arc::ptr_eq(&self.flag, &other.flag) - } - - pub(super) async fn cancelled_future(&self) { - // Used by async blocks that want to pause until a newer request pre-empts them. - if self.is_cancelled() { - return; - } - self.notify.notified().await; - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchRequest { - task_id: u64, - profile_id: SmartString, - notify: bool, - cancel_token: SwitchCancellation, - heartbeat: SwitchHeartbeat, -} - -impl SwitchRequest { - pub(super) fn new(task_id: u64, profile_id: SmartString, notify: bool) -> Self { - Self { - task_id, - profile_id, - notify, - cancel_token: SwitchCancellation::new(), - heartbeat: SwitchHeartbeat::new(), - } - } - - pub(super) fn task_id(&self) -> u64 { - self.task_id - } - - pub(super) fn profile_id(&self) -> &SmartString { - &self.profile_id - } - - pub(super) fn notify(&self) -> bool { - self.notify - } - - pub(super) fn merge_notify(&mut self, notify: bool) { - // When a new request wants a toast, remember it even if an older request did not. 
- if notify { - self.notify = true; - } - } - - pub(super) fn cancel_token(&self) -> &SwitchCancellation { - &self.cancel_token - } - - pub(super) fn heartbeat(&self) -> &SwitchHeartbeat { - &self.heartbeat - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchHeartbeat { - last_tick_millis: Arc, - stage_code: Arc, -} - -fn now_millis() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 -} - -#[derive(Debug, Clone, Serialize, Default)] -#[serde(rename_all = "camelCase")] -pub struct ProfileSwitchStatus { - pub is_switching: bool, - pub active: Option, - pub queue: Vec, - pub cleanup_profiles: Vec, - pub last_result: Option, - pub last_updated: u64, -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchTaskStatus { - pub task_id: u64, - pub profile_id: String, - pub notify: bool, - pub stage: Option, - pub queued: bool, -} - -impl SwitchTaskStatus { - pub(super) fn from_request(request: &SwitchRequest, queued: bool) -> Self { - Self { - task_id: request.task_id(), - profile_id: request.profile_id().to_string(), - notify: request.notify(), - stage: if queued { - None - } else { - Some(request.heartbeat().stage_code()) - }, - queued, - } - } -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchResultStatus { - pub task_id: u64, - pub profile_id: String, - pub success: bool, - pub cancelled: bool, - pub finished_at: u64, - pub error_stage: Option, - pub error_detail: Option, -} - -impl SwitchResultStatus { - pub(super) fn success(task_id: u64, profile_id: &SmartString) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), - success: true, - cancelled: false, - finished_at: now_millis(), - error_stage: None, - error_detail: None, - } - } - - pub(super) fn failed( - task_id: u64, - profile_id: &SmartString, - stage: Option, - detail: Option, - ) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), 
- success: false, - cancelled: false, - finished_at: now_millis(), - error_stage: stage, - error_detail: detail, - } - } - - pub(super) fn cancelled( - task_id: u64, - profile_id: &SmartString, - detail: Option, - ) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), - success: false, - cancelled: true, - finished_at: now_millis(), - error_stage: Some("cancelled".to_string()), - error_detail: detail, - } - } -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchResultEvent { - pub sequence: u64, - pub result: SwitchResultStatus, -} - -pub(super) fn current_millis() -> u64 { - now_millis() -} - -impl SwitchHeartbeat { - fn now_millis() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 - } - - pub(super) fn new() -> Self { - let heartbeat = Self { - last_tick_millis: Arc::new(AtomicU64::new(Self::now_millis())), - stage_code: Arc::new(AtomicU32::new(0)), - }; - heartbeat.touch(); - heartbeat - } - - pub(super) fn touch(&self) { - self.last_tick_millis - .store(Self::now_millis(), Ordering::SeqCst); - } - - /// Update the internal timer to reflect the amount of time since the last heartbeat. 
- pub(super) fn elapsed(&self) -> Duration { - let last = self.last_tick_millis.load(Ordering::SeqCst); - let now = Self::now_millis(); - Duration::from_millis(now.saturating_sub(last)) - } - - pub(super) fn set_stage(&self, stage: u32) { - self.stage_code.store(stage, Ordering::SeqCst); - self.touch(); - } - - pub(super) fn stage_code(&self) -> u32 { - self.stage_code.load(Ordering::SeqCst) - } -} diff --git a/src-tauri/src/cmd/profile_switch/validation.rs b/src-tauri/src/cmd/profile_switch/validation.rs deleted file mode 100644 index b15806cf..00000000 --- a/src-tauri/src/cmd/profile_switch/validation.rs +++ /dev/null @@ -1,113 +0,0 @@ -use crate::{ - config::Config, - logging, - process::AsyncHandler, - utils::{dirs, logging::Type}, -}; -use serde_yaml_ng as serde_yaml; -use smartstring::alias::String; -use std::time::Duration; -use tokio::{fs as tokio_fs, time}; - -const YAML_READ_TIMEOUT: Duration = Duration::from_secs(5); - -/// Verify that the requested profile exists locally and is well-formed before switching. 
-pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> { - logging!( - info, - Type::Cmd, - "Validating profile switch task {} -> {}", - task_id, - profile_id - ); - - let profile_key: String = profile_id.into(); - let (file_path, profile_type, is_current, remote_url) = { - let profiles_guard = Config::profiles().await; - let latest = profiles_guard.latest_ref(); - let item = latest.get_item(&profile_key).map_err(|err| -> String { - format!("Target profile {} not found: {}", profile_id, err).into() - })?; - ( - item.file.clone().map(|f| f.to_string()), - item.itype.clone().map(|t| t.to_string()), - latest - .current - .as_ref() - .map(|current| current.as_str() == profile_id) - .unwrap_or(false), - item.url.clone().map(|u| u.to_string()), - ) - }; - - if is_current { - logging!( - info, - Type::Cmd, - "Switch task {} is targeting the current profile {}; skipping validation", - task_id, - profile_id - ); - return Ok(()); - } - - if matches!(profile_type.as_deref(), Some("remote")) { - // Remote profiles must retain a URL so the subsequent refresh job knows where to download. 
- let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false); - if !has_url { - return Err({ - let msg = format!("Remote profile {} is missing a download URL", profile_id); - msg.into() - }); - } - } - - if let Some(file) = file_path { - let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String { - format!("Failed to resolve profiles directory: {}", err).into() - })?; - let path = profiles_dir.join(&file); - - let contents = match time::timeout(YAML_READ_TIMEOUT, tokio_fs::read_to_string(&path)).await - { - Ok(Ok(contents)) => contents, - Ok(Err(err)) => { - return Err( - format!("Failed to read profile file {}: {}", path.display(), err).into(), - ); - } - Err(_) => { - return Err(format!( - "Timed out reading profile file {} after {:?}", - path.display(), - YAML_READ_TIMEOUT - ) - .into()); - } - }; - - let parse_result = AsyncHandler::spawn_blocking(move || { - serde_yaml::from_str::(&contents) - }) - .await; - - match parse_result { - Ok(Ok(_)) => {} - Ok(Err(err)) => { - return Err( - format!("Profile YAML parse failed for {}: {}", path.display(), err).into(), - ); - } - Err(join_err) => { - return Err(format!( - "Profile YAML parse task panicked for {}: {}", - path.display(), - join_err - ) - .into()); - } - } - } - - Ok(()) -} diff --git a/src-tauri/src/cmd/profile_switch/workflow.rs b/src-tauri/src/cmd/profile_switch/workflow.rs deleted file mode 100644 index 27d16269..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow.rs +++ /dev/null @@ -1,385 +0,0 @@ -use super::{ - CmdResult, - state::{SWITCH_JOB_TIMEOUT, SwitchManager, SwitchRequest, manager}, - validation::validate_switch_request, -}; -use crate::cmd::StringifyErr; -use crate::{ - config::{Config, IProfiles, profiles::profiles_save_file_safe}, - core::handle, - logging, - process::AsyncHandler, - utils::{dirs, logging::Type}, -}; -use futures::FutureExt; -use serde_yaml_ng as serde_yaml; -use smartstring::alias::String as SmartString; -use std::{any::Any, 
panic::AssertUnwindSafe, time::Duration}; -use tokio::{fs as tokio_fs, time}; - -mod cleanup; -mod state_machine; -pub(super) use cleanup::{ - CleanupHandle, schedule_post_switch_failure, schedule_post_switch_success, -}; - -use state_machine::{CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchStateMachine}; -pub(super) use state_machine::{SwitchPanicInfo, SwitchStage}; - -pub(super) struct SwitchWorkflowResult { - pub success: bool, - pub cleanup: CleanupHandle, -} - -pub(super) struct SwitchWorkflowError { - pub info: SwitchPanicInfo, - pub cleanup: CleanupHandle, -} - -pub(super) async fn run_switch_job( - manager: &'static SwitchManager, - request: SwitchRequest, -) -> Result { - // Short-circuit cancelled jobs before we allocate resources or emit events. - if request.cancel_token().is_cancelled() { - logging!( - info, - Type::Cmd, - "Switch task {} cancelled before validation", - request.task_id() - ); - let cleanup = schedule_post_switch_failure( - request.profile_id().clone(), - request.notify(), - request.task_id(), - ); - return Ok(SwitchWorkflowResult { - success: false, - cleanup, - }); - } - - let profile_id = request.profile_id().clone(); - let task_id = request.task_id(); - let notify = request.notify(); - - if let Err(err) = validate_switch_request(task_id, profile_id.as_str()).await { - logging!( - warn, - Type::Cmd, - "Validation failed for switch task {} -> {}: {}", - task_id, - profile_id, - err - ); - handle::Handle::notice_message("config_validate::error", err.clone()); - let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); - return Ok(SwitchWorkflowResult { - success: false, - cleanup, - }); - } - - logging!( - info, - Type::Cmd, - "Starting switch task {} for profile {} (notify={})", - task_id, - profile_id, - notify - ); - - let pipeline_request = request; - // The state machine owns the heavy lifting. We wrap it with timeout/panic guards so the driver never hangs. 
- let pipeline = async move { - let target_profile = pipeline_request.profile_id().clone(); - SwitchStateMachine::new( - manager, - Some(pipeline_request), - IProfiles { - current: Some(target_profile), - items: None, - }, - ) - .run() - .await - }; - - match time::timeout( - SWITCH_JOB_TIMEOUT, - AssertUnwindSafe(pipeline).catch_unwind(), - ) - .await - { - Err(_) => { - logging!( - error, - Type::Cmd, - "Profile switch task {} timed out after {:?}", - task_id, - SWITCH_JOB_TIMEOUT - ); - handle::Handle::notice_message( - "config_validate::error", - format!("profile switch timed out: {}", profile_id), - ); - let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); - Ok(SwitchWorkflowResult { - success: false, - cleanup, - }) - } - Ok(Err(panic_payload)) => { - let panic_message = describe_panic_payload(panic_payload.as_ref()); - logging!( - error, - Type::Cmd, - "Panic captured during profile switch task {} ({}): {}", - task_id, - profile_id, - panic_message - ); - handle::Handle::notice_message( - "config_validate::panic", - format!("profile switch panic: {}", profile_id), - ); - let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); - Err(SwitchWorkflowError { - info: SwitchPanicInfo::workflow_root(panic_message), - cleanup, - }) - } - Ok(Ok(machine_result)) => match machine_result { - Ok(cmd_result) => match cmd_result { - Ok(success) => { - let cleanup = - schedule_post_switch_success(profile_id.clone(), success, notify, task_id); - Ok(SwitchWorkflowResult { success, cleanup }) - } - Err(err) => { - logging!( - error, - Type::Cmd, - "Profile switch failed ({}): {}", - profile_id, - err - ); - handle::Handle::notice_message("config_validate::error", err.clone()); - let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); - Ok(SwitchWorkflowResult { - success: false, - cleanup, - }) - } - }, - Err(panic_info) => { - logging!( - error, - Type::Cmd, - "State machine panic during profile 
switch task {} ({} {:?}): {}", - task_id, - profile_id, - panic_info.stage, - panic_info.detail - ); - handle::Handle::notice_message( - "config_validate::panic", - format!("profile switch panic: {}", profile_id), - ); - let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); - Err(SwitchWorkflowError { - info: panic_info, - cleanup, - }) - } - }, - } -} - -/// Allow patch operations (no driver request) to use the same state machine pipeline. -pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { - match SwitchStateMachine::new(manager(), None, profiles) - .run() - .await - { - Ok(result) => result, - Err(panic_info) => Err(format!( - "profile switch panic ({:?}): {}", - panic_info.stage, panic_info.detail - ) - .into()), - } -} - -/// Parse the target profile YAML on a background thread to catch syntax errors early. -pub(super) async fn validate_profile_yaml(profile: &SmartString) -> CmdResult { - let file_path = { - let profiles_guard = Config::profiles().await; - let profiles_data = profiles_guard.latest_ref(); - match profiles_data.get_item(profile) { - Ok(item) => item.file.as_ref().and_then(|file| { - dirs::app_profiles_dir() - .ok() - .map(|dir| dir.join(file.as_str())) - }), - Err(e) => { - logging!( - error, - Type::Cmd, - "Failed to load target profile metadata: {}", - e - ); - return Ok(false); - } - } - }; - - let Some(path) = file_path else { - return Ok(true); - }; - - if !path.exists() { - logging!( - error, - Type::Cmd, - "Target profile file does not exist: {}", - path.display() - ); - handle::Handle::notice_message( - "config_validate::file_not_found", - format!("{}", path.display()), - ); - return Ok(false); - } - - let file_read_result = - time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&path)).await; - - match file_read_result { - Ok(Ok(content)) => { - let yaml_parse_result = AsyncHandler::spawn_blocking(move || { - serde_yaml::from_str::(&content) - }) - .await; - - match 
yaml_parse_result { - Ok(Ok(_)) => { - logging!(info, Type::Cmd, "Target profile YAML syntax is valid"); - Ok(true) - } - Ok(Err(err)) => { - let error_msg = format!(" {err}"); - logging!( - error, - Type::Cmd, - "Target profile contains YAML syntax errors: {}", - error_msg - ); - handle::Handle::notice_message( - "config_validate::yaml_syntax_error", - error_msg.clone(), - ); - Ok(false) - } - Err(join_err) => { - let error_msg = format!("YAML parsing task failed: {join_err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::yaml_parse_error", - error_msg.clone(), - ); - Ok(false) - } - } - } - Ok(Err(err)) => { - let error_msg = format!("Failed to read target profile file: {err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message("config_validate::file_read_error", error_msg.clone()); - Ok(false) - } - Err(_) => { - let error_msg = "Timed out reading profile file (5s)".to_string(); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message("config_validate::file_read_timeout", error_msg.clone()); - Err(error_msg.into()) - } - } -} - -/// Best-effort rollback invoked when a switch fails midway through the pipeline. 
-pub(super) async fn restore_previous_profile(previous: Option) -> CmdResult<()> { - if let Some(prev_profile) = previous { - logging!( - info, - Type::Cmd, - "Attempting to restore previous configuration: {}", - prev_profile - ); - let restore_profiles = IProfiles { - current: Some(prev_profile), - items: None, - }; - Config::profiles() - .await - .draft_mut() - .patch_config(restore_profiles) - .stringify_err()?; - if time::timeout(CONFIG_APPLY_TIMEOUT, async { - Config::profiles().await.apply(); - }) - .await - .is_err() - { - logging!( - warn, - Type::Cmd, - "Restoring previous configuration timed out after {:?}", - CONFIG_APPLY_TIMEOUT - ); - return Ok(()); - } - - AsyncHandler::spawn(|| async move { - let save_future = AsyncHandler::spawn_blocking(|| { - futures::executor::block_on(async { profiles_save_file_safe().await }) - }); - match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { - Ok(join_res) => match join_res { - Ok(Ok(())) => {} - Ok(Err(err)) => { - logging!( - warn, - Type::Cmd, - "Failed to persist restored configuration asynchronously: {}", - err - ); - } - Err(join_err) => { - logging!(warn, Type::Cmd, "Blocking save task failed: {}", join_err); - } - }, - Err(_) => { - logging!( - warn, - Type::Cmd, - "Persisting restored configuration timed out after {:?}", - SAVE_PROFILES_TIMEOUT - ); - } - } - }); - } - - Ok(()) -} - -pub(super) fn describe_panic_payload(payload: &(dyn Any + Send)) -> String { - if let Some(message) = payload.downcast_ref::<&str>() { - (*message).to_string() - } else if let Some(message) = payload.downcast_ref::() { - message.clone() - } else { - "unknown panic".into() - } -} diff --git a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs deleted file mode 100644 index 2f7e1aac..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs +++ /dev/null @@ -1,65 +0,0 @@ -use super::super::state::SWITCH_CLEANUP_TIMEOUT; -use crate::{core::handle, 
logging, process::AsyncHandler, utils::logging::Type}; -use smartstring::alias::String as SmartString; -use tokio::time; - -pub(crate) type CleanupHandle = tauri::async_runtime::JoinHandle<()>; - -pub(crate) fn schedule_post_switch_success( - profile_id: SmartString, - success: bool, - notify: bool, - task_id: u64, -) -> CleanupHandle { - // Post-success cleanup runs detached from the driver so the queue keeps moving. - AsyncHandler::spawn(move || async move { - handle::Handle::notify_profile_switch_finished( - profile_id.clone(), - success, - notify, - task_id, - ); - if success { - close_connections_after_switch(profile_id).await; - } - }) -} - -pub(crate) fn schedule_post_switch_failure( - profile_id: SmartString, - notify: bool, - task_id: u64, -) -> CleanupHandle { - // Failures or cancellations do not alter the active profile, so skip draining live connections. - AsyncHandler::spawn(move || async move { - handle::Handle::notify_profile_switch_finished(profile_id.clone(), false, notify, task_id); - }) -} - -async fn close_connections_after_switch(profile_id: SmartString) { - match time::timeout(SWITCH_CLEANUP_TIMEOUT, async { - handle::Handle::mihomo().await.close_all_connections().await - }) - .await - { - Ok(Ok(())) => {} - Ok(Err(err)) => { - logging!( - warn, - Type::Cmd, - "Failed to close connections after profile switch ({}): {}", - profile_id, - err - ); - } - Err(_) => { - logging!( - warn, - Type::Cmd, - "Closing connections after profile switch ({}) timed out after {:?}", - profile_id, - SWITCH_CLEANUP_TIMEOUT - ); - } - } -} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs deleted file mode 100644 index 9de753db..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs +++ /dev/null @@ -1,178 +0,0 @@ -use super::{CmdResult, core::SwitchStage}; -use crate::{ - cmd::profile_switch::state::{ - SwitchCancellation, 
SwitchHeartbeat, SwitchManager, SwitchRequest, SwitchScope, - }, - config::IProfiles, - logging, - utils::logging::Type, -}; -use smartstring::alias::String as SmartString; -use tokio::sync::MutexGuard; - -pub(super) struct SwitchContext { - pub(super) manager: &'static SwitchManager, - pub(super) request: Option, - pub(super) profiles_patch: Option, - pub(super) sequence: Option, - pub(super) target_profile: Option, - pub(super) previous_profile: Option, - pub(super) new_profile_for_event: Option, - pub(super) switch_scope: Option>, - pub(super) core_guard: Option>, - pub(super) heartbeat: SwitchHeartbeat, - pub(super) task_id: Option, - pub(super) profile_label: SmartString, - pub(super) active_stage: SwitchStage, -} - -impl SwitchContext { - // Captures all mutable data required across states (locks, profile ids, etc). - pub(super) fn new( - manager: &'static SwitchManager, - request: Option, - profiles: IProfiles, - heartbeat: SwitchHeartbeat, - ) -> Self { - let task_id = request.as_ref().map(|req| req.task_id()); - let profile_label = request - .as_ref() - .map(|req| req.profile_id().clone()) - .or_else(|| profiles.current.clone()) - .unwrap_or_else(|| SmartString::from("unknown")); - heartbeat.touch(); - Self { - manager, - request, - profiles_patch: Some(profiles), - sequence: None, - target_profile: None, - previous_profile: None, - new_profile_for_event: None, - switch_scope: None, - core_guard: None, - heartbeat, - task_id, - profile_label, - active_stage: SwitchStage::Start, - } - } - - pub(super) fn ensure_target_profile(&mut self) { - // Lazily determine which profile we're switching to so shared paths (patch vs. driver) behave the same. 
- if let Some(patch) = self.profiles_patch.as_mut() { - if patch.current.is_none() - && let Some(request) = self.request.as_ref() - { - patch.current = Some(request.profile_id().clone()); - } - self.target_profile = patch.current.clone(); - } - } - - pub(super) fn take_profiles_patch(&mut self) -> CmdResult { - self.profiles_patch - .take() - .ok_or_else(|| "profiles patch already consumed".into()) - } - - pub(super) fn cancel_token(&self) -> Option { - self.request.as_ref().map(|req| req.cancel_token().clone()) - } - - pub(super) fn cancelled(&self) -> bool { - self.request - .as_ref() - .map(|req| req.cancel_token().is_cancelled()) - .unwrap_or(false) - } - - pub(super) fn log_cancelled(&self, stage: &str) { - if let Some(request) = self.request.as_ref() { - logging!( - info, - Type::Cmd, - "Switch task {} cancelled {}; profile={}", - request.task_id(), - stage, - request.profile_id() - ); - } else { - logging!(info, Type::Cmd, "Profile switch cancelled {}", stage); - } - } - - pub(super) fn should_validate_target(&self) -> bool { - match (&self.target_profile, &self.previous_profile) { - (Some(target), Some(current)) => current != target, - (Some(_), None) => true, - _ => false, - } - } - - pub(super) fn stale(&self) -> bool { - self.sequence - .map(|seq| seq < self.manager.latest_request_sequence()) - .unwrap_or(false) - } - - pub(super) fn sequence(&self) -> u64 { - self.sequence.unwrap_or_else(|| { - logging!( - warn, - Type::Cmd, - "Sequence unexpectedly missing in switch context; defaulting to 0" - ); - 0 - }) - } - - pub(super) fn record_stage(&mut self, stage: SwitchStage) { - let since_last = self.heartbeat.elapsed(); - let previous = self.active_stage; - self.active_stage = stage; - self.heartbeat.set_stage(stage.as_code()); - - match self.task_id { - Some(task_id) => logging!( - debug, - Type::Cmd, - "Switch task {} (profile={}) transitioned {:?} -> {:?} after {:?}", - task_id, - self.profile_label, - previous, - stage, - since_last - ), - None => 
logging!( - debug, - Type::Cmd, - "Profile patch {} transitioned {:?} -> {:?} after {:?}", - self.profile_label, - previous, - stage, - since_last - ), - } - } - - pub(super) fn release_core_guard(&mut self) { - self.core_guard = None; - } - - pub(super) fn release_switch_scope(&mut self) { - self.switch_scope = None; - } - - pub(super) fn release_locks(&mut self) { - self.release_core_guard(); - self.release_switch_scope(); - } -} - -impl Drop for SwitchContext { - fn drop(&mut self) { - self.core_guard.take(); - self.switch_scope.take(); - } -} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs deleted file mode 100644 index 1c4e32ab..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs +++ /dev/null @@ -1,284 +0,0 @@ -use super::{CmdResult, context::SwitchContext, describe_panic_payload}; -use crate::{ - cmd::profile_switch::state::{SwitchHeartbeat, SwitchManager, SwitchRequest}, - config::IProfiles, - logging, - utils::logging::Type, -}; -use futures::FutureExt; -use std::{ - mem, - panic::AssertUnwindSafe, - time::{Duration, Instant}, -}; -pub(crate) const CONFIG_APPLY_TIMEOUT: Duration = Duration::from_secs(5); -pub(crate) const TRAY_UPDATE_TIMEOUT: Duration = Duration::from_secs(3); -pub(crate) const REFRESH_TIMEOUT: Duration = Duration::from_secs(3); -pub(crate) const SAVE_PROFILES_TIMEOUT: Duration = Duration::from_secs(5); -pub(crate) const SWITCH_IDLE_WAIT_TIMEOUT: Duration = Duration::from_secs(30); -pub(crate) const SWITCH_IDLE_WAIT_POLL: Duration = Duration::from_millis(25); -pub(crate) const SWITCH_IDLE_WAIT_MAX_BACKOFF: Duration = Duration::from_millis(250); - -/// Explicit state machine for profile switching so we can reason about -/// cancellation, stale requests, and side effects at each stage. 
-pub(crate) struct SwitchStateMachine { - pub(super) ctx: SwitchContext, - state: SwitchState, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum SwitchStage { - Start, - AcquireCore, - Prepare, - ValidateTarget, - PatchDraft, - UpdateCore, - Finalize, - Workflow, - DriverTask, -} - -impl SwitchStage { - pub(crate) fn as_code(self) -> u32 { - match self { - SwitchStage::Start => 0, - SwitchStage::AcquireCore => 1, - SwitchStage::Prepare => 2, - SwitchStage::ValidateTarget => 3, - SwitchStage::PatchDraft => 4, - SwitchStage::UpdateCore => 5, - SwitchStage::Finalize => 6, - SwitchStage::Workflow => 7, - SwitchStage::DriverTask => 8, - } - } - - pub(crate) fn from_code(code: u32) -> Option { - Some(match code { - 0 => SwitchStage::Start, - 1 => SwitchStage::AcquireCore, - 2 => SwitchStage::Prepare, - 3 => SwitchStage::ValidateTarget, - 4 => SwitchStage::PatchDraft, - 5 => SwitchStage::UpdateCore, - 6 => SwitchStage::Finalize, - 7 => SwitchStage::Workflow, - 8 => SwitchStage::DriverTask, - _ => return None, - }) - } -} - -#[derive(Debug, Clone)] -pub(crate) struct SwitchPanicInfo { - pub(crate) stage: SwitchStage, - pub(crate) detail: String, -} - -impl SwitchPanicInfo { - pub(crate) fn new(stage: SwitchStage, detail: String) -> Self { - Self { stage, detail } - } - - pub(crate) fn workflow_root(detail: String) -> Self { - Self::new(SwitchStage::Workflow, detail) - } - - pub(crate) fn driver_task(detail: String) -> Self { - Self::new(SwitchStage::DriverTask, detail) - } -} - -/// High-level state machine nodes executed in strict sequence. -pub(crate) enum SwitchState { - Start, - AcquireCore, - Prepare, - ValidateTarget, - PatchDraft, - UpdateCore, - Finalize(CoreUpdateOutcome), - Complete(bool), -} - -/// Result of trying to apply the draft configuration to the core. 
-pub(crate) enum CoreUpdateOutcome { - Success, - ValidationFailed { message: String }, - CoreError { message: String }, - Timeout, -} - -/// Indicates where a stale request was detected so logs stay descriptive. -pub(crate) enum StaleStage { - AfterLock, - BeforeCoreOperation, - BeforeCoreInteraction, - AfterCoreOperation, -} - -impl StaleStage { - pub(super) fn log(&self, ctx: &SwitchContext) { - let sequence = ctx.sequence(); - let latest = ctx.manager.latest_request_sequence(); - match self { - StaleStage::AfterLock => logging!( - info, - Type::Cmd, - "Detected a newer request after acquiring the lock (sequence: {} < {}), abandoning current request", - sequence, - latest - ), - StaleStage::BeforeCoreOperation => logging!( - info, - Type::Cmd, - "Detected a newer request before core operation (sequence: {} < {}), abandoning current request", - sequence, - latest - ), - StaleStage::BeforeCoreInteraction => logging!( - info, - Type::Cmd, - "Detected a newer request before core interaction (sequence: {} < {}), abandoning current request", - sequence, - latest - ), - StaleStage::AfterCoreOperation => logging!( - info, - Type::Cmd, - "Detected a newer request after core operation (sequence: {} < {}), ignoring current result", - sequence, - latest - ), - } - } -} - -impl SwitchStateMachine { - pub(crate) fn new( - manager: &'static SwitchManager, - request: Option, - profiles: IProfiles, - ) -> Self { - let heartbeat = request - .as_ref() - .map(|req| req.heartbeat().clone()) - .unwrap_or_else(SwitchHeartbeat::new); - - Self { - ctx: SwitchContext::new(manager, request, profiles, heartbeat), - state: SwitchState::Start, - } - } - - pub(crate) async fn run(mut self) -> Result, SwitchPanicInfo> { - // Drive the state machine until we either complete successfully or bubble up a panic. 
- loop { - let current_state = mem::replace(&mut self.state, SwitchState::Complete(false)); - match current_state { - SwitchState::Complete(result) => return Ok(Ok(result)), - _ => match self.run_state(current_state).await? { - Ok(state) => self.state = state, - Err(err) => return Ok(Err(err)), - }, - } - } - } - - async fn run_state( - &mut self, - current: SwitchState, - ) -> Result, SwitchPanicInfo> { - match current { - SwitchState::Start => { - self.with_stage( - SwitchStage::Start, - |this| async move { this.handle_start() }, - ) - .await - } - SwitchState::AcquireCore => { - self.with_stage(SwitchStage::AcquireCore, |this| async move { - this.handle_acquire_core().await - }) - .await - } - SwitchState::Prepare => { - self.with_stage(SwitchStage::Prepare, |this| async move { - this.handle_prepare().await - }) - .await - } - SwitchState::ValidateTarget => { - self.with_stage(SwitchStage::ValidateTarget, |this| async move { - this.handle_validate_target().await - }) - .await - } - SwitchState::PatchDraft => { - self.with_stage(SwitchStage::PatchDraft, |this| async move { - this.handle_patch_draft().await - }) - .await - } - SwitchState::UpdateCore => { - self.with_stage(SwitchStage::UpdateCore, |this| async move { - this.handle_update_core().await - }) - .await - } - SwitchState::Finalize(outcome) => { - self.with_stage(SwitchStage::Finalize, |this| async move { - this.handle_finalize(outcome).await - }) - .await - } - SwitchState::Complete(result) => Ok(Ok(SwitchState::Complete(result))), - } - } - - /// Helper that wraps each stage with consistent logging and panic reporting. 
- async fn with_stage<'a, F, Fut>( - &'a mut self, - stage: SwitchStage, - f: F, - ) -> Result, SwitchPanicInfo> - where - F: FnOnce(&'a mut Self) -> Fut, - Fut: std::future::Future> + 'a, - { - let sequence = self.ctx.sequence(); - let task = self.ctx.task_id; - let profile = self.ctx.profile_label.clone(); - logging!( - info, - Type::Cmd, - "Enter {:?} (sequence={}, task={:?}, profile={})", - stage, - sequence, - task, - profile - ); - let stage_start = Instant::now(); - self.ctx.record_stage(stage); - AssertUnwindSafe(f(self)) - .catch_unwind() - .await - .map_err(|payload| { - SwitchPanicInfo::new(stage, describe_panic_payload(payload.as_ref())) - }) - .inspect(|_| { - logging!( - info, - Type::Cmd, - "Exit {:?} (sequence={}, task={:?}, profile={}, elapsed={}ms)", - stage, - sequence, - task, - profile, - stage_start.elapsed().as_millis() - ); - }) - } -} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs deleted file mode 100644 index 84ee0f49..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -mod context; -mod core; -mod stages; - -pub(crate) use core::{ - CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchPanicInfo, SwitchStage, SwitchStateMachine, -}; - -pub(super) use super::{ - CmdResult, describe_panic_payload, restore_previous_profile, validate_profile_yaml, -}; diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs deleted file mode 100644 index 78c313d4..00000000 --- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs +++ /dev/null @@ -1,597 +0,0 @@ -use super::{ - CmdResult, - core::{ - CONFIG_APPLY_TIMEOUT, CoreUpdateOutcome, REFRESH_TIMEOUT, SAVE_PROFILES_TIMEOUT, - SWITCH_IDLE_WAIT_MAX_BACKOFF, SWITCH_IDLE_WAIT_POLL, SWITCH_IDLE_WAIT_TIMEOUT, StaleStage, - SwitchState, SwitchStateMachine, 
TRAY_UPDATE_TIMEOUT, - }, - restore_previous_profile, validate_profile_yaml, -}; -use crate::{ - config::{Config, profiles::profiles_save_file_safe}, - core::{CoreManager, handle, tray::Tray}, - logging, - process::AsyncHandler, - utils::logging::Type, -}; -use anyhow::Error; -use futures::future; -use smartstring::alias::String as SmartString; -use std::{ - pin::Pin, - time::{Duration, Instant}, -}; -use tokio::time; - -impl SwitchStateMachine { - pub(super) fn handle_start(&mut self) -> CmdResult { - if self.ctx.manager.is_switching() { - logging!( - info, - Type::Cmd, - "Profile switch already in progress; queuing request for task={:?}, profile={}", - self.ctx.task_id, - self.ctx.profile_label - ); - } - Ok(SwitchState::AcquireCore) - } - - /// Grab the core lock, mark the manager as switching, and compute the target profile. - pub(super) async fn handle_acquire_core(&mut self) -> CmdResult { - let manager = self.ctx.manager; - let core_guard = manager.core_mutex().lock().await; - - if manager.is_switching() { - logging!( - info, - Type::Cmd, - "Active profile switch detected; waiting before acquiring scope" - ); - let wait_start = Instant::now(); - let mut backoff = SWITCH_IDLE_WAIT_POLL; - while manager.is_switching() { - if self.ctx.cancelled() { - self.ctx - .log_cancelled("while waiting for active switch to finish"); - return Ok(SwitchState::Complete(false)); - } - if wait_start.elapsed() >= SWITCH_IDLE_WAIT_TIMEOUT { - let message = format!( - "Timed out after {:?} waiting for active profile switch to finish", - SWITCH_IDLE_WAIT_TIMEOUT - ); - logging!(error, Type::Cmd, "{}", message); - return Err(message.into()); - } - - time::sleep(backoff).await; - backoff = backoff.saturating_mul(2).min(SWITCH_IDLE_WAIT_MAX_BACKOFF); - } - let waited = wait_start.elapsed().as_millis(); - if waited > 0 { - logging!( - info, - Type::Cmd, - "Waited {}ms for active switch to finish before acquiring scope", - waited - ); - } - } - - self.ctx.core_guard = Some(core_guard); 
- self.ctx.switch_scope = Some(manager.begin_switch()); - self.ctx.sequence = Some(manager.next_request_sequence()); - self.ctx.ensure_target_profile(); - - logging!( - info, - Type::Cmd, - "Begin modifying configuration; sequence: {}, target profile: {:?}", - self.ctx.sequence(), - self.ctx.target_profile - ); - - if self.ctx.cancelled() { - self.ctx.log_cancelled("after acquiring core lock"); - return Ok(SwitchState::Complete(false)); - } - - if self.ctx.stale() { - StaleStage::AfterLock.log(&self.ctx); - return Ok(SwitchState::Complete(false)); - } - - Ok(SwitchState::Prepare) - } - - pub(super) async fn handle_prepare(&mut self) -> CmdResult { - let current_profile = { - let profiles_guard = Config::profiles().await; - profiles_guard.latest_ref().current.clone() - }; - - logging!(info, Type::Cmd, "Current profile: {:?}", current_profile); - self.ctx.previous_profile = current_profile; - Ok(SwitchState::ValidateTarget) - } - - pub(super) async fn handle_validate_target(&mut self) -> CmdResult { - if self.ctx.cancelled() { - self.ctx.log_cancelled("before validation"); - return Ok(SwitchState::Complete(false)); - } - - if self.ctx.should_validate_target() { - let Some(target) = self.ctx.target_profile.clone() else { - logging!( - error, - Type::Cmd, - "Missing target profile while validation was requested; aborting switch" - ); - return Err("missing target profile at validation".into()); - }; - if !validate_profile_yaml(&target).await? 
{ - return Ok(SwitchState::Complete(false)); - } - } - - if self.ctx.stale() { - StaleStage::BeforeCoreOperation.log(&self.ctx); - return Ok(SwitchState::Complete(false)); - } - - Ok(SwitchState::PatchDraft) - } - - pub(super) async fn handle_patch_draft(&mut self) -> CmdResult { - if self.ctx.cancelled() { - self.ctx.log_cancelled("before patching configuration"); - return Ok(SwitchState::Complete(false)); - } - - logging!( - info, - Type::Cmd, - "Updating configuration draft, sequence: {}", - self.ctx.sequence() - ); - - let patch = self.ctx.take_profiles_patch()?; - self.ctx.new_profile_for_event = patch.current.clone(); - let _ = Config::profiles().await.draft_mut().patch_config(patch); - - if self.ctx.stale() { - StaleStage::BeforeCoreInteraction.log(&self.ctx); - Config::profiles().await.discard(); - return Ok(SwitchState::Complete(false)); - } - - Ok(SwitchState::UpdateCore) - } - - pub(super) async fn handle_update_core(&mut self) -> CmdResult { - let sequence = self.ctx.sequence(); - let task_id = self.ctx.task_id; - let profile = self.ctx.profile_label.clone(); - logging!( - info, - Type::Cmd, - "Starting core configuration update, sequence: {}, task={:?}, profile={}", - sequence, - task_id, - profile - ); - - let heartbeat = self.ctx.heartbeat.clone(); - let start = Instant::now(); - let mut ticker = time::interval(Duration::from_secs(1)); - ticker.set_missed_tick_behavior(time::MissedTickBehavior::Delay); - - let update_future = CoreManager::global().update_config(); - tokio::pin!(update_future); - - let timeout = time::sleep(Duration::from_secs(30)); - tokio::pin!(timeout); - - let cancel_token = self.ctx.cancel_token(); - let mut cancel_notifier: Pin + Send>> = - match cancel_token { - Some(token) => Box::pin(async move { - token.cancelled_future().await; - }), - None => Box::pin(future::pending()), - }; - - enum UpdateOutcome { - Finished(Result<(bool, SmartString), Error>), - Timeout, - Cancelled, - } - - let update_outcome = loop { - tokio::select! 
{ - res = &mut update_future => break UpdateOutcome::Finished(res), - _ = &mut timeout => break UpdateOutcome::Timeout, - _ = &mut cancel_notifier => break UpdateOutcome::Cancelled, - _ = ticker.tick() => { - let elapsed_ms = start.elapsed().as_millis(); - heartbeat.touch(); - match task_id { - Some(id) => logging!( - debug, - Type::Cmd, - "Switch task {} (profile={}) UpdateCore still running (elapsed={}ms)", - id, - profile, - elapsed_ms - ), - None => logging!( - debug, - Type::Cmd, - "Profile patch {} UpdateCore still running (elapsed={}ms)", - profile, - elapsed_ms - ), - } - } - } - }; - - let elapsed_ms = start.elapsed().as_millis(); - - let outcome = match update_outcome { - UpdateOutcome::Finished(Ok((true, _))) => { - logging!( - info, - Type::Cmd, - "Core configuration update succeeded in {}ms", - elapsed_ms - ); - CoreUpdateOutcome::Success - } - UpdateOutcome::Finished(Ok((false, msg))) => { - logging!( - warn, - Type::Cmd, - "Core configuration update validation failed in {}ms: {}", - elapsed_ms, - msg - ); - CoreUpdateOutcome::ValidationFailed { - message: msg.to_string(), - } - } - UpdateOutcome::Finished(Err(err)) => { - logging!( - error, - Type::Cmd, - "Core configuration update errored in {}ms: {}", - elapsed_ms, - err - ); - CoreUpdateOutcome::CoreError { - message: err.to_string(), - } - } - UpdateOutcome::Timeout => { - logging!( - error, - Type::Cmd, - "Core configuration update timed out after {}ms", - elapsed_ms - ); - CoreUpdateOutcome::Timeout - } - UpdateOutcome::Cancelled => { - self.ctx.log_cancelled("during core update"); - logging!( - info, - Type::Cmd, - "Core configuration update cancelled after {}ms", - elapsed_ms - ); - self.ctx.release_locks(); - Config::profiles().await.discard(); - return Ok(SwitchState::Complete(false)); - } - }; - - self.ctx.release_core_guard(); - - Ok(SwitchState::Finalize(outcome)) - } - - pub(super) async fn handle_finalize( - &mut self, - outcome: CoreUpdateOutcome, - ) -> CmdResult { - let next_state = 
match outcome { - CoreUpdateOutcome::Success => self.finalize_success().await, - CoreUpdateOutcome::ValidationFailed { message } => { - self.finalize_validation_failed(message).await - } - CoreUpdateOutcome::CoreError { message } => self.finalize_core_error(message).await, - CoreUpdateOutcome::Timeout => self.finalize_timeout().await, - }; - - if next_state.is_err() || matches!(next_state, Ok(SwitchState::Complete(_))) { - self.ctx.release_switch_scope(); - } - - next_state - } - - pub(super) async fn finalize_success(&mut self) -> CmdResult { - if self.abort_if_stale_post_core().await? { - return Ok(SwitchState::Complete(false)); - } - - self.log_successful_update(); - - if !self.apply_config_with_timeout().await? { - logging!( - warn, - Type::Cmd, - "Apply step failed; attempting to restore previous profile before completing" - ); - restore_previous_profile(self.ctx.previous_profile.clone()).await?; - return Ok(SwitchState::Complete(false)); - } - - self.refresh_clash_with_timeout().await; - self.update_tray_tooltip_with_timeout().await; - self.update_tray_menu_with_timeout().await; - if let Err(err) = self.persist_profiles_with_timeout().await { - logging!( - error, - Type::Cmd, - "Persisting new profile configuration failed; attempting to restore previous profile: {}", - err - ); - restore_previous_profile(self.ctx.previous_profile.clone()).await?; - return Err(err); - } - self.emit_profile_change_event(); - logging!( - debug, - Type::Cmd, - "Finalize success pipeline completed for sequence {}", - self.ctx.sequence() - ); - - Ok(SwitchState::Complete(true)) - } - - pub(super) async fn finalize_validation_failed( - &mut self, - message: String, - ) -> CmdResult { - logging!( - warn, - Type::Cmd, - "Configuration validation failed: {}", - message - ); - Config::profiles().await.discard(); - restore_previous_profile(self.ctx.previous_profile.clone()).await?; - handle::Handle::notice_message("config_validate::error", message); - Ok(SwitchState::Complete(false)) - } 
- - pub(super) async fn finalize_core_error(&mut self, message: String) -> CmdResult { - logging!( - warn, - Type::Cmd, - "Error occurred during update: {}, sequence: {}", - message, - self.ctx.sequence() - ); - Config::profiles().await.discard(); - handle::Handle::notice_message("config_validate::boot_error", message); - Ok(SwitchState::Complete(false)) - } - - pub(super) async fn finalize_timeout(&mut self) -> CmdResult { - let timeout_msg = - "Configuration update timed out (30s); possible validation or core communication stall"; - logging!( - error, - Type::Cmd, - "{}, sequence: {}", - timeout_msg, - self.ctx.sequence() - ); - Config::profiles().await.discard(); - restore_previous_profile(self.ctx.previous_profile.clone()).await?; - handle::Handle::notice_message("config_validate::timeout", timeout_msg); - Ok(SwitchState::Complete(false)) - } - - pub(super) async fn abort_if_stale_post_core(&mut self) -> CmdResult { - if self.ctx.stale() { - StaleStage::AfterCoreOperation.log(&self.ctx); - Config::profiles().await.discard(); - return Ok(true); - } - - Ok(false) - } - - pub(super) fn log_successful_update(&self) { - logging!( - info, - Type::Cmd, - "Configuration update succeeded, sequence: {}", - self.ctx.sequence() - ); - } - - pub(super) async fn apply_config_with_timeout(&mut self) -> CmdResult { - let apply_result = time::timeout(CONFIG_APPLY_TIMEOUT, async { - Config::profiles().await.apply() - }) - .await; - - if apply_result.is_ok() { - Ok(true) - } else { - logging!( - warn, - Type::Cmd, - "Applying profile configuration timed out after {:?}", - CONFIG_APPLY_TIMEOUT - ); - Config::profiles().await.discard(); - Ok(false) - } - } - - pub(super) async fn refresh_clash_with_timeout(&self) { - let start = Instant::now(); - let result = time::timeout(REFRESH_TIMEOUT, async { - handle::Handle::refresh_clash(); - }) - .await; - - let elapsed = start.elapsed().as_millis(); - match result { - Ok(_) => logging!( - debug, - Type::Cmd, - "refresh_clash_with_timeout 
completed in {}ms", - elapsed - ), - Err(_) => logging!( - warn, - Type::Cmd, - "Refreshing Clash state timed out after {:?} (elapsed={}ms)", - REFRESH_TIMEOUT, - elapsed - ), - } - } - - pub(super) async fn update_tray_tooltip_with_timeout(&self) { - let start = Instant::now(); - let update_tooltip = time::timeout(TRAY_UPDATE_TIMEOUT, async { - Tray::global().update_tooltip().await - }) - .await; - let elapsed = start.elapsed().as_millis(); - - if update_tooltip.is_err() { - logging!( - warn, - Type::Cmd, - "Updating tray tooltip timed out after {:?} (elapsed={}ms)", - TRAY_UPDATE_TIMEOUT, - elapsed - ); - } else if let Ok(Err(err)) = update_tooltip { - logging!( - warn, - Type::Cmd, - "Failed to update tray tooltip asynchronously: {}", - err - ); - } else { - logging!( - debug, - Type::Cmd, - "update_tray_tooltip_with_timeout completed in {}ms", - elapsed - ); - } - } - - pub(super) async fn update_tray_menu_with_timeout(&self) { - let start = Instant::now(); - let update_menu = time::timeout(TRAY_UPDATE_TIMEOUT, async { - Tray::global().update_menu().await - }) - .await; - let elapsed = start.elapsed().as_millis(); - - if update_menu.is_err() { - logging!( - warn, - Type::Cmd, - "Updating tray menu timed out after {:?} (elapsed={}ms)", - TRAY_UPDATE_TIMEOUT, - elapsed - ); - } else if let Ok(Err(err)) = update_menu { - logging!( - warn, - Type::Cmd, - "Failed to update tray menu asynchronously: {}", - err - ); - } else { - logging!( - debug, - Type::Cmd, - "update_tray_menu_with_timeout completed in {}ms", - elapsed - ); - } - } - - pub(super) async fn persist_profiles_with_timeout(&self) -> CmdResult<()> { - let start = Instant::now(); - let save_future = AsyncHandler::spawn_blocking(|| { - futures::executor::block_on(async { profiles_save_file_safe().await }) - }); - - let elapsed = start.elapsed().as_millis(); - match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { - Err(_) => { - let message = format!( - "Persisting configuration file timed out 
after {:?} (elapsed={}ms)", - SAVE_PROFILES_TIMEOUT, elapsed - ); - logging!(warn, Type::Cmd, "{}", message); - Err(message.into()) - } - Ok(join_result) => match join_result { - Err(join_err) => { - let message = format!( - "Persisting configuration file failed: blocking task join error: {join_err}" - ); - logging!(error, Type::Cmd, "{}", message); - Err(message.into()) - } - Ok(save_result) => match save_result { - Ok(()) => { - logging!( - debug, - Type::Cmd, - "persist_profiles_with_timeout completed in {}ms", - elapsed - ); - Ok(()) - } - Err(err) => { - let message = format!("Persisting configuration file failed: {}", err); - logging!(error, Type::Cmd, "{}", message); - Err(message.into()) - } - }, - }, - } - } - - pub(super) fn emit_profile_change_event(&self) { - if let Some(current) = self.ctx.new_profile_for_event.clone() { - logging!( - info, - Type::Cmd, - "Emitting configuration change event to frontend: {}, sequence: {}", - current, - self.ctx.sequence() - ); - handle::Handle::notify_profile_changed(current); - } - } -} diff --git a/src-tauri/src/core/handle.rs b/src-tauri/src/core/handle.rs index e3735d46..ef868f59 100644 --- a/src-tauri/src/core/handle.rs +++ b/src-tauri/src/core/handle.rs @@ -1,14 +1,7 @@ -use crate::{ - APP_HANDLE, config::Config, constants::timing, logging, singleton, utils::logging::Type, -}; +use crate::{APP_HANDLE, constants::timing, singleton}; use parking_lot::RwLock; -use serde_json::{Value, json}; use smartstring::alias::String; -use std::{ - sync::Arc, - thread, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::{sync::Arc, thread}; use tauri::{AppHandle, Manager, WebviewWindow}; use tauri_plugin_mihomo::{Mihomo, MihomoExt}; use tokio::sync::RwLockReadGuard; @@ -73,14 +66,10 @@ impl Handle { return; } - { - let system_opt = handle.notification_system.read(); - if let Some(system) = system_opt.as_ref() { - system.send_event(FrontendEvent::RefreshClash); - } + let system_opt = handle.notification_system.read(); + if let 
Some(system) = system_opt.as_ref() { + system.send_event(FrontendEvent::RefreshClash); } - - Self::spawn_proxy_snapshot(); } pub fn refresh_verge() { @@ -96,37 +85,11 @@ impl Handle { } pub fn notify_profile_changed(profile_id: String) { - let handle = Self::global(); - if handle.is_exiting() { - return; - } - - let system_opt = handle.notification_system.read(); - if let Some(system) = system_opt.as_ref() { - system.send_event(FrontendEvent::ProfileChanged { - current_profile_id: profile_id, - }); - } - } - - pub fn notify_profile_switch_finished( - profile_id: String, - success: bool, - notify: bool, - task_id: u64, - ) { - Self::send_event(FrontendEvent::ProfileSwitchFinished { - profile_id, - success, - notify, - task_id, + Self::send_event(FrontendEvent::ProfileChanged { + current_profile_id: profile_id, }); } - pub fn notify_rust_panic(message: String, location: String) { - Self::send_event(FrontendEvent::RustPanic { message, location }); - } - pub fn notify_timer_updated(profile_index: String) { Self::send_event(FrontendEvent::TimerUpdated { profile_index }); } @@ -137,86 +100,6 @@ impl Handle { pub fn notify_profile_update_completed(uid: String) { Self::send_event(FrontendEvent::ProfileUpdateCompleted { uid }); - Self::spawn_proxy_snapshot(); - } - - pub fn notify_proxies_updated(payload: Value) { - Self::send_event(FrontendEvent::ProxiesUpdated { payload }); - } - - pub async fn build_proxy_snapshot() -> Option { - let mihomo_guard = Self::mihomo().await; - let proxies = match mihomo_guard.get_proxies().await { - Ok(data) => match serde_json::to_value(&data) { - Ok(value) => value, - Err(error) => { - logging!( - warn, - Type::Frontend, - "Failed to serialize proxies snapshot: {error}" - ); - return None; - } - }, - Err(error) => { - logging!( - warn, - Type::Frontend, - "Failed to fetch proxies for snapshot: {error}" - ); - return None; - } - }; - - drop(mihomo_guard); - - let providers_guard = Self::mihomo().await; - let providers_value = match 
providers_guard.get_proxy_providers().await { - Ok(data) => serde_json::to_value(&data).unwrap_or_else(|error| { - logging!( - warn, - Type::Frontend, - "Failed to serialize proxy providers for snapshot: {error}" - ); - Value::Null - }), - Err(error) => { - logging!( - warn, - Type::Frontend, - "Failed to fetch proxy providers for snapshot: {error}" - ); - Value::Null - } - }; - - drop(providers_guard); - - let profile_guard = Config::profiles().await; - let profile_id = profile_guard.latest_ref().current.clone(); - drop(profile_guard); - - let emitted_at = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|duration| duration.as_millis() as i64) - .unwrap_or(0); - - let payload = json!({ - "proxies": proxies, - "providers": providers_value, - "profileId": profile_id, - "emittedAt": emitted_at, - }); - - Some(payload) - } - - fn spawn_proxy_snapshot() { - tauri::async_runtime::spawn(async { - if let Some(payload) = Handle::build_proxy_snapshot().await { - Handle::notify_proxies_updated(payload); - } - }); } pub fn notice_message, M: Into>(status: S, msg: M) { diff --git a/src-tauri/src/core/manager/config.rs b/src-tauri/src/core/manager/config.rs index e93d5244..263ddb4b 100644 --- a/src-tauri/src/core/manager/config.rs +++ b/src-tauri/src/core/manager/config.rs @@ -10,10 +10,7 @@ use anyhow::{Result, anyhow}; use smartstring::alias::String; use std::{path::PathBuf, time::Instant}; use tauri_plugin_mihomo::Error as MihomoError; -use tokio::time::{sleep, timeout}; - -const RELOAD_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5); -const MAX_RELOAD_ATTEMPTS: usize = 3; +use tokio::time::sleep; impl CoreManager { pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> { @@ -42,38 +39,12 @@ impl CoreManager { return Ok((true, String::new())); } - let start = Instant::now(); - let _permit = self .update_semaphore .try_acquire() .map_err(|_| anyhow!("Config update already in progress"))?; - let result = 
self.perform_config_update().await; - - match &result { - Ok((success, msg)) => { - logging!( - info, - Type::Core, - "[ConfigUpdate] Finished (success={}, elapsed={}ms, msg={})", - success, - start.elapsed().as_millis(), - msg - ); - } - Err(err) => { - logging!( - error, - Type::Core, - "[ConfigUpdate] Failed after {}ms: {}", - start.elapsed().as_millis(), - err - ); - } - } - - result + self.perform_config_update().await } fn should_update_config(&self) -> Result { @@ -91,73 +62,20 @@ impl CoreManager { } async fn perform_config_update(&self) -> Result<(bool, String)> { - logging!(debug, Type::Core, "[ConfigUpdate] Pipeline start"); - let total_start = Instant::now(); - - let mut stage_timer = Instant::now(); Config::generate().await?; - logging!( - debug, - Type::Core, - "[ConfigUpdate] Generation completed in {}ms", - stage_timer.elapsed().as_millis() - ); - stage_timer = Instant::now(); - let validation_result = CoreConfigValidator::global().validate_config().await; - logging!( - debug, - Type::Core, - "[ConfigUpdate] Validation completed in {}ms", - stage_timer.elapsed().as_millis() - ); - - match validation_result { + match CoreConfigValidator::global().validate_config().await { Ok((true, _)) => { - stage_timer = Instant::now(); let run_path = Config::generate_file(ConfigType::Run).await?; - logging!( - debug, - Type::Core, - "[ConfigUpdate] Runtime file generated in {}ms", - stage_timer.elapsed().as_millis() - ); - stage_timer = Instant::now(); self.apply_config(run_path).await?; - logging!( - debug, - Type::Core, - "[ConfigUpdate] Core apply completed in {}ms", - stage_timer.elapsed().as_millis() - ); - logging!( - debug, - Type::Core, - "[ConfigUpdate] Pipeline succeeded in {}ms", - total_start.elapsed().as_millis() - ); Ok((true, String::new())) } Ok((false, error_msg)) => { Config::runtime().await.discard(); - logging!( - warn, - Type::Core, - "[ConfigUpdate] Validation reported failure after {}ms: {}", - total_start.elapsed().as_millis(), - error_msg 
- ); Ok((false, error_msg)) } Err(e) => { Config::runtime().await.discard(); - logging!( - error, - Type::Core, - "[ConfigUpdate] Validation errored after {}ms: {}", - total_start.elapsed().as_millis(), - e - ); Err(e) } } @@ -170,49 +88,17 @@ impl CoreManager { pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> { let path_str = dirs::path_to_str(&path)?; - let reload_start = Instant::now(); - match self.reload_config_with_retry(path_str).await { + match self.reload_config(path_str).await { Ok(_) => { Config::runtime().await.apply(); - logging!( - debug, - Type::Core, - "Configuration applied (reload={}ms)", - reload_start.elapsed().as_millis() - ); + logging!(info, Type::Core, "Configuration applied"); Ok(()) } + Err(err) if Self::should_restart_on_error(&err) => { + self.retry_with_restart(path_str).await + } Err(err) => { - if Self::should_restart_for_anyhow(&err) { - logging!( - warn, - Type::Core, - "Reload failed after {}ms with retryable/timeout error; attempting restart: {}", - reload_start.elapsed().as_millis(), - err - ); - match self.retry_with_restart(path_str).await { - Ok(_) => return Ok(()), - Err(retry_err) => { - logging!( - error, - Type::Core, - "Reload retry with restart failed: {}", - retry_err - ); - Config::runtime().await.discard(); - return Err(retry_err); - } - } - } Config::runtime().await.discard(); - logging!( - error, - Type::Core, - "Failed to apply config after {}ms: {}", - reload_start.elapsed().as_millis(), - err - ); Err(anyhow!("Failed to apply config: {}", err)) } } @@ -227,116 +113,17 @@ impl CoreManager { self.restart_core().await?; sleep(timing::CONFIG_RELOAD_DELAY).await; - self.reload_config_with_retry(config_path).await?; + self.reload_config(config_path).await?; Config::runtime().await.apply(); logging!(info, Type::Core, "Configuration applied after restart"); Ok(()) } - async fn reload_config_with_retry(&self, path: &str) -> Result<()> { - for attempt in 1..=MAX_RELOAD_ATTEMPTS { - let attempt_start = 
Instant::now(); - let reload_future = self.reload_config_once(path); - match timeout(RELOAD_TIMEOUT, reload_future).await { - Ok(Ok(())) => { - logging!( - debug, - Type::Core, - "reload_config attempt {}/{} succeeded in {}ms", - attempt, - MAX_RELOAD_ATTEMPTS, - attempt_start.elapsed().as_millis() - ); - return Ok(()); - } - Ok(Err(err)) => { - logging!( - warn, - Type::Core, - "reload_config attempt {}/{} failed after {}ms: {}", - attempt, - MAX_RELOAD_ATTEMPTS, - attempt_start.elapsed().as_millis(), - err - ); - if attempt == MAX_RELOAD_ATTEMPTS { - return Err(anyhow!( - "Failed to reload config after {} attempts: {}", - attempt, - err - )); - } - } - Err(_) => { - logging!( - warn, - Type::Core, - "reload_config attempt {}/{} timed out after {:?}", - attempt, - MAX_RELOAD_ATTEMPTS, - RELOAD_TIMEOUT - ); - if attempt == MAX_RELOAD_ATTEMPTS { - return Err(anyhow!( - "Config reload timed out after {:?} ({} attempts)", - RELOAD_TIMEOUT, - MAX_RELOAD_ATTEMPTS - )); - } - } - } - } - - Err(anyhow!( - "Config reload retry loop exited unexpectedly ({} attempts)", - MAX_RELOAD_ATTEMPTS - )) - } - - async fn reload_config_once(&self, path: &str) -> Result<(), MihomoError> { - logging!( - info, - Type::Core, - "[ConfigUpdate] reload_config_once begin path={} ", - path - ); - let start = Instant::now(); - let result = handle::Handle::mihomo() + async fn reload_config(&self, path: &str) -> Result<(), MihomoError> { + handle::Handle::mihomo() .await .reload_config(true, path) - .await; - let elapsed = start.elapsed().as_millis(); - match result { - Ok(()) => { - logging!( - info, - Type::Core, - "[ConfigUpdate] reload_config_once succeeded (elapsed={}ms)", - elapsed - ); - Ok(()) - } - Err(err) => { - logging!( - warn, - Type::Core, - "[ConfigUpdate] reload_config_once failed (elapsed={}ms, err={})", - elapsed, - err - ); - Err(err) - } - } - } - - fn should_restart_for_anyhow(err: &anyhow::Error) -> bool { - if let Some(mihomo_err) = err.downcast_ref::() { - return 
Self::should_restart_on_error(mihomo_err); - } - let msg = err.to_string(); - msg.contains("timed out") - || msg.contains("reload") - || msg.contains("Failed to apply config") + .await } fn should_restart_on_error(err: &MihomoError) -> bool { diff --git a/src-tauri/src/core/notification.rs b/src-tauri/src/core/notification.rs index 5754fecb..071bcedb 100644 --- a/src-tauri/src/core/notification.rs +++ b/src-tauri/src/core/notification.rs @@ -1,71 +1,38 @@ -use crate::{constants::retry, logging, utils::logging::Type}; -use once_cell::sync::Lazy; +use crate::{ + constants::{retry, timing}, + logging, + utils::logging::Type, +}; use parking_lot::RwLock; use smartstring::alias::String; use std::{ sync::{ - Arc, - atomic::{AtomicBool, AtomicU64, Ordering}, + atomic::{AtomicU64, Ordering}, mpsc, }, thread, time::Instant, }; -use tauri::Emitter; -use tauri::async_runtime; +use tauri::{Emitter, WebviewWindow}; -#[allow(dead_code)] // Temporarily suppress warnings while diagnostics disable certain events #[derive(Debug, Clone)] pub enum FrontendEvent { RefreshClash, RefreshVerge, - RefreshProxy, - ProxiesUpdated { - payload: serde_json::Value, - }, - NoticeMessage { - status: String, - message: String, - }, - ProfileChanged { - current_profile_id: String, - }, - ProfileSwitchFinished { - profile_id: String, - success: bool, - notify: bool, - task_id: u64, - }, - TimerUpdated { - profile_index: String, - }, - ProfileUpdateStarted { - uid: String, - }, - ProfileUpdateCompleted { - uid: String, - }, - RustPanic { - message: String, - location: String, - }, + NoticeMessage { status: String, message: String }, + ProfileChanged { current_profile_id: String }, + TimerUpdated { profile_index: String }, + ProfileUpdateStarted { uid: String }, + ProfileUpdateCompleted { uid: String }, } -static EMIT_SERIALIZER: Lazy> = Lazy::new(|| tokio::sync::Mutex::new(())); - #[derive(Debug, Default)] struct EventStats { + total_sent: AtomicU64, total_errors: AtomicU64, last_error_time: RwLock>, 
} -#[derive(Debug, Default)] -#[allow(dead_code)] -struct BufferedProxies { - pending: parking_lot::Mutex>, - in_flight: AtomicBool, -} - #[derive(Debug, Clone)] pub struct ErrorMessage { pub status: String, @@ -80,7 +47,6 @@ pub struct NotificationSystem { pub(super) is_running: bool, stats: EventStats, emergency_mode: RwLock, - proxies_buffer: Arc, } impl Default for NotificationSystem { @@ -97,7 +63,6 @@ impl NotificationSystem { is_running: false, stats: EventStats::default(), emergency_mode: RwLock::new(false), - proxies_buffer: Arc::new(BufferedProxies::default()), } } @@ -152,78 +117,13 @@ impl NotificationSystem { return; }; - let event_label = Self::describe_event(&event); - - match event { - FrontendEvent::ProxiesUpdated { payload } => { - logging!( - debug, - Type::Frontend, - "Queueing proxies-updated event for buffered emit: {}", - event_label - ); - system.enqueue_proxies_updated(payload); - } - other => { - logging!( - debug, - Type::Frontend, - "Queueing event for async emit: {}", - event_label - ); - - let (event_name, payload_result) = system.serialize_event(other); - let payload = match payload_result { - Ok(value) => value, - Err(err) => { - logging!( - warn, - Type::Frontend, - "Failed to serialize event {}: {}", - event_name, - err - ); - return; - } - }; - - logging!( - debug, - Type::Frontend, - "Dispatching async emit: {}", - event_name - ); - let _ = Self::emit_via_app(event_name, payload); - } - } - } - - fn enqueue_proxies_updated(&self, payload: serde_json::Value) { - let replaced = { - let mut slot = self.proxies_buffer.pending.lock(); - let had_pending = slot.is_some(); - *slot = Some(payload); - had_pending - }; - - if replaced { - logging!( - debug, - Type::Frontend, - "Replaced pending proxies-updated payload with latest snapshot" - ); + if system.should_skip_event(&event) { + return; } - if self - .proxies_buffer - .in_flight - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) - .is_ok() - { - let buffer = 
Arc::clone(&self.proxies_buffer); - async_runtime::spawn(async move { - Self::flush_proxies(buffer).await; - }); + if let Some(window) = super::handle::Handle::get_window() { + system.emit_to_window(&window, event); + thread::sleep(timing::EVENT_EMIT_DELAY); } } @@ -235,95 +135,25 @@ impl NotificationSystem { ) } - fn emit_via_app(event_name: &'static str, payload: serde_json::Value) -> Result<(), String> { - let app_handle = super::handle::Handle::app_handle().clone(); - let event_name = event_name.to_string(); - async_runtime::spawn(async move { - if let Err(err) = app_handle.emit_to("main", event_name.as_str(), payload) { - logging!( - warn, - Type::Frontend, - "emit_to failed for {}: {}", - event_name, - err - ); + fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) { + let (event_name, payload) = self.serialize_event(event); + + let Ok(payload) = payload else { + self.stats.total_errors.fetch_add(1, Ordering::Relaxed); + return; + }; + + match window.emit(event_name, payload) { + Ok(_) => { + self.stats.total_sent.fetch_add(1, Ordering::Relaxed); } - }); - Ok(()) - } - - async fn flush_proxies(buffer: Arc) { - const EVENT_NAME: &str = "proxies-updated"; - - loop { - let payload_opt = { - let mut guard = buffer.pending.lock(); - guard.take() - }; - - let Some(payload) = payload_opt else { - buffer.in_flight.store(false, Ordering::Release); - - if buffer.pending.lock().is_some() - && buffer - .in_flight - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) - .is_ok() - { - continue; - } - - break; - }; - - logging!(debug, Type::Frontend, "Dispatching buffered proxies emit"); - let _guard = EMIT_SERIALIZER.lock().await; - if let Err(err) = Self::emit_via_app(EVENT_NAME, payload) { - logging!( - warn, - Type::Frontend, - "Buffered proxies emit failed: {}", - err - ); + Err(e) => { + logging!(warn, Type::Frontend, "Event emit failed: {}", e); + self.handle_emit_error(); } } } - fn describe_event(event: &FrontendEvent) -> String { 
- match event { - FrontendEvent::RefreshClash => "RefreshClash".into(), - FrontendEvent::RefreshVerge => "RefreshVerge".into(), - FrontendEvent::RefreshProxy => "RefreshProxy".into(), - FrontendEvent::ProxiesUpdated { .. } => "ProxiesUpdated".into(), - FrontendEvent::NoticeMessage { status, .. } => { - format!("NoticeMessage({})", status).into() - } - FrontendEvent::ProfileChanged { current_profile_id } => { - format!("ProfileChanged({})", current_profile_id).into() - } - FrontendEvent::ProfileSwitchFinished { - profile_id, - task_id, - .. - } => format!( - "ProfileSwitchFinished(profile={}, task={})", - profile_id, task_id - ) - .into(), - FrontendEvent::TimerUpdated { profile_index } => { - format!("TimerUpdated({})", profile_index).into() - } - FrontendEvent::ProfileUpdateStarted { uid } => { - format!("ProfileUpdateStarted({})", uid).into() - } - FrontendEvent::ProfileUpdateCompleted { uid } => { - format!("ProfileUpdateCompleted({})", uid).into() - } - FrontendEvent::RustPanic { message, .. 
} => format!("RustPanic({})", message).into(), - } - } - - #[allow(dead_code)] fn serialize_event( &self, event: FrontendEvent, @@ -337,25 +167,9 @@ impl NotificationSystem { "verge://notice-message", serde_json::to_value((status, message)), ), - FrontendEvent::RefreshProxy => ("verge://refresh-proxy-config", Ok(json!("yes"))), - FrontendEvent::ProxiesUpdated { payload } => ("proxies-updated", Ok(payload)), FrontendEvent::ProfileChanged { current_profile_id } => { ("profile-changed", Ok(json!(current_profile_id))) } - FrontendEvent::ProfileSwitchFinished { - profile_id, - success, - notify, - task_id, - } => ( - "profile-switch-finished", - Ok(json!({ - "profileId": profile_id, - "success": success, - "notify": notify, - "taskId": task_id - })), - ), FrontendEvent::TimerUpdated { profile_index } => { ("verge://timer-updated", Ok(json!(profile_index))) } @@ -365,10 +179,6 @@ impl NotificationSystem { FrontendEvent::ProfileUpdateCompleted { uid } => { ("profile-update-completed", Ok(json!({ "uid": uid }))) } - FrontendEvent::RustPanic { message, location } => ( - "rust-panic", - Ok(json!({ "message": message, "location": location })), - ), } } @@ -394,19 +204,10 @@ impl NotificationSystem { } if let Some(sender) = &self.sender { - if sender.send(event).is_err() { - logging!( - warn, - Type::Frontend, - "Failed to send event to worker thread" - ); - self.handle_emit_error(); - return false; - } - return true; + sender.send(event).is_ok() + } else { + false } - - false } pub fn shutdown(&mut self) { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 84838962..bbd16795 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -194,7 +194,6 @@ mod app_init { cmd::get_profiles, cmd::enhance_profiles, cmd::patch_profiles_config, - cmd::switch_profile, cmd::view_profile, cmd::patch_profile, cmd::create_profile, @@ -205,8 +204,6 @@ mod app_init { cmd::read_profile_file, cmd::save_profile_file, cmd::get_next_update_time, - cmd::get_profile_switch_status, - 
cmd::get_profile_switch_events, cmd::script_validate_notice, cmd::validate_script_file, cmd::create_local_backup, @@ -223,7 +220,6 @@ mod app_init { cmd::get_system_info, cmd::get_unlock_items, cmd::check_media_unlock, - cmd::frontend_log, ] } } @@ -362,28 +358,6 @@ pub fn run() { } } - std::panic::set_hook(Box::new(|info| { - let payload = info - .payload() - .downcast_ref::<&'static str>() - .map(|s| (*s).to_string()) - .or_else(|| info.payload().downcast_ref::().cloned()) - .unwrap_or_else(|| "Unknown panic".to_string()); - let location = info - .location() - .map(|loc| format!("{}:{}", loc.file(), loc.line())) - .unwrap_or_else(|| "unknown location".to_string()); - - logging!( - error, - Type::System, - "Rust panic captured: {} @ {}", - payload, - location - ); - handle::Handle::notify_rust_panic(payload.into(), location.into()); - })); - #[cfg(feature = "clippy")] let context = tauri::test::mock_context(tauri::test::noop_assets()); #[cfg(feature = "clippy")] diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index cc72f9c3..044f6f1f 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -68,13 +68,6 @@ impl Draft> { }) } - /// 尝试获取最新只读视图,若当前持有写锁则返回 `None` - pub fn try_latest_ref(&self) -> Option>> { - self.inner - .try_read() - .map(|guard| RwLockReadGuard::map(guard, |inner| inner.1.as_ref().unwrap_or(&inner.0))) - } - /// 提交草稿,返回旧正式数据 pub fn apply(&self) -> Option> { let mut inner = self.inner.write(); diff --git a/src/components/home/current-proxy-card.tsx b/src/components/home/current-proxy-card.tsx index ed74def8..ceea82d7 100644 --- a/src/components/home/current-proxy-card.tsx +++ b/src/components/home/current-proxy-card.tsx @@ -100,12 +100,10 @@ export const CurrentProxyCard = () => { const { t } = useTranslation(); const navigate = useNavigate(); const theme = useTheme(); - const { proxies, proxyHydration, clashConfig, refreshProxy, rules } = - useAppData(); + const { proxies, clashConfig, refreshProxy, 
rules } = useAppData(); const { verge } = useVerge(); const { current: currentProfile } = useProfiles(); const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false; - const isLiveHydration = proxyHydration === "live"; const currentProfileId = currentProfile?.uid || null; const getProfileStorageKey = useCallback( @@ -717,6 +715,7 @@ export const CurrentProxyCard = () => { ); } } + refreshProxy(); if (sortType === 1) { setDelaySortRefresh((prev) => prev + 1); @@ -841,24 +840,13 @@ export const CurrentProxyCard = () => { iconColor={currentProxy ? "primary" : undefined} action={ - {!isLiveHydration && ( - - )} @@ -972,7 +960,7 @@ export const CurrentProxyCard = () => { value={state.selection.group} onChange={handleGroupChange} label={t("Group")} - disabled={isGlobalMode || isDirectMode || !isLiveHydration} + disabled={isGlobalMode || isDirectMode} > {state.proxyData.groups.map((group) => ( @@ -990,7 +978,7 @@ export const CurrentProxyCard = () => { value={state.selection.proxy} onChange={handleProxyChange} label={t("Proxy")} - disabled={isDirectMode || !isLiveHydration} + disabled={isDirectMode} renderValue={renderProxyValue} MenuProps={{ PaperProps: { diff --git a/src/components/proxy/provider-button.tsx b/src/components/proxy/provider-button.tsx index 4d0f2c39..e22b856e 100644 --- a/src/components/proxy/provider-button.tsx +++ b/src/components/proxy/provider-button.tsx @@ -1,7 +1,6 @@ import { RefreshRounded, StorageOutlined } from "@mui/icons-material"; import { Box, - Chip, Button, Dialog, DialogActions, @@ -19,7 +18,7 @@ import { } from "@mui/material"; import { useLockFn } from "ahooks"; import dayjs from "dayjs"; -import { useMemo, useState } from "react"; +import { useState } from "react"; import { useTranslation } from "react-i18next"; import { updateProxyProvider } from "tauri-plugin-mihomo-api"; @@ -49,61 +48,29 @@ const parseExpire = (expire?: number) => { export const ProviderButton = () => { const { t } = useTranslation(); const [open, setOpen] 
= useState(false); - const { - proxyProviders, - proxyHydration, - refreshProxy, - refreshProxyProviders, - } = useAppData(); - - const isHydrating = proxyHydration !== "live"; + const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData(); const [updating, setUpdating] = useState>({}); // 检查是否有提供者 const hasProviders = Object.keys(proxyProviders || {}).length > 0; - // Hydration hint badge keeps users aware of sync state - const hydrationChip = useMemo(() => { - if (proxyHydration === "live") return null; - - return ( - - ); - }, [proxyHydration, t]); - // 更新单个代理提供者 const updateProvider = useLockFn(async (name: string) => { - if (isHydrating) { - showNotice("info", t("Proxy data is syncing, please wait")); - return; - } - try { // 设置更新状态 setUpdating((prev) => ({ ...prev, [name]: true })); + await updateProxyProvider(name); - await refreshProxyProviders(); + + // 刷新数据 await refreshProxy(); - showNotice( - "success", - t("Provider {{name}} updated successfully", { name }), - ); + await refreshProxyProviders(); + + showNotice("success", `${name} 更新成功`); } catch (err: any) { showNotice( "error", - t("Provider {{name}} update failed: {{message}}", { - name, - message: err?.message || err.toString(), - }), + `${name} 更新失败: ${err?.message || err.toString()}`, ); } finally { // 清除更新状态 @@ -113,16 +80,11 @@ export const ProviderButton = () => { // 更新所有代理提供者 const updateAllProviders = useLockFn(async () => { - if (isHydrating) { - showNotice("info", t("Proxy data is syncing, please wait")); - return; - } - try { // 获取所有provider的名称 const allProviders = Object.keys(proxyProviders || {}); if (allProviders.length === 0) { - showNotice("info", t("No providers to update")); + showNotice("info", "没有可更新的代理提供者"); return; } @@ -148,67 +110,54 @@ export const ProviderButton = () => { } } - await refreshProxyProviders(); + // 刷新数据 await refreshProxy(); - showNotice("success", t("All providers updated successfully")); + await refreshProxyProviders(); + + 
showNotice("success", "全部代理提供者更新成功"); } catch (err: any) { - showNotice( - "error", - t("Failed to update providers: {{message}}", { - message: err?.message || err.toString(), - }), - ); + showNotice("error", `更新失败: ${err?.message || err.toString()}`); } finally { // 清除所有更新状态 setUpdating({}); } }); - const handleClose = () => setOpen(false); + const handleClose = () => { + setOpen(false); + }; if (!hasProviders) return null; return ( <> - - - {hydrationChip} - + {t("Proxy Provider")} - + + + @@ -217,63 +166,54 @@ export const ProviderButton = () => { {Object.entries(proxyProviders || {}) .sort() .map(([key, item]) => { - if (!item) return null; - - const time = dayjs(item.updatedAt); + const provider = item; + const time = dayjs(provider.updatedAt); const isUpdating = updating[key]; - const sub = item.subscriptionInfo; - const hasSubInfo = Boolean(sub); - const upload = sub?.Upload ?? 0; - const download = sub?.Download ?? 0; - const total = sub?.Total ?? 0; - const expire = sub?.Expire ?? 0; + + // 订阅信息 + const sub = provider.subscriptionInfo; + const hasSubInfo = !!sub; + const upload = sub?.Upload || 0; + const download = sub?.Download || 0; + const total = sub?.Total || 0; + const expire = sub?.Expire || 0; + + // 流量使用进度 const progress = total > 0 ? Math.min( + Math.round(((download + upload) * 100) / total) + 1, 100, - Math.max(0, ((upload + download) / total) * 100), ) : 0; return ( - updateProvider(key)} - disabled={isUpdating || isHydrating} - sx={{ - animation: isUpdating - ? 
"spin 1s linear infinite" - : "none", - "@keyframes spin": { - "0%": { transform: "rotate(0deg)" }, - "100%": { transform: "rotate(360deg)" }, - }, - }} - title={t("Update Provider") as string} - > - - - - } - sx={{ - mb: 1, - borderRadius: 1, - border: "1px solid", - borderColor: alpha("#ccc", 0.4), - backgroundColor: alpha("#fff", 0.02), - }} + sx={[ + { + p: 0, + mb: "8px", + borderRadius: 2, + overflow: "hidden", + transition: "all 0.2s", + }, + ({ palette: { mode, primary } }) => { + const bgcolor = + mode === "light" ? "#ffffff" : "#24252f"; + const hoverColor = + mode === "light" + ? alpha(primary.main, 0.1) + : alpha(primary.main, 0.2); + + return { + backgroundColor: bgcolor, + "&:hover": { + backgroundColor: hoverColor, + }, + }; + }, + ]} > { display: "flex", justifyContent: "space-between", alignItems: "center", - gap: 1, }} > { title={key} sx={{ display: "flex", alignItems: "center" }} > - {key} + {key} - {item.proxies.length} + {provider.proxies.length} - {item.vehicleType} + {provider.vehicleType} @@ -313,39 +252,72 @@ export const ProviderButton = () => { } secondary={ - hasSubInfo ? ( - <> - - - {parseTraffic(upload + download)} /{" "} - {parseTraffic(total)} - - - {parseExpire(expire)} - - + <> + {/* 订阅信息 */} + {hasSubInfo && ( + <> + + + {parseTraffic(upload + download)} /{" "} + {parseTraffic(total)} + + + {parseExpire(expire)} + + - 0 ? 1 : 0, - }} - /> - - ) : null + {/* 进度条 */} + 0 ? 1 : 0, + }} + /> + + )} + } /> + + { + updateProvider(key); + }} + disabled={isUpdating} + sx={{ + animation: isUpdating + ? 
"spin 1s linear infinite" + : "none", + "@keyframes spin": { + "0%": { transform: "rotate(0deg)" }, + "100%": { transform: "rotate(360deg)" }, + }, + }} + title={t("Update Provider") as string} + > + + + ); })} diff --git a/src/components/proxy/proxy-groups.tsx b/src/components/proxy/proxy-groups.tsx index 1a82f2c8..eec3f2e8 100644 --- a/src/components/proxy/proxy-groups.tsx +++ b/src/components/proxy/proxy-groups.tsx @@ -61,17 +61,10 @@ export const ProxyGroups = (props: Props) => { }>({ open: false, message: "" }); const { verge } = useVerge(); - const { - proxies: proxiesData, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - } = useAppData(); + const { proxies: proxiesData } = useAppData(); const groups = proxiesData?.groups; const availableGroups = useMemo(() => groups ?? [], [groups]); - const showHydrationOverlay = isProxyRefreshPending; - const pendingProfileSwitch = proxyTargetProfileId !== proxyDisplayProfileId; + const defaultRuleGroup = useMemo(() => { if (isChainMode && mode === "rule" && availableGroups.length > 0) { return availableGroups[0].name; @@ -83,35 +76,6 @@ export const ProxyGroups = (props: Props) => { () => selectedGroup ?? defaultRuleGroup, [selectedGroup, defaultRuleGroup], ); - const hydrationChip = useMemo(() => { - if (proxyHydration === "live") return null; - - const label = - proxyHydration === "snapshot" ? 
t("Snapshot data") : t("Syncing..."); - - return ( - - ); - }, [proxyHydration, t]); - - const overlayMessage = useMemo(() => { - if (!showHydrationOverlay) return null; - - if (pendingProfileSwitch) { - return t("Loading proxy data for the selected profile..."); - } - - if (proxyHydration === "snapshot") { - return t("Preparing proxy snapshot..."); - } - - return t("Syncing proxy data..."); - }, [showHydrationOverlay, pendingProfileSwitch, proxyHydration, t]); const { renderList, onProxies, onHeadState } = useRenderList( mode, @@ -129,7 +93,7 @@ export const ProxyGroups = (props: Props) => { [renderList], ); - // 系统代理选择 + // 统代理选择 const { handleProxyGroupChange } = useProxySelection({ onSuccess: () => { onProxies(); @@ -342,7 +306,12 @@ export const ProxyGroups = (props: Props) => { try { await Promise.race([ delayManager.checkListDelay(names, groupName, timeout), - delayGroup(groupName, url, timeout), + delayGroup(groupName, url, timeout).then((result) => { + console.log( + `[ProxyGroups] getGroupProxyDelays返回结果数量:`, + Object.keys(result || {}).length, + ); + }), // 查询group delays 将清除fixed(不关注调用结果) ]); console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`); } catch (error) { @@ -407,11 +376,6 @@ export const ProxyGroups = (props: Props) => { } if (isChainMode) { - const chainVirtuosoHeight = - mode === "rule" && proxyGroupNames.length > 0 - ? "calc(100% - 80px)" - : "calc(100% - 14px)"; - // 获取所有代理组 const proxyGroups = proxiesData?.groups || []; @@ -490,7 +454,10 @@ export const ProxyGroups = (props: Props) => { 0 + ? "calc(100% - 80px)" // 只有标题的高度 + : "calc(100% - 14px)", }} totalCount={renderList.length} increaseViewportBy={{ top: 200, bottom: 200 }} @@ -581,9 +548,7 @@ export const ProxyGroups = (props: Props) => { {group.name} - {`${t("Group Type")}: ${group.type} · ${t("Proxy Count")}: ${ - Array.isArray(group.all) ? 
group.all.length : 0 - }`} + {group.type} · {group.all.length} 节点 @@ -591,7 +556,7 @@ export const ProxyGroups = (props: Props) => { {availableGroups.length === 0 && ( - {t("Empty")} + 暂无可用代理组 )} @@ -602,29 +567,9 @@ export const ProxyGroups = (props: Props) => { return (
- {hydrationChip && ( - - {hydrationChip} - - )} + {/* 代理组导航栏 */} {mode === "rule" && ( { )} /> - {showHydrationOverlay && overlayMessage && ( - - - - {overlayMessage} - - - - )}
); }; diff --git a/src/components/proxy/use-render-list.ts b/src/components/proxy/use-render-list.ts index 1e6e0fd6..7a5949ae 100644 --- a/src/components/proxy/use-render-list.ts +++ b/src/components/proxy/use-render-list.ts @@ -14,13 +14,50 @@ import { } from "./use-head-state"; import { useWindowWidth } from "./use-window-width"; -type RenderGroup = IProxyGroupItem; +// 定义代理项接口 +interface IProxyItem { + name: string; + type: string; + udp: boolean; + xudp: boolean; + tfo: boolean; + mptcp: boolean; + smux: boolean; + history: { + time: string; + delay: number; + }[]; + provider?: string; + testUrl?: string; + [key: string]: any; // 添加索引签名以适应其他可能的属性 +} + +// 代理组类型 +type ProxyGroup = { + name: string; + type: string; + udp: boolean; + xudp: boolean; + tfo: boolean; + mptcp: boolean; + smux: boolean; + history: { + time: string; + delay: number; + }[]; + now: string; + all: IProxyItem[]; + hidden?: boolean; + icon?: string; + testUrl?: string; + provider?: string; +}; export interface IRenderItem { // 组 | head | item | empty | item col type: 0 | 1 | 2 | 3 | 4; key: string; - group: RenderGroup; + group: ProxyGroup; proxy?: IProxyItem; col?: number; proxyCol?: IProxyItem[]; @@ -62,7 +99,7 @@ export const useRenderList = ( selectedGroup?: string | null, ) => { // 使用全局数据提供者 - const { proxies: proxiesData, proxyHydration, refreshProxy } = useAppData(); + const { proxies: proxiesData, refreshProxy } = useAppData(); const { verge } = useVerge(); const { width } = useWindowWidth(); const [headStates, setHeadState] = useHeadStateNew(); @@ -86,29 +123,17 @@ export const useRenderList = ( // 确保代理数据加载 useEffect(() => { - if (!proxiesData || proxyHydration !== "live") return; + if (!proxiesData) return; const { groups, proxies } = proxiesData; if ( (mode === "rule" && !groups.length) || (mode === "global" && proxies.length < 2) ) { - const handle = setTimeout(() => { - void refreshProxy().catch(() => {}); - }, 500); + const handle = setTimeout(() => refreshProxy(), 500); return 
() => clearTimeout(handle); } - }, [proxiesData, proxyHydration, mode, refreshProxy]); - - useEffect(() => { - if (proxyHydration !== "snapshot") return; - - const handle = setTimeout(() => { - void refreshProxy().catch(() => {}); - }, 1800); - - return () => clearTimeout(handle); - }, [proxyHydration, refreshProxy]); + }, [proxiesData, mode, refreshProxy]); // 链式代理模式节点自动计算延迟 useEffect(() => { @@ -122,7 +147,7 @@ export const useRenderList = ( // 设置组监听器,当有延迟更新时自动刷新 const groupListener = () => { console.log("[ChainMode] 延迟更新,刷新UI"); - void refreshProxy().catch(() => {}); + refreshProxy(); }; delayManager.setGroupListener("chain-mode", groupListener); @@ -163,12 +188,9 @@ export const useRenderList = ( // 链式代理模式下,显示代理组和其节点 if (isChainMode && runtimeConfig && mode === "rule") { // 使用正常的规则模式代理组 - const chainGroups = proxiesData.groups ?? []; - const allGroups = chainGroups.length - ? chainGroups - : proxiesData.global - ? [proxiesData.global] - : []; + const allGroups = proxiesData.groups.length + ? proxiesData.groups + : [proxiesData.global!]; // 如果选择了特定代理组,只显示该组的节点 if (selectedGroup) { @@ -260,7 +282,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: RenderGroup = { + const virtualGroup: ProxyGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -318,7 +340,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: RenderGroup = { + const virtualGroup: ProxyGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -358,15 +380,12 @@ export const useRenderList = ( // 正常模式的渲染逻辑 const useRule = mode === "rule" || mode === "script"; - const renderGroups = (() => { - const groups = proxiesData.groups ?? []; - if (useRule && groups.length) { - return groups; - } - return proxiesData.global ? [proxiesData.global] : groups; - })(); + const renderGroups = + useRule && proxiesData.groups.length + ? 
proxiesData.groups + : [proxiesData.global!]; - const retList = renderGroups.flatMap((group: RenderGroup) => { + const retList = renderGroups.flatMap((group: ProxyGroup) => { const headState = headStates[group.name] || DEFAULT_STATE; const ret: IRenderItem[] = [ { diff --git a/src/hooks/use-current-proxy.ts b/src/hooks/use-current-proxy.ts index 0c7108ff..7d352326 100644 --- a/src/hooks/use-current-proxy.ts +++ b/src/hooks/use-current-proxy.ts @@ -2,6 +2,12 @@ import { useMemo } from "react"; import { useAppData } from "@/providers/app-data-context"; +// 定义代理组类型 +interface ProxyGroup { + name: string; + now: string; +} + // 获取当前代理节点信息的自定义Hook export const useCurrentProxy = () => { // 从AppDataProvider获取数据 @@ -31,15 +37,15 @@ export const useCurrentProxy = () => { "自动选择", ]; const primaryGroup = - groups.find((group) => + groups.find((group: ProxyGroup) => primaryKeywords.some((keyword) => group.name.toLowerCase().includes(keyword.toLowerCase()), ), - ) || groups.find((group) => group.name !== "GLOBAL"); + ) || groups.filter((g: ProxyGroup) => g.name !== "GLOBAL")[0]; if (primaryGroup) { primaryGroupName = primaryGroup.name; - currentName = primaryGroup.now ?? 
currentName; + currentName = primaryGroup.now; } } diff --git a/src/hooks/use-profiles.ts b/src/hooks/use-profiles.ts index c412ec30..fdb73462 100644 --- a/src/hooks/use-profiles.ts +++ b/src/hooks/use-profiles.ts @@ -5,54 +5,33 @@ import { getProfiles, patchProfile, patchProfilesConfig, - calcuProxies, } from "@/services/cmds"; -import { - useProfileStore, - selectEffectiveProfiles, - selectIsHydrating, - selectLastResult, -} from "@/stores/profile-store"; +import { calcuProxies } from "@/services/cmds"; export const useProfiles = () => { - const profilesFromStore = useProfileStore(selectEffectiveProfiles); - const storeHydrating = useProfileStore(selectIsHydrating); - const lastResult = useProfileStore(selectLastResult); - const commitProfileSnapshot = useProfileStore( - (state) => state.commitHydrated, - ); - const { - data: swrProfiles, + data: profiles, mutate: mutateProfiles, error, isValidating, } = useSWR("getProfiles", getProfiles, { revalidateOnFocus: false, revalidateOnReconnect: false, - dedupingInterval: 500, + dedupingInterval: 500, // 减少去重时间,提高响应性 errorRetryCount: 3, errorRetryInterval: 1000, - refreshInterval: 0, - onError: (err) => { - console.error("[useProfiles] SWR错误:", err); + refreshInterval: 0, // 完全由手动控制 + onError: (error) => { + console.error("[useProfiles] SWR错误:", error); }, onSuccess: (data) => { - commitProfileSnapshot(data); console.log( - "[useProfiles] 配置数据更新成功,配置数量", + "[useProfiles] 配置数据更新成功,配置数量:", data?.items?.length || 0, ); }, }); - const rawProfiles = profilesFromStore ?? swrProfiles; - const profiles = (rawProfiles ?? 
{ - current: null, - items: [], - }) as IProfilesConfig; - const hasProfiles = rawProfiles != null; - const patchProfiles = async ( value: Partial, signal?: AbortSignal, @@ -70,30 +49,32 @@ export const useProfiles = () => { await mutateProfiles(); return success; - } catch (err) { - if (err instanceof DOMException && err.name === "AbortError") { - throw err; + } catch (error) { + if (error instanceof DOMException && error.name === "AbortError") { + throw error; } await mutateProfiles(); - throw err; + throw error; } }; const patchCurrent = async (value: Partial) => { - if (!hasProfiles || !profiles.current) { - return; + if (profiles?.current) { + await patchProfile(profiles.current, value); + mutateProfiles(); } - await patchProfile(profiles.current, value); - mutateProfiles(); }; + // 根据selected的节点选择 const activateSelected = async () => { try { console.log("[ActivateSelected] 开始处理代理选择"); - const proxiesData = await calcuProxies(); - const profileData = hasProfiles ? profiles : null; + const [proxiesData, profileData] = await Promise.all([ + calcuProxies(), + getProfiles(), + ]); if (!profileData || !proxiesData) { console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理"); @@ -109,6 +90,7 @@ export const useProfiles = () => { return; } + // 检查是否有saved的代理选择 const { selected = [] } = current; if (selected.length === 0) { console.log("[ActivateSelected] 当前profile无保存的代理选择,跳过"); @@ -116,7 +98,7 @@ export const useProfiles = () => { } console.log( - `[ActivateSelected] 当前profile有${selected.length} 个代理选择配置`, + `[ActivateSelected] 当前profile有 ${selected.length} 个代理选择配置`, ); const selectedMap = Object.fromEntries( @@ -133,6 +115,7 @@ export const useProfiles = () => { "LoadBalance", ]); + // 处理所有代理组 [global, ...groups].forEach((group) => { if (!group) { return; @@ -167,7 +150,7 @@ export const useProfiles = () => { if (!existsInGroup) { console.warn( - `[ActivateSelected] 保存的代理${savedProxy} 不存在于代理组${name}`, + `[ActivateSelected] 保存的代理 ${savedProxy} 不存在于代理组 ${name}`, ); hasChange = 
true; newSelected.push({ name, now: now ?? savedProxy }); @@ -190,7 +173,7 @@ export const useProfiles = () => { return; } - console.log("[ActivateSelected] 完成代理切换,保存新的选择配置"); + console.log(`[ActivateSelected] 完成代理切换,保存新的选择配置`); try { await patchProfile(profileData.current!, { selected: newSelected }); @@ -212,18 +195,14 @@ export const useProfiles = () => { return { profiles, - hasProfiles, - current: hasProfiles - ? (profiles.items?.find((p) => p && p.uid === profiles.current) ?? null) - : null, + current: profiles?.items?.find((p) => p && p.uid === profiles.current), activateSelected, patchProfiles, patchCurrent, mutateProfiles, - isLoading: isValidating || storeHydrating, - isHydrating: storeHydrating, - lastResult, + // 新增故障检测状态 + isLoading: isValidating, error, - isStale: !hasProfiles && !error && !isValidating, + isStale: !profiles && !error && !isValidating, // 检测是否处于异常状态 }; }; diff --git a/src/pages/_layout/useLayoutEvents.ts b/src/pages/_layout/useLayoutEvents.ts index e3c9ecbc..c26084a3 100644 --- a/src/pages/_layout/useLayoutEvents.ts +++ b/src/pages/_layout/useLayoutEvents.ts @@ -1,9 +1,11 @@ import { listen } from "@tauri-apps/api/event"; import { getCurrentWebviewWindow } from "@tauri-apps/api/webviewWindow"; import { useEffect } from "react"; +import { mutate } from "swr"; import { useListen } from "@/hooks/use-listen"; -import { refreshClashData, refreshVergeData } from "@/services/refresh"; +import { getAxios } from "@/services/api"; + export const useLayoutEvents = ( handleNotice: (payload: [string, string]) => void, ) => { @@ -35,32 +37,32 @@ export const useLayoutEvents = ( .catch((error) => console.error("[事件监听] 注册失败", error)); }; - register( - addListener("verge://notice-message", ({ payload }) => - handleNotice(payload as [string, string]), - ), - ); - register( addListener("verge://refresh-clash-config", async () => { - try { - await refreshClashData(); - } catch (error) { - console.error("[事件监听] 刷新 Clash 配置失败", error); - } + await 
getAxios(true); + mutate("getProxies"); + mutate("getVersion"); + mutate("getClashConfig"); + mutate("getProxyProviders"); }), ); register( addListener("verge://refresh-verge-config", () => { - try { - refreshVergeData(); - } catch (error) { - console.error("[事件监听] 刷新 Verge 配置失败", error); - } + mutate("getVergeConfig"); + mutate("getSystemProxy"); + mutate("getAutotemProxy"); + mutate("getRunningMode"); + mutate("isServiceAvailable"); }), ); + register( + addListener("verge://notice-message", ({ payload }) => + handleNotice(payload as [string, string]), + ), + ); + const appWindow = getCurrentWebviewWindow(); register( (async () => { diff --git a/src/pages/profiles.tsx b/src/pages/profiles.tsx index 0d29fe34..597ec4ec 100644 --- a/src/pages/profiles.tsx +++ b/src/pages/profiles.tsx @@ -25,23 +25,16 @@ import { } from "@mui/icons-material"; import { LoadingButton } from "@mui/lab"; import { Box, Button, Divider, Grid, IconButton, Stack } from "@mui/material"; -import { invoke } from "@tauri-apps/api/core"; import { listen, TauriEvent } from "@tauri-apps/api/event"; import { readText } from "@tauri-apps/plugin-clipboard-manager"; import { readTextFile } from "@tauri-apps/plugin-fs"; import { useLockFn } from "ahooks"; import { throttle } from "lodash-es"; -import { - useCallback, - useEffect, - useMemo, - useReducer, - useRef, - useState, -} from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { useTranslation } from "react-i18next"; import { useLocation } from "react-router"; import useSWR, { mutate } from "swr"; +import { closeAllConnections } from "tauri-plugin-mihomo-api"; import { BasePage, DialogRef } from "@/components/base"; import { BaseStyledTextField } from "@/components/base/base-styled-text-field"; @@ -54,7 +47,6 @@ import { import { ConfigViewer } from "@/components/setting/mods/config-viewer"; import { useListen } from "@/hooks/use-listen"; import { useProfiles } from "@/hooks/use-profiles"; -import { 
useAppData } from "@/providers/app-data-context"; import { createProfile, deleteProfile, @@ -65,16 +57,11 @@ import { importProfile, reorderProfile, updateProfile, - switchProfileCommand, - type ProfileSwitchStatus, - type SwitchTaskStatus, } from "@/services/cmds"; import { showNotice } from "@/services/noticeService"; -import { refreshClashData } from "@/services/refresh"; import { useSetLoadingCache, useThemeMode } from "@/services/states"; -import { AsyncEventQueue, afterPaint } from "@/utils/asyncQueue"; -// Record profile switch state +// 记录profile切换状态 const debugProfileSwitch = (action: string, profile: string, extra?: any) => { const timestamp = new Date().toISOString().substring(11, 23); console.log( @@ -83,80 +70,33 @@ const debugProfileSwitch = (action: string, profile: string, extra?: any) => { ); }; -type RustPanicPayload = { - message: string; - location: string; +// 检查请求是否已过期 +const isRequestOutdated = ( + currentSequence: number, + requestSequenceRef: any, + profile: string, +) => { + if (currentSequence !== requestSequenceRef.current) { + debugProfileSwitch( + "REQUEST_OUTDATED", + profile, + `当前序列号: ${currentSequence}, 最新序列号: ${requestSequenceRef.current}`, + ); + return true; + } + return false; }; -type SwitchTaskMeta = { profileId: string; notify: boolean }; - -const collectSwitchingProfileIds = ( - status: ProfileSwitchStatus | null, -): string[] => { - if (!status) return []; - const ids = new Set(); - if (status.active) { - ids.add(status.active.profileId); - } - status.queue.forEach((task) => ids.add(task.profileId)); - return Array.from(ids); -}; - -type ManualActivatingAction = - | { type: "reset" } - | { type: "set"; value: string[] } - | { type: "add"; ids: string[] } - | { type: "remove"; id: string } - | { type: "filterAllowed"; allowed: Set }; - -const manualActivatingReducer = ( - state: string[], - action: ManualActivatingAction, -): string[] => { - switch (action.type) { - case "reset": - return state.length > 0 ? 
[] : state; - case "set": { - const unique = Array.from( - new Set(action.value.filter((id) => typeof id === "string" && id)), - ); - if ( - unique.length === state.length && - unique.every((id, index) => id === state[index]) - ) { - return state; - } - return unique; - } - case "add": { - const incoming = action.ids.filter((id) => typeof id === "string" && id); - if (incoming.length === 0) { - return state; - } - const next = new Set(state); - let changed = false; - incoming.forEach((id) => { - const before = next.size; - next.add(id); - if (next.size !== before) { - changed = true; - } - }); - return changed ? Array.from(next) : state; - } - case "remove": { - if (!state.includes(action.id)) { - return state; - } - return state.filter((id) => id !== action.id); - } - case "filterAllowed": { - const next = state.filter((id) => action.allowed.has(id)); - return next.length === state.length ? state : next; - } - default: - return state; +// 检查是否被中断 +const isOperationAborted = ( + abortController: AbortController, + profile: string, +) => { + if (abortController.signal.aborted) { + debugProfileSwitch("OPERATION_ABORTED", profile); + return true; } + return false; }; const normalizeProfileUrl = (value?: string) => { @@ -177,7 +117,7 @@ const normalizeProfileUrl = (value?: string) => { } catch { const schemeNormalized = trimmed.replace( /^([a-z]+):\/\//i, - (_match, scheme: string) => `${scheme.toLowerCase()}://`, + (match, scheme: string) => `${scheme.toLowerCase()}://`, ); return schemeNormalized.replace(/\/+$/, ""); } @@ -219,7 +159,7 @@ const createImportLandingVerifier = ( if (currentCount > baselineCount) { console.log( - `[Import Verify] Configuration count increased: ${baselineCount} -> ${currentCount}`, + `[导入验证] 配置数量已增加: ${baselineCount} -> ${currentCount}`, ); return true; } @@ -237,9 +177,7 @@ const createImportLandingVerifier = ( } if (!hadBaselineProfile) { - console.log( - "[Import Verify] Detected new profile record; treating as success", - ); + 
console.log("[导入验证] 检测到新的订阅记录,判定为导入成功"); return true; } @@ -248,15 +186,13 @@ const createImportLandingVerifier = ( if (currentUpdated > baselineUpdated) { console.log( - `[Import Verify] Profile timestamp updated ${baselineUpdated} -> ${currentUpdated}`, + `[导入验证] 订阅更新时间已更新 ${baselineUpdated} -> ${currentUpdated}`, ); return true; } if (currentSignature !== baselineSignature) { - console.log( - "[Import Verify] Profile details changed; treating as success", - ); + console.log("[导入验证] 订阅详情发生变化,判定为导入成功"); return true; } @@ -269,110 +205,14 @@ const createImportLandingVerifier = ( }; }; -const isDev = import.meta.env.DEV; - const ProfilePage = () => { - // Serialize profile switch events so state transitions stay deterministic. - const switchEventQueue = useMemo(() => new AsyncEventQueue(), []); - // Stage follow-up effects (hydration, refresh) to run sequentially after switch completion. - const postSwitchEffectQueue = useMemo(() => new AsyncEventQueue(), []); - const mountedRef = useRef(false); - const { t } = useTranslation(); const location = useLocation(); - const logToBackend = useCallback( - ( - level: "debug" | "info" | "warn" | "error", - message: string, - context?: Record, - ) => { - const payload: Record = { - level, - message, - }; - if (context !== undefined) { - payload.context = context; - } - invoke("frontend_log", { payload }).catch(() => {}); - }, - [], - ); const { addListener } = useListen(); - const { switchStatus } = useAppData(); const [url, setUrl] = useState(""); const [disabled, setDisabled] = useState(false); - const [manualActivatings, dispatchManualActivatings] = useReducer( - manualActivatingReducer, - [], - ); - const taskMetaRef = useRef>(new Map()); - const lastResultAtRef = useRef(0); - const initialLastResultSyncRef = useRef(true); - - useEffect(() => { - mountedRef.current = true; - return () => { - mountedRef.current = false; - switchEventQueue.clear(); - postSwitchEffectQueue.clear(); - if (isDev) { - 
console.debug("[ProfileSwitch] component unmounted, queues cleared"); - } - }; - }, [postSwitchEffectQueue, switchEventQueue]); - useEffect(() => { - const handleError = (event: ErrorEvent) => { - logToBackend("error", "[ProfileSwitch] window error captured", { - message: event.message, - filename: event.filename, - lineno: event.lineno, - colno: event.colno, - stack: event.error?.stack, - }); - console.error( - "[ProfileSwitch] window error captured", - event.message, - event.error, - ); - }; - const handleRejection = (event: PromiseRejectionEvent) => { - let reasonSummary: string; - if (typeof event.reason === "object") { - try { - reasonSummary = JSON.stringify(event.reason); - } catch (error) { - reasonSummary = `[unserializable reason: ${String(error)}]`; - } - } else { - reasonSummary = String(event.reason); - } - logToBackend("error", "[ProfileSwitch] unhandled rejection captured", { - reason: reasonSummary, - }); - console.error( - "[ProfileSwitch] unhandled rejection captured", - event.reason, - ); - }; - window.addEventListener("error", handleError); - window.addEventListener("unhandledrejection", handleRejection); - return () => { - window.removeEventListener("error", handleError); - window.removeEventListener("unhandledrejection", handleRejection); - }; - }, [logToBackend]); + const [activatings, setActivatings] = useState([]); const [loading, setLoading] = useState(false); - const postSwitchGenerationRef = useRef(0); - const switchingProfileId = switchStatus?.active?.profileId ?? null; - const switchActivatingIds = useMemo( - () => collectSwitchingProfileIds(switchStatus ?? 
null), - [switchStatus], - ); - const activatings = useMemo(() => { - const merged = new Set(manualActivatings); - switchActivatingIds.forEach((id) => merged.add(id)); - return Array.from(merged); - }, [manualActivatings, switchActivatingIds]); // Batch selection states const [batchMode, setBatchMode] = useState(false); @@ -380,6 +220,57 @@ const ProfilePage = () => { () => new Set(), ); + // 防止重复切换 + const switchingProfileRef = useRef(null); + + // 支持中断当前切换操作 + const abortControllerRef = useRef(null); + + // 只处理最新的切换请求 + const requestSequenceRef = useRef(0); + + // 待处理请求跟踪,取消排队的请求 + const pendingRequestRef = useRef | null>(null); + + // 处理profile切换中断 + const handleProfileInterrupt = useCallback( + (previousSwitching: string, newProfile: string) => { + debugProfileSwitch( + "INTERRUPT_PREVIOUS", + previousSwitching, + `被 ${newProfile} 中断`, + ); + + if (abortControllerRef.current) { + abortControllerRef.current.abort(); + debugProfileSwitch("ABORT_CONTROLLER_TRIGGERED", previousSwitching); + } + + if (pendingRequestRef.current) { + debugProfileSwitch("CANCEL_PENDING_REQUEST", previousSwitching); + } + + setActivatings((prev) => prev.filter((id) => id !== previousSwitching)); + showNotice( + "info", + `${t("Profile switch interrupted by new selection")}: ${previousSwitching} → ${newProfile}`, + 3000, + ); + }, + [t], + ); + + // 清理切换状态 + const cleanupSwitchState = useCallback( + (profile: string, sequence: number) => { + setActivatings((prev) => prev.filter((id) => id !== profile)); + switchingProfileRef.current = null; + abortControllerRef.current = null; + pendingRequestRef.current = null; + debugProfileSwitch("SWITCH_END", profile, `序列号: ${sequence}`); + }, + [], + ); const sensors = useSensors( useSensor(PointerSensor), useSensor(KeyboardSensor, { @@ -391,32 +282,11 @@ const ProfilePage = () => { const { profiles = {}, activateSelected, + patchProfiles, mutateProfiles, error, isStale, } = useProfiles(); - const activateSelectedRef = useRef(activateSelected); - 
const mutateProfilesRef = useRef(mutateProfiles); - const profileMutateScheduledRef = useRef(false); - const mutateLogsRef = useRef<(() => Promise | void) | null>(null); - const tRef = useRef(t); - const showNoticeRef = useRef(showNotice); - const refreshClashDataRef = useRef(refreshClashData); - - useEffect(() => { - activateSelectedRef.current = activateSelected; - }, [activateSelected]); - - useEffect(() => { - mutateProfilesRef.current = mutateProfiles; - }, [mutateProfiles]); - - useEffect(() => { - tRef.current = t; - }, [t]); - - showNoticeRef.current = showNotice; - refreshClashDataRef.current = refreshClashData; useEffect(() => { const handleFileDrop = async () => { @@ -457,28 +327,28 @@ const ProfilePage = () => { }; }, [addListener, mutateProfiles, t]); - // Add emergency recovery capability + // 添加紧急恢复功能 const onEmergencyRefresh = useLockFn(async () => { - console.log("[Emergency Refresh] Starting forced refresh of all data"); + console.log("[紧急刷新] 开始强制刷新所有数据"); try { - // Clear all SWR caches + // 清除所有SWR缓存 await mutate(() => true, undefined, { revalidate: false }); - // Force fetching profile data + // 强制重新获取配置数据 await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // Wait for state to stabilize before enhancing the profile + // 等待状态稳定后增强配置 await new Promise((resolve) => setTimeout(resolve, 500)); await onEnhance(false); - showNotice("success", "Data forcibly refreshed", 2000); + showNotice("success", "数据已强制刷新", 2000); } catch (error: any) { - console.error("[Emergency Refresh] Failed:", error); - showNotice("error", `Emergency refresh failed: ${error.message}`, 4000); + console.error("[紧急刷新] 失败:", error); + showNotice("error", `紧急刷新失败: ${error.message}`, 4000); } }); @@ -486,156 +356,6 @@ const ProfilePage = () => { "getRuntimeLogs", getRuntimeLogs, ); - useEffect(() => { - mutateLogsRef.current = mutateLogs; - }, [mutateLogs]); - - useEffect(() => { - activateSelectedRef.current = activateSelected; - }, 
[activateSelected]); - - useEffect(() => { - mutateProfilesRef.current = mutateProfiles; - }, [mutateProfiles]); - - const scheduleProfileMutate = useCallback(() => { - if (profileMutateScheduledRef.current) return; - if (!mountedRef.current) return; - profileMutateScheduledRef.current = true; - requestAnimationFrame(() => { - profileMutateScheduledRef.current = false; - const mutateProfilesFn = mutateProfilesRef.current; - if (mutateProfilesFn) { - void mutateProfilesFn(); - if (isDev) { - console.debug( - "[ProfileSwitch] mutateProfiles executed from schedule", - ); - } - } - }); - }, []); - - useEffect(() => { - if (!switchStatus) { - taskMetaRef.current.clear(); - dispatchManualActivatings({ type: "reset" }); - return; - } - - const trackedProfiles = new Set(); - const registerTask = (task: SwitchTaskStatus | null | undefined) => { - if (!task) return; - taskMetaRef.current.set(task.taskId, { - profileId: task.profileId, - notify: task.notify, - }); - trackedProfiles.add(task.profileId); - }; - - registerTask(switchStatus.active ?? null); - switchStatus.queue.forEach((task) => registerTask(task)); - - dispatchManualActivatings({ - type: "filterAllowed", - allowed: trackedProfiles, - }); - - const lastResult = switchStatus.lastResult ?? null; - if (initialLastResultSyncRef.current) { - initialLastResultSyncRef.current = false; - if (lastResult) { - lastResultAtRef.current = lastResult.finishedAt; - } - } - - if (lastResult && lastResult.finishedAt !== lastResultAtRef.current) { - lastResultAtRef.current = lastResult.finishedAt; - const { profileId, success, finishedAt, errorDetail, cancelled } = - lastResult; - const isCancelled = Boolean(cancelled); - const meta = taskMetaRef.current.get(lastResult.taskId); - const notifySuccess = meta?.notify ?? 
true; - taskMetaRef.current.delete(lastResult.taskId); - - debugProfileSwitch("STATUS_RESULT", profileId, { - success, - finishedAt, - notifySuccess, - cancelled: isCancelled, - }); - - switchEventQueue.enqueue(() => { - if (!mountedRef.current) return; - - dispatchManualActivatings({ type: "remove", id: profileId }); - - const eventGeneration = postSwitchGenerationRef.current; - - postSwitchEffectQueue.enqueue(async () => { - if (!mountedRef.current) return; - if (postSwitchGenerationRef.current !== eventGeneration) { - return; - } - - logToBackend( - success || isCancelled ? "info" : "warn", - "[ProfileSwitch] status result received", - { - profileId, - success, - cancelled: isCancelled, - finishedAt, - }, - ); - - scheduleProfileMutate(); - - if (success) { - if (notifySuccess) { - await afterPaint(); - showNoticeRef.current?.( - "success", - tRef.current("Profile Switched"), - 1000, - ); - } - - const operations: Promise[] = []; - const mutateLogs = mutateLogsRef.current; - if (mutateLogs) { - operations.push(Promise.resolve(mutateLogs())); - } - const activateSelected = activateSelectedRef.current; - if (activateSelected) { - operations.push(Promise.resolve(activateSelected())); - } - const refreshFn = refreshClashDataRef.current; - if (refreshFn) { - operations.push(Promise.resolve(refreshFn())); - } - - if (operations.length > 0) { - void Promise.resolve().then(() => Promise.allSettled(operations)); - } - } else if (!isCancelled) { - await afterPaint(); - showNoticeRef.current?.( - "error", - errorDetail ?? 
tRef.current("Profile switch failed"), - ); - } - }); - }); - } - }, [ - dispatchManualActivatings, - logToBackend, - postSwitchEffectQueue, - scheduleProfileMutate, - switchEventQueue, - switchStatus, - ]); const viewerRef = useRef(null); const configRef = useRef(null); @@ -655,7 +375,7 @@ const ProfilePage = () => { const onImport = async () => { if (!url) return; - // Validate that the URL uses http/https + // 校验url是否为http/https if (!/^https?:\/\//i.test(url)) { showNotice("error", t("Invalid Profile URL")); return; @@ -685,10 +405,7 @@ const ProfilePage = () => { ); } } catch (verifyErr) { - console.warn( - "[Import Verify] Failed to fetch profile state:", - verifyErr, - ); + console.warn("[导入验证] 获取配置状态失败:", verifyErr); break; } } @@ -697,33 +414,33 @@ const ProfilePage = () => { }; try { - // Attempt standard import + // 尝试正常导入 await importProfile(url); await handleImportSuccess("Profile Imported Successfully"); return; } catch (initialErr) { - console.warn("[Profile Import] Initial import failed:", initialErr); + console.warn("[订阅导入] 首次导入失败:", initialErr); const alreadyImported = await waitForImportLanding(); if (alreadyImported) { console.warn( - "[Profile Import] API reported failure, but profile already imported; skipping rollback", + "[订阅导入] 接口返回失败,但检测到订阅已导入,跳过回退导入流程", ); await handleImportSuccess("Profile Imported Successfully"); return; } - // Initial import failed without data change; try built-in proxy + // 首次导入失败且未检测到数据变更,尝试使用自身代理 showNotice("info", t("Import failed, retrying with Clash proxy...")); try { - // Attempt import using built-in proxy + // 使用自身代理尝试导入 await importProfile(url, { with_proxy: false, self_proxy: true, }); await handleImportSuccess("Profile Imported with Clash proxy"); } catch (retryErr: any) { - // Rollback import also failed + // 回退导入也失败 const retryErrmsg = retryErr?.message || retryErr.toString(); showNotice( "error", @@ -736,9 +453,7 @@ const ProfilePage = () => { } }; - const currentProfileId = profiles.current ?? 
null; - - // Enhanced refresh strategy + // 强化的刷新策略 const performRobustRefresh = async ( importVerifier: ImportLandingVerifier, ) => { @@ -749,50 +464,43 @@ const ProfilePage = () => { while (retryCount < maxRetries) { try { - console.log( - `[Import Refresh] Attempt ${retryCount + 1} to refresh profile data`, - ); + console.log(`[导入刷新] 第${retryCount + 1}次尝试刷新配置数据`); - // Force refresh and bypass caches + // 强制刷新,绕过所有缓存 await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // Wait for state to stabilize + // 等待状态稳定 await new Promise((resolve) => setTimeout(resolve, baseDelay * (retryCount + 1)), ); - // Verify whether refresh succeeded + // 验证刷新是否成功 const currentProfiles = await getProfiles(); const currentCount = currentProfiles?.items?.length || 0; if (currentCount > baselineCount) { console.log( - `[Import Refresh] Profile refresh succeeded; count ${baselineCount} -> ${currentCount}`, + `[导入刷新] 配置刷新成功,配置数量 ${baselineCount} -> ${currentCount}`, ); await onEnhance(false); return; } if (hasLanding(currentProfiles)) { - console.log( - "[Import Refresh] Detected profile update; treating as success", - ); + console.log("[导入刷新] 检测到订阅内容更新,判定刷新成功"); await onEnhance(false); return; } console.warn( - `[Import Refresh] Profile count unchanged (${currentCount}), retrying...`, + `[导入刷新] 配置数量未增加 (${currentCount}), 继续重试...`, ); retryCount++; } catch (error) { - console.error( - `[Import Refresh] Attempt ${retryCount + 1} failed:`, - error, - ); + console.error(`[导入刷新] 第${retryCount + 1}次刷新失败:`, error); retryCount++; await new Promise((resolve) => setTimeout(resolve, baseDelay * retryCount), @@ -800,12 +508,10 @@ const ProfilePage = () => { } } - // Final attempt after all retries fail - console.warn( - `[Import Refresh] Regular refresh failed; clearing cache and retrying`, - ); + // 所有重试失败后的最后尝试 + console.warn(`[导入刷新] 常规刷新失败,尝试清除缓存重新获取`); try { - // Clear SWR cache and refetch + // 清除SWR缓存并重新获取 await mutate("getProfiles", getProfiles(), { revalidate: 
true }); await onEnhance(false); showNotice( @@ -814,10 +520,7 @@ const ProfilePage = () => { 3000, ); } catch (finalError) { - console.error( - `[Import Refresh] Final refresh attempt failed:`, - finalError, - ); + console.error(`[导入刷新] 最终刷新尝试失败:`, finalError); showNotice( "error", t("Profile imported successfully, please restart if not visible"), @@ -828,108 +531,209 @@ const ProfilePage = () => { const onDragEnd = async (event: DragEndEvent) => { const { active, over } = event; - if (over && active.id !== over.id) { - await reorderProfile(active.id.toString(), over.id.toString()); - mutateProfiles(); + if (over) { + if (active.id !== over.id) { + await reorderProfile(active.id.toString(), over.id.toString()); + mutateProfiles(); + } } }; - const requestSwitch = useCallback( - (targetProfile: string, notifySuccess: boolean) => { - const nextGeneration = postSwitchGenerationRef.current + 1; - postSwitchGenerationRef.current = nextGeneration; - postSwitchEffectQueue.clear(); - - debugProfileSwitch("REQUEST_SWITCH", targetProfile, { - notifySuccess, - generation: nextGeneration, - }); - - logToBackend("info", "[ProfileSwitch] request switch", { - targetProfile, - notifySuccess, - generation: nextGeneration, - }); - - dispatchManualActivatings({ type: "add", ids: [targetProfile] }); - - void (async () => { - try { - const accepted = await switchProfileCommand( - targetProfile, - notifySuccess, + const executeBackgroundTasks = useCallback( + async ( + profile: string, + sequence: number, + abortController: AbortController, + ) => { + try { + if ( + sequence === requestSequenceRef.current && + switchingProfileRef.current === profile && + !abortController.signal.aborted + ) { + await activateSelected(); + console.log(`[Profile] 后台处理完成,序列号: ${sequence}`); + } else { + debugProfileSwitch( + "BACKGROUND_TASK_SKIPPED", + profile, + `序列号过期或被中断: ${sequence} vs ${requestSequenceRef.current}`, ); - if (!accepted) { - throw new Error(tRef.current("Profile switch failed")); - } - 
} catch (error: any) { - const message = - error?.message || error?.toString?.() || String(error); - logToBackend("error", "[ProfileSwitch] switch command failed", { - profileId: targetProfile, - message, - }); - dispatchManualActivatings({ type: "remove", id: targetProfile }); - scheduleProfileMutate(); - await afterPaint(); - showNoticeRef.current?.("error", message); } - })(); + } catch (err: any) { + console.warn("Failed to activate selected proxies:", err); + } }, - [ - dispatchManualActivatings, - logToBackend, - postSwitchEffectQueue, - scheduleProfileMutate, - ], + [activateSelected], ); - const onSelect = useCallback( - (targetProfile: string, force: boolean) => { - if (!force && targetProfile === currentProfileId) { - debugProfileSwitch("ALREADY_CURRENT_IGNORED", targetProfile); + const activateProfile = useCallback( + async (profile: string, notifySuccess: boolean) => { + if (profiles.current === profile && !notifySuccess) { + console.log( + `[Profile] 目标profile ${profile} 已经是当前配置,跳过切换`, + ); return; } - requestSwitch(targetProfile, true); + + const currentSequence = ++requestSequenceRef.current; + debugProfileSwitch("NEW_REQUEST", profile, `序列号: ${currentSequence}`); + + // 处理中断逻辑 + const previousSwitching = switchingProfileRef.current; + if (previousSwitching && previousSwitching !== profile) { + handleProfileInterrupt(previousSwitching, profile); + } + + // 防止重复切换同一个profile + if (switchingProfileRef.current === profile) { + debugProfileSwitch("DUPLICATE_SWITCH_BLOCKED", profile); + return; + } + + // 初始化切换状态 + switchingProfileRef.current = profile; + debugProfileSwitch("SWITCH_START", profile, `序列号: ${currentSequence}`); + + const currentAbortController = new AbortController(); + abortControllerRef.current = currentAbortController; + + setActivatings((prev) => { + if (prev.includes(profile)) return prev; + return [...prev, profile]; + }); + + try { + console.log( + `[Profile] 开始切换到: ${profile},序列号: ${currentSequence}`, + ); + + // 检查请求有效性 + if ( + 
isRequestOutdated(currentSequence, requestSequenceRef, profile) || + isOperationAborted(currentAbortController, profile) + ) { + return; + } + + // 执行切换请求 + const requestPromise = patchProfiles( + { current: profile }, + currentAbortController.signal, + ); + pendingRequestRef.current = requestPromise; + + const success = await requestPromise; + + if (pendingRequestRef.current === requestPromise) { + pendingRequestRef.current = null; + } + + // 再次检查有效性 + if ( + isRequestOutdated(currentSequence, requestSequenceRef, profile) || + isOperationAborted(currentAbortController, profile) + ) { + return; + } + + // 完成切换 + await mutateLogs(); + closeAllConnections(); + + if (notifySuccess && success) { + showNotice("success", t("Profile Switched"), 1000); + } + + console.log( + `[Profile] 切换到 ${profile} 完成,序列号: ${currentSequence},开始后台处理`, + ); + + // 延迟执行后台任务 + setTimeout( + () => + executeBackgroundTasks( + profile, + currentSequence, + currentAbortController, + ), + 50, + ); + } catch (err: any) { + if (pendingRequestRef.current) { + pendingRequestRef.current = null; + } + + // 检查是否因为中断或过期而出错 + if ( + isOperationAborted(currentAbortController, profile) || + isRequestOutdated(currentSequence, requestSequenceRef, profile) + ) { + return; + } + + console.error(`[Profile] 切换失败:`, err); + showNotice("error", err?.message || err.toString(), 4000); + } finally { + // 只有当前profile仍然是正在切换的profile且序列号匹配时才清理状态 + if ( + switchingProfileRef.current === profile && + currentSequence === requestSequenceRef.current + ) { + cleanupSwitchState(profile, currentSequence); + } else { + debugProfileSwitch( + "CLEANUP_SKIPPED", + profile, + `序列号不匹配或已被接管: ${currentSequence} vs ${requestSequenceRef.current}`, + ); + } + } }, - [currentProfileId, requestSwitch], + [ + profiles, + patchProfiles, + mutateLogs, + t, + executeBackgroundTasks, + handleProfileInterrupt, + cleanupSwitchState, + ], ); + const onSelect = async (current: string, force: boolean) => { + // 阻止重复点击或已激活的profile + if 
(switchingProfileRef.current === current) { + debugProfileSwitch("DUPLICATE_CLICK_IGNORED", current); + return; + } + + if (!force && current === profiles.current) { + debugProfileSwitch("ALREADY_CURRENT_IGNORED", current); + return; + } + + await activateProfile(current, true); + }; useEffect(() => { - if (!current) return; - if (current === currentProfileId) return; - if (switchActivatingIds.includes(current)) return; - requestSwitch(current, false); - }, [current, currentProfileId, requestSwitch, switchActivatingIds]); - - useEffect(() => { - let mounted = true; - const panicListener = listen("rust-panic", (event) => { - if (!mounted) return; - const payload = event.payload; - if (!payload) return; - showNotice( - "error", - `Rust panic: ${payload.message} @ ${payload.location}`, - ); - console.error("Rust panic reported from backend:", payload); - }); - return () => { - mounted = false; - panicListener.then((unlisten) => unlisten()).catch(() => {}); - }; - }, [t]); + (async () => { + if (current) { + mutateProfiles(); + await activateProfile(current, false); + } + })(); + }, [current, activateProfile, mutateProfiles]); const onEnhance = useLockFn(async (notifySuccess: boolean) => { - if (switchingProfileId) { + if (switchingProfileRef.current) { console.log( - `[Profile] A profile is currently switching (${switchingProfileId}); skipping enhance operation`, + `[Profile] 有profile正在切换中(${switchingProfileRef.current}),跳过enhance操作`, ); return; } const currentProfiles = currentActivatings(); - dispatchManualActivatings({ type: "add", ids: currentProfiles }); + setActivatings((prev) => [...new Set([...prev, ...currentProfiles])]); try { await enhanceProfiles(); @@ -940,17 +744,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err.message || err.toString(), 3000); } finally { - dispatchManualActivatings({ type: "reset" }); + // 保留正在切换的profile,清除其他状态 + setActivatings((prev) => + prev.filter((id) => id === switchingProfileRef.current), + ); } }); 
const onDelete = useLockFn(async (uid: string) => { const current = profiles.current === uid; try { - dispatchManualActivatings({ - type: "set", - value: [...new Set([...(current ? currentActivatings() : []), uid])], - }); + setActivatings([...(current ? currentActivatings() : []), uid]); await deleteProfile(uid); mutateProfiles(); mutateLogs(); @@ -960,11 +764,11 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - dispatchManualActivatings({ type: "reset" }); + setActivatings([]); } }); - // Update all profiles + // 更新所有订阅 const setLoadingCache = useSetLoadingCache(); const onUpdateAll = useLockFn(async () => { const throttleMutate = throttle(mutateProfiles, 2000, { @@ -975,7 +779,7 @@ const ProfilePage = () => { await updateProfile(uid); throttleMutate(); } catch (err: any) { - console.error(`Failed to update profile ${uid}:`, err); + console.error(`更新订阅 ${uid} 失败:`, err); } finally { setLoadingCache((cache) => ({ ...cache, [uid]: false })); } @@ -983,7 +787,7 @@ const ProfilePage = () => { return new Promise((resolve) => { setLoadingCache((cache) => { - // Gather profiles that are not updating + // 获取没有正在更新的订阅 const items = profileItems.filter( (e) => e.type === "remote" && !cache[e.uid], ); @@ -1037,11 +841,11 @@ const ProfilePage = () => { const getSelectionState = () => { if (selectedProfiles.size === 0) { - return "none"; // no selection + return "none"; // 无选择 } else if (selectedProfiles.size === profileItems.length) { - return "all"; // all selected + return "all"; // 全选 } else { - return "partial"; // partially selected + return "partial"; // 部分选择 } }; @@ -1055,7 +859,7 @@ const ProfilePage = () => { ? 
[profiles.current] : []; - dispatchManualActivatings({ type: "add", ids: currentActivating }); + setActivatings((prev) => [...new Set([...prev, ...currentActivating])]); // Delete all selected profiles for (const uid of selectedProfiles) { @@ -1078,17 +882,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - dispatchManualActivatings({ type: "reset" }); + setActivatings([]); } }); const mode = useThemeMode(); - const islight = mode === "light"; + const islight = mode === "light" ? true : false; const dividercolor = islight ? "rgba(0, 0, 0, 0.06)" : "rgba(255, 255, 255, 0.06)"; - // Observe configuration changes from backend + // 监听后端配置变更 useEffect(() => { let unlistenPromise: Promise<() => void> | undefined; let lastProfileId: string | null = null; @@ -1102,29 +906,29 @@ const ProfilePage = () => { const newProfileId = event.payload; const now = Date.now(); - console.log(`[Profile] Received profile-change event: ${newProfileId}`); + console.log(`[Profile] 收到配置变更事件: ${newProfileId}`); if ( lastProfileId === newProfileId && now - lastUpdateTime < debounceDelay ) { - console.log(`[Profile] Duplicate event throttled; skipping`); + console.log(`[Profile] 重复事件被防抖,跳过`); return; } lastProfileId = newProfileId; lastUpdateTime = now; - console.log(`[Profile] Performing profile data refresh`); + console.log(`[Profile] 执行配置数据刷新`); if (refreshTimer !== null) { window.clearTimeout(refreshTimer); } - // Use async scheduling to avoid blocking event handling + // 使用异步调度避免阻塞事件处理 refreshTimer = window.setTimeout(() => { mutateProfiles().catch((error) => { - console.error("[Profile] Profile data refresh failed:", error); + console.error("[Profile] 配置数据刷新失败:", error); }); refreshTimer = null; }, 0); @@ -1141,6 +945,16 @@ const ProfilePage = () => { }; }, [mutateProfiles]); + // 组件卸载时清理中断控制器 + useEffect(() => { + return () => { + if (abortControllerRef.current) { + abortControllerRef.current.abort(); + 
debugProfileSwitch("COMPONENT_UNMOUNT_CLEANUP", "all"); + } + }; + }, []); + return ( { - {/* Fault detection and emergency recovery button */} + {/* 故障检测和紧急恢复按钮 */} {(error || isStale) && ( { ref={viewerRef} onChange={async (isActivating) => { mutateProfiles(); - // Only trigger global reload when the active profile changes + // 只有更改当前激活的配置时才触发全局重新加载 if (isActivating) { await onEnhance(false); } diff --git a/src/providers/app-data-context.ts b/src/providers/app-data-context.ts index 6bf02313..7b7244ab 100644 --- a/src/providers/app-data-context.ts +++ b/src/providers/app-data-context.ts @@ -6,15 +6,8 @@ import { RuleProvider, } from "tauri-plugin-mihomo-api"; -import { ProxiesView, type ProfileSwitchStatus } from "@/services/cmds"; - export interface AppDataContextType { - proxies: ProxiesView | null; - proxyHydration: "none" | "snapshot" | "live"; - proxyTargetProfileId: string | null; - proxyDisplayProfileId: string | null; - isProxyRefreshPending: boolean; - switchStatus: ProfileSwitchStatus | null; + proxies: any; clashConfig: BaseConfig; rules: Rule[]; sysproxy: any; diff --git a/src/providers/app-data-provider.tsx b/src/providers/app-data-provider.tsx index 9c97c61f..c71528c4 100644 --- a/src/providers/app-data-provider.tsx +++ b/src/providers/app-data-provider.tsx @@ -1,6 +1,6 @@ import { listen } from "@tauri-apps/api/event"; -import React, { useCallback, useEffect, useMemo, useRef } from "react"; -import useSWR, { mutate as globalMutate } from "swr"; +import React, { useCallback, useEffect, useMemo } from "react"; +import useSWR from "swr"; import { getBaseConfig, getRuleProviders, @@ -9,53 +9,31 @@ import { import { useVerge } from "@/hooks/use-verge"; import { + calcuProxies, calcuProxyProviders, getAppUptime, - getProfileSwitchStatus, - getProfileSwitchEvents, - getProfiles as fetchProfilesConfig, getRunningMode, - readProfileFile, getSystemProxy, - type ProxiesView, - type ProfileSwitchStatus, - type SwitchResultStatus, } from "@/services/cmds"; 
-import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config"; -import { useProfileStore } from "@/stores/profile-store"; -import { - applyLiveProxyPayload, - fetchLiveProxies, - type ProxiesUpdatedPayload, - useProxyStore, -} from "@/stores/proxy-store"; -import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot"; +import { SWR_DEFAULTS, SWR_REALTIME, SWR_SLOW_POLL } from "@/services/config"; import { AppDataContext, AppDataContextType } from "./app-data-context"; -// Global app data provider +// 全局数据提供者组件 export const AppDataProvider = ({ children, }: { children: React.ReactNode; }) => { const { verge } = useVerge(); - const applyProfileSwitchResult = useProfileStore( - (state) => state.applySwitchResult, - ); - const commitProfileSnapshot = useProfileStore( - (state) => state.commitHydrated, - ); - const setSwitchEventSeq = useProfileStore((state) => state.setLastEventSeq); - const proxyView = useProxyStore((state) => state.data); - const proxyHydration = useProxyStore((state) => state.hydration); - const proxyProfileId = useProxyStore((state) => state.lastProfileId); - const pendingProxyProfileId = useProxyStore( - (state) => state.pendingProfileId, - ); - const setProxySnapshot = useProxyStore((state) => state.setSnapshot); - const clearPendingProxyProfile = useProxyStore( - (state) => state.clearPendingProfile, + + const { data: proxiesData, mutate: refreshProxy } = useSWR( + "getProxies", + calcuProxies, + { + ...SWR_REALTIME, + onError: (err) => console.warn("[DataProvider] Proxy fetch failed:", err), + }, ); const { data: clashConfig, mutate: refreshClashConfig } = useSWR( @@ -82,259 +60,25 @@ export const AppDataProvider = ({ SWR_DEFAULTS, ); - const { data: switchStatus, mutate: mutateSwitchStatus } = - useSWR( - "getProfileSwitchStatus", - getProfileSwitchStatus, - { - refreshInterval: (status) => - status && (status.isSwitching || (status.queue?.length ?? 0) > 0) - ? 
400 - : 4000, - dedupingInterval: 200, - }, - ); - - const isUnmountedRef = useRef(false); - // Keep track of pending timers so we can cancel them on unmount and avoid stray updates. - const scheduledTimeoutsRef = useRef>(new Set()); - // Shared metadata to dedupe switch events coming from both polling and subscriptions. - const switchMetaRef = useRef<{ - pendingProfileId: string | null; - lastResultTaskId: number | null; - }>({ - pendingProfileId: null, - lastResultTaskId: null, - }); - const switchEventSeqRef = useRef(0); - const profileChangeMetaRef = useRef<{ - lastProfileId: string | null; - lastEventTs: number; - }>({ - lastProfileId: null, - lastEventTs: 0, - }); - const lastClashRefreshAtRef = useRef(0); - const PROFILE_EVENT_DEDUP_MS = 400; - const CLASH_REFRESH_DEDUP_MS = 300; - - // Thin wrapper around setTimeout that no-ops once the provider unmounts. - const scheduleTimeout = useCallback( - (callback: () => void | Promise, delay: number) => { - if (isUnmountedRef.current) return -1; - - const timeoutId = window.setTimeout(() => { - scheduledTimeoutsRef.current.delete(timeoutId); - if (!isUnmountedRef.current) { - void callback(); - } - }, delay); - - scheduledTimeoutsRef.current.add(timeoutId); - return timeoutId; - }, - [], - ); - - const clearAllTimeouts = useCallback(() => { - scheduledTimeoutsRef.current.forEach((timeoutId) => - clearTimeout(timeoutId), - ); - scheduledTimeoutsRef.current.clear(); - }, []); - - // Delay live proxy refreshes slightly so we don't hammer Mihomo while a switch is still applying. - const queueProxyRefresh = useCallback( - (reason: string, delay = 1500) => { - scheduleTimeout(() => { - fetchLiveProxies().catch((error) => - console.warn( - `[DataProvider] Proxy refresh failed (${reason}, fallback):`, - error, - ), - ); - }, delay); - }, - [scheduleTimeout], - ); - // Prime the proxy store with the static selections from the profile YAML before live data arrives. 
- const seedProxySnapshot = useCallback( - async (profileId: string) => { - if (!profileId) return; - - try { - const yamlContent = await readProfileFile(profileId); - const snapshot = createProxySnapshotFromProfile(yamlContent); - if (!snapshot) return; - - setProxySnapshot(snapshot, profileId); - } catch (error) { - console.warn( - "[DataProvider] Failed to seed proxy snapshot from profile:", - error, - ); - } - }, - [setProxySnapshot], - ); - - const handleSwitchResult = useCallback( - (result: SwitchResultStatus) => { - // Ignore duplicate notifications for the same switch execution. - const meta = switchMetaRef.current; - if (result.taskId === meta.lastResultTaskId) { - return; - } - meta.lastResultTaskId = result.taskId; - - // Optimistically update the SWR cache so the UI shows the new profile immediately. - void globalMutate( - "getProfiles", - (current?: IProfilesConfig | null) => { - if (!current || !result.success) { - return current; - } - if (current.current === result.profileId) { - return current; - } - return { - ...current, - current: result.profileId, - }; - }, - false, - ); - - applyProfileSwitchResult(result); - if (!result.success) { - clearPendingProxyProfile(); - } - - if (result.success && result.cancelled !== true) { - // Once the backend settles, refresh all dependent data in the background. 
- scheduleTimeout(() => { - void Promise.allSettled([ - fetchProfilesConfig().then((data) => { - commitProfileSnapshot(data); - globalMutate("getProfiles", data, false); - }), - fetchLiveProxies(), - refreshProxyProviders(), - refreshRules(), - refreshRuleProviders(), - ]).catch((error) => { - console.warn( - "[DataProvider] Background refresh after profile switch failed:", - error, - ); - }); - }, 100); - } - - void mutateSwitchStatus((current) => { - if (!current) { - return current; - } - const filteredQueue = current.queue.filter( - (task) => task.taskId !== result.taskId, - ); - const active = - current.active && current.active.taskId === result.taskId - ? null - : current.active; - const isSwitching = filteredQueue.length > 0; - return { - ...current, - active, - queue: filteredQueue, - isSwitching, - lastResult: result, - }; - }, false); - }, - [ - scheduleTimeout, - refreshProxyProviders, - refreshRules, - refreshRuleProviders, - mutateSwitchStatus, - applyProfileSwitchResult, - commitProfileSnapshot, - clearPendingProxyProfile, - ], - ); - useEffect(() => { - isUnmountedRef.current = false; - return () => { - isUnmountedRef.current = true; - clearAllTimeouts(); - }; - }, [clearAllTimeouts]); + let lastProfileId: string | null = null; + let lastUpdateTime = 0; + const refreshThrottle = 800; - useEffect(() => { - if (!switchStatus) { - return; - } - - const meta = switchMetaRef.current; - const nextTarget = - switchStatus.active?.profileId ?? - (switchStatus.queue.length > 0 ? switchStatus.queue[0].profileId : null); - - if (nextTarget && nextTarget !== meta.pendingProfileId) { - meta.pendingProfileId = nextTarget; - void seedProxySnapshot(nextTarget); - } else if (!nextTarget) { - meta.pendingProfileId = null; - } - - const lastResult = switchStatus.lastResult ?? 
null; - if (lastResult) { - handleSwitchResult(lastResult); - } - }, [switchStatus, seedProxySnapshot, handleSwitchResult]); - - useEffect(() => { - let disposed = false; - - const pollEvents = async () => { - if (disposed) { - return; - } - try { - const events = await getProfileSwitchEvents(switchEventSeqRef.current); - if (events.length > 0) { - switchEventSeqRef.current = events[events.length - 1].sequence; - setSwitchEventSeq(switchEventSeqRef.current); - events.forEach((event) => handleSwitchResult(event.result)); - } - } catch (error) { - console.warn("[DataProvider] Failed to poll switch events:", error); - } finally { - if (!disposed) { - const nextDelay = - switchStatus && - (switchStatus.isSwitching || (switchStatus.queue?.length ?? 0) > 0) - ? 250 - : 1000; - scheduleTimeout(pollEvents, nextDelay); - } - } - }; - - scheduleTimeout(pollEvents, 0); - - return () => { - disposed = true; - }; - }, [scheduleTimeout, handleSwitchResult, switchStatus, setSwitchEventSeq]); - - useEffect(() => { + let isUnmounted = false; + const scheduledTimeouts = new Set(); const cleanupFns: Array<() => void> = []; const registerCleanup = (fn: () => void) => { - cleanupFns.push(fn); + if (isUnmounted) { + try { + fn(); + } catch (error) { + console.error("[DataProvider] Immediate cleanup failed:", error); + } + } else { + cleanupFns.push(fn); + } }; const addWindowListener = (eventName: string, handler: EventListener) => { @@ -343,319 +87,140 @@ export const AppDataProvider = ({ return () => window.removeEventListener(eventName, handler); }; - const runProfileChangedPipeline = ( - profileId: string | null, - source: "tauri" | "window", + const scheduleTimeout = ( + callback: () => void | Promise, + delay: number, ) => { + if (isUnmounted) return -1; + + const timeoutId = window.setTimeout(() => { + scheduledTimeouts.delete(timeoutId); + if (!isUnmounted) { + void callback(); + } + }, delay); + + scheduledTimeouts.add(timeoutId); + return timeoutId; + }; + + const 
clearAllTimeouts = () => { + scheduledTimeouts.forEach((timeoutId) => clearTimeout(timeoutId)); + scheduledTimeouts.clear(); + }; + + const handleProfileChanged = (event: { payload: string }) => { + const newProfileId = event.payload; const now = Date.now(); - const meta = profileChangeMetaRef.current; if ( - meta.lastProfileId === profileId && - now - meta.lastEventTs < PROFILE_EVENT_DEDUP_MS + lastProfileId === newProfileId && + now - lastUpdateTime < refreshThrottle ) { return; } - meta.lastProfileId = profileId; - meta.lastEventTs = now; - - if (profileId) { - void seedProxySnapshot(profileId); - } - - queueProxyRefresh(`profile-changed-${source}`, 500); + lastProfileId = newProfileId; + lastUpdateTime = now; scheduleTimeout(() => { - void fetchProfilesConfig() - .then((data) => { - commitProfileSnapshot(data); - globalMutate("getProfiles", data, false); - }) - .catch((error) => - console.warn( - "[AppDataProvider] Failed to refresh profiles after profile change:", - error, - ), - ); - void refreshProxyProviders().catch((error) => - console.warn( - "[AppDataProvider] Proxy providers refresh failed after profile change:", - error, - ), + refreshRules().catch((error) => + console.warn("[DataProvider] Rules refresh failed:", error), ); - void refreshRules().catch((error) => - console.warn( - "[AppDataProvider] Rules refresh failed after profile change:", - error, - ), - ); - void refreshRuleProviders().catch((error) => - console.warn( - "[AppDataProvider] Rule providers refresh failed after profile change:", - error, - ), + refreshRuleProviders().catch((error) => + console.warn("[DataProvider] Rule providers refresh failed:", error), ); }, 200); }; - const handleProfileChanged = (event: { payload: string }) => { - runProfileChangedPipeline(event.payload ?? 
null, "tauri"); - }; - - const runRefreshClashPipeline = (source: "tauri" | "window") => { + const handleRefreshClash = () => { const now = Date.now(); - if (now - lastClashRefreshAtRef.current < CLASH_REFRESH_DEDUP_MS) { - return; - } - - lastClashRefreshAtRef.current = now; + if (now - lastUpdateTime <= refreshThrottle) return; + lastUpdateTime = now; scheduleTimeout(() => { - void refreshClashConfig().catch((error) => - console.warn( - "[AppDataProvider] Clash config refresh failed after backend update:", - error, - ), + refreshProxy().catch((error) => + console.error("[DataProvider] Proxy refresh failed:", error), ); - void refreshRules().catch((error) => - console.warn( - "[AppDataProvider] Rules refresh failed after backend update:", - error, - ), - ); - void refreshRuleProviders().catch((error) => - console.warn( - "[AppDataProvider] Rule providers refresh failed after backend update:", - error, - ), - ); - void refreshProxyProviders().catch((error) => - console.warn( - "[AppDataProvider] Proxy providers refresh failed after backend update:", - error, - ), - ); - }, 0); - - queueProxyRefresh(`refresh-clash-config-${source}`, 400); + }, 200); }; - const handleProfileUpdateCompleted = (_: { payload: { uid: string } }) => { - queueProxyRefresh("profile-update-completed", 3000); - if (!isUnmountedRef.current) { - scheduleTimeout(() => { - void refreshProxyProviders().catch((error) => - console.warn( - "[DataProvider] Proxy providers refresh failed after profile update completed:", - error, - ), - ); - }, 0); - } - }; + const handleRefreshProxy = () => { + const now = Date.now(); + if (now - lastUpdateTime <= refreshThrottle) return; - const isProxiesPayload = ( - value: unknown, - ): value is ProxiesUpdatedPayload => { - if (!value || typeof value !== "object") { - return false; - } - const candidate = value as Partial; - return candidate.proxies !== undefined && candidate.proxies !== null; - }; - - const handleProxiesUpdatedPayload = ( - rawPayload: unknown, - 
source: "tauri" | "window", - ) => { - if (!isProxiesPayload(rawPayload)) { - console.warn( - `[AppDataProvider] Ignored ${source} proxies-updated payload`, - rawPayload, + lastUpdateTime = now; + scheduleTimeout(() => { + refreshProxy().catch((error) => + console.warn("[DataProvider] Proxy refresh failed:", error), ); - queueProxyRefresh(`proxies-updated-${source}-invalid`, 500); - return; + }, 200); + }; + + const initializeListeners = async () => { + try { + const unlistenProfile = await listen( + "profile-changed", + handleProfileChanged, + ); + registerCleanup(unlistenProfile); + } catch (error) { + console.error("[AppDataProvider] 监听 Profile 事件失败:", error); } try { - applyLiveProxyPayload(rawPayload); - } catch (error) { - console.warn( - `[AppDataProvider] Failed to apply ${source} proxies-updated payload`, - error, + const unlistenClash = await listen( + "verge://refresh-clash-config", + handleRefreshClash, ); - queueProxyRefresh(`proxies-updated-${source}-apply-failed`, 500); + const unlistenProxy = await listen( + "verge://refresh-proxy-config", + handleRefreshProxy, + ); + + registerCleanup(() => { + unlistenClash(); + unlistenProxy(); + }); + } catch (error) { + console.warn("[AppDataProvider] 设置 Tauri 事件监听器失败:", error); + + const fallbackHandlers: Array<[string, EventListener]> = [ + ["verge://refresh-clash-config", handleRefreshClash], + ["verge://refresh-proxy-config", handleRefreshProxy], + ]; + + fallbackHandlers.forEach(([eventName, handler]) => { + registerCleanup(addWindowListener(eventName, handler)); + }); } }; - listen<{ uid: string }>( - "profile-update-completed", - handleProfileUpdateCompleted, - ) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach profile update listeners:", - error, - ), - ); - - listen("profile-changed", handleProfileChanged) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach profile-changed listener:", - error, - ), - ); - 
- listen("proxies-updated", (event) => { - handleProxiesUpdatedPayload(event.payload, "tauri"); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach proxies-updated listener:", - error, - ), - ); - - listen("verge://refresh-clash-config", () => { - runRefreshClashPipeline("tauri"); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach refresh-clash-config listener:", - error, - ), - ); - - listen("verge://refresh-proxy-config", () => { - queueProxyRefresh("refresh-proxy-config-tauri", 500); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach refresh-proxy-config listener:", - error, - ), - ); - - const fallbackHandlers: Array<[string, EventListener]> = [ - [ - "profile-update-completed", - ((event: Event) => { - const payload = (event as CustomEvent<{ uid: string }>).detail ?? { - uid: "", - }; - handleProfileUpdateCompleted({ payload }); - }) as EventListener, - ], - [ - "profile-changed", - ((event: Event) => { - const payload = (event as CustomEvent).detail ?? 
null; - runProfileChangedPipeline(payload, "window"); - }) as EventListener, - ], - [ - "proxies-updated", - ((event: Event) => { - const payload = (event as CustomEvent).detail; - handleProxiesUpdatedPayload(payload, "window"); - }) as EventListener, - ], - [ - "verge://refresh-clash-config", - (() => { - runRefreshClashPipeline("window"); - }) as EventListener, - ], - [ - "verge://refresh-proxy-config", - (() => { - queueProxyRefresh("refresh-proxy-config-window", 500); - }) as EventListener, - ], - ]; - - fallbackHandlers.forEach(([eventName, handler]) => { - registerCleanup(addWindowListener(eventName, handler)); - }); + void initializeListeners(); return () => { - cleanupFns.forEach((fn) => { + isUnmounted = true; + clearAllTimeouts(); + + const errors: Error[] = []; + cleanupFns.splice(0).forEach((fn) => { try { fn(); } catch (error) { - console.error("[AppDataProvider] cleanup error:", error); + errors.push( + error instanceof Error ? error : new Error(String(error)), + ); } }); + + if (errors.length > 0) { + console.error( + `[DataProvider] ${errors.length} errors during cleanup:`, + errors, + ); + } }; - }, [ - commitProfileSnapshot, - queueProxyRefresh, - refreshClashConfig, - refreshProxyProviders, - refreshRuleProviders, - refreshRules, - scheduleTimeout, - seedProxySnapshot, - ]); - - const switchTargetProfileId = - switchStatus?.active?.profileId ?? - (switchStatus && switchStatus.queue.length > 0 - ? switchStatus.queue[0].profileId - : null); - - const proxyTargetProfileId = - switchTargetProfileId ?? pendingProxyProfileId ?? proxyProfileId ?? 
null; - const displayProxyStateRef = useRef<{ - view: ProxiesView | null; - profileId: string | null; - }>({ - view: proxyView, - profileId: proxyTargetProfileId, - }); - - const currentDisplay = displayProxyStateRef.current; - - if (!proxyView) { - if ( - currentDisplay.view !== null || - currentDisplay.profileId !== proxyTargetProfileId - ) { - displayProxyStateRef.current = { - view: null, - profileId: proxyTargetProfileId, - }; - } - } else if (proxyHydration === "live") { - if ( - currentDisplay.view !== proxyView || - currentDisplay.profileId !== proxyTargetProfileId - ) { - displayProxyStateRef.current = { - view: proxyView, - profileId: proxyTargetProfileId, - }; - } - } else if (!currentDisplay.view) { - displayProxyStateRef.current = { - view: proxyView, - profileId: proxyTargetProfileId, - }; - } - const displayProxyState = displayProxyStateRef.current; - const proxyDisplayProfileId = displayProxyState.profileId; - const proxiesForRender = displayProxyState.view ?? proxyView; - const isProxyRefreshPending = - (switchStatus?.isSwitching ?? 
false) || - proxyHydration !== "live" || - proxyTargetProfileId !== proxyDisplayProfileId; + }, [refreshProxy, refreshRules, refreshRuleProviders]); const { data: sysproxy, mutate: refreshSysproxy } = useSWR( "getSystemProxy", @@ -675,10 +240,10 @@ export const AppDataProvider = ({ errorRetryCount: 1, }); - // Provide unified refresh method + // 提供统一的刷新方法 const refreshAll = useCallback(async () => { await Promise.all([ - fetchLiveProxies(), + refreshProxy(), refreshClashConfig(), refreshRules(), refreshSysproxy(), @@ -686,6 +251,7 @@ export const AppDataProvider = ({ refreshRuleProviders(), ]); }, [ + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -693,22 +259,22 @@ export const AppDataProvider = ({ refreshRuleProviders, ]); - // Aggregate data into context value + // 聚合所有数据 const value = useMemo(() => { - // Compute the system proxy address + // 计算系统代理地址 const calculateSystemProxyAddress = () => { if (!verge || !clashConfig) return "-"; const isPacMode = verge.proxy_auto_config ?? false; if (isPacMode) { - // PAC mode: display the desired proxy address + // PAC模式:显示我们期望设置的代理地址 const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; return `${proxyHost}:${proxyPort}`; } else { - // HTTP proxy mode: prefer system address, fallback to desired address if invalid + // HTTP代理模式:优先使用系统地址,但如果格式不正确则使用期望地址 const systemServer = sysproxy?.server; if ( systemServer && @@ -717,7 +283,7 @@ export const AppDataProvider = ({ ) { return systemServer; } else { - // System address invalid: fallback to desired proxy address + // 系统地址无效,返回期望的代理地址 const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; @@ -727,27 +293,22 @@ export const AppDataProvider = ({ }; return { - // Data - proxies: proxiesForRender, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - switchStatus: switchStatus ?? 
null, + // 数据 + proxies: proxiesData, clashConfig, rules: rulesData?.rules || [], sysproxy, runningMode, uptime: uptimeData || 0, - // Provider data + // 提供者数据 proxyProviders: proxyProviders || {}, ruleProviders: ruleProviders?.providers || {}, systemProxyAddress: calculateSystemProxyAddress(), - // Refresh helpers - refreshProxy: fetchLiveProxies, + // 刷新方法 + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -756,12 +317,7 @@ export const AppDataProvider = ({ refreshAll, } as AppDataContextType; }, [ - proxiesForRender, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - switchStatus, + proxiesData, clashConfig, rulesData, sysproxy, @@ -770,6 +326,7 @@ export const AppDataProvider = ({ proxyProviders, ruleProviders, verge, + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, diff --git a/src/services/cmds.ts b/src/services/cmds.ts index 09894418..e1e686bd 100644 --- a/src/services/cmds.ts +++ b/src/services/cmds.ts @@ -4,52 +4,6 @@ import { getProxies, getProxyProviders } from "tauri-plugin-mihomo-api"; import { showNotice } from "@/services/noticeService"; -export type ProxyProviderRecord = Record< - string, - IProxyProviderItem | undefined ->; - -export interface SwitchTaskStatus { - taskId: number; - profileId: string; - notify: boolean; - stage?: number | null; - queued: boolean; -} - -export interface SwitchResultStatus { - taskId: number; - profileId: string; - success: boolean; - cancelled?: boolean; - finishedAt: number; - errorStage?: string | null; - errorDetail?: string | null; -} - -export interface ProfileSwitchStatus { - isSwitching: boolean; - active?: SwitchTaskStatus | null; - queue: SwitchTaskStatus[]; - cleanupProfiles: string[]; - lastResult?: SwitchResultStatus | null; - lastUpdated: number; -} - -export interface SwitchResultEvent { - sequence: number; - result: SwitchResultStatus; -} - -// Persist the last proxy provider payload so UI can render while waiting on 
Mihomo. -let cachedProxyProviders: ProxyProviderRecord | null = null; - -export const getCachedProxyProviders = () => cachedProxyProviders; - -export const setCachedProxyProviders = (record: ProxyProviderRecord | null) => { - cachedProxyProviders = record; -}; - export async function copyClashEnv() { return invoke("copy_clash_env"); } @@ -66,14 +20,6 @@ export async function patchProfilesConfig(profiles: IProfilesConfig) { return invoke("patch_profiles_config", { profiles }); } -// Triggers the async state-machine driven switch flow on the backend. -export async function switchProfileCommand( - profileIndex: string, - notifySuccess: boolean, -) { - return invoke("switch_profile", { profileIndex, notifySuccess }); -} - export async function createProfile( item: Partial, fileData?: string | null, @@ -167,29 +113,27 @@ export async function syncTrayProxySelection() { return invoke("sync_tray_proxy_selection"); } -export interface ProxiesView { +export async function calcuProxies(): Promise<{ global: IProxyGroupItem; direct: IProxyItem; groups: IProxyGroupItem[]; records: Record; proxies: IProxyItem[]; -} +}> { + const [proxyResponse, providerResponse] = await Promise.all([ + getProxies(), + calcuProxyProviders(), + ]); -export function buildProxyView( - proxyResponse: Awaited>, - providerRecord?: ProxyProviderRecord | null, -): ProxiesView { const proxyRecord = proxyResponse.proxies; + const providerRecord = providerResponse; // provider name map - const providerMap = providerRecord - ? 
Object.fromEntries( - Object.entries(providerRecord).flatMap(([provider, item]) => { - if (!item) return []; - return item.proxies.map((p) => [p.name, { ...p, provider }]); - }), - ) - : {}; + const providerMap = Object.fromEntries( + Object.entries(providerRecord).flatMap(([provider, item]) => + item!.proxies.map((p) => [p.name, { ...p, provider }]), + ), + ); // compatible with proxy-providers const generateItem = (name: string) => { @@ -263,56 +207,16 @@ export function buildProxyView( }; } -export async function calcuProxies(): Promise { - const proxyResponse = await getProxies(); - - let providerRecord = cachedProxyProviders; - if (!providerRecord) { - try { - providerRecord = await calcuProxyProviders(); - } catch (error) { - console.warn("[calcuProxies] 代理提供者加载失败:", error); - } - } - - return buildProxyView(proxyResponse, providerRecord); -} - export async function calcuProxyProviders() { const providers = await getProxyProviders(); - const mappedEntries = Object.entries(providers.providers) - .sort() - .filter( - ([, item]) => - item?.vehicleType === "HTTP" || item?.vehicleType === "File", - ) - .map(([name, item]) => { - if (!item) return [name, undefined] as const; - - const subscriptionInfo = - item.subscriptionInfo && typeof item.subscriptionInfo === "object" - ? { - Upload: item.subscriptionInfo.Upload ?? 0, - Download: item.subscriptionInfo.Download ?? 0, - Total: item.subscriptionInfo.Total ?? 0, - Expire: item.subscriptionInfo.Expire ?? 0, - } - : undefined; - - const normalized: IProxyProviderItem = { - name: item.name, - type: item.type, - proxies: item.proxies ?? [], - updatedAt: item.updatedAt ?? "", - vehicleType: item.vehicleType ?? 
"", - subscriptionInfo, - }; - return [name, normalized] as const; - }); - - const mapped = Object.fromEntries(mappedEntries) as ProxyProviderRecord; - cachedProxyProviders = mapped; - return mapped; + return Object.fromEntries( + Object.entries(providers.providers) + .sort() + .filter( + ([_, item]) => + item?.vehicleType === "HTTP" || item?.vehicleType === "File", + ), + ); } export async function getClashLogs() { @@ -651,13 +555,3 @@ export const isAdmin = async () => { export async function getNextUpdateTime(uid: string) { return invoke("get_next_update_time", { uid }); } - -export async function getProfileSwitchStatus() { - return invoke("get_profile_switch_status"); -} - -export async function getProfileSwitchEvents(afterSequence: number) { - return invoke("get_profile_switch_events", { - afterSequence, - }); -} diff --git a/src/services/noticeService.ts b/src/services/noticeService.ts index 7275dd45..0a3505da 100644 --- a/src/services/noticeService.ts +++ b/src/services/noticeService.ts @@ -14,20 +14,10 @@ let nextId = 0; let notices: NoticeItem[] = []; const listeners: Set = new Set(); -function flushListeners() { +function notifyListeners() { listeners.forEach((listener) => listener([...notices])); // Pass a copy } -let notifyScheduled = false; -function scheduleNotify() { - if (notifyScheduled) return; - notifyScheduled = true; - requestAnimationFrame(() => { - notifyScheduled = false; - flushListeners(); - }); -} - // Shows a notification. export function showNotice( @@ -54,7 +44,7 @@ export function showNotice( } notices = [...notices, newNotice]; - scheduleNotify(); + notifyListeners(); return id; } @@ -66,7 +56,7 @@ export function hideNotice(id: number) { clearTimeout(notice.timerId); // Clear timeout if manually closed } notices = notices.filter((n) => n.id !== id); - scheduleNotify(); + notifyListeners(); } // Subscribes a listener function to notice state changes. 
@@ -87,5 +77,5 @@ export function clearAllNotices() { if (n.timerId) clearTimeout(n.timerId); }); notices = []; - scheduleNotify(); + notifyListeners(); } diff --git a/src/services/refresh.ts b/src/services/refresh.ts deleted file mode 100644 index 6150680d..00000000 --- a/src/services/refresh.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { mutate } from "swr"; - -import { getAxios } from "@/services/api"; - -export const refreshClashData = async () => { - try { - await getAxios(true); - } catch (error) { - console.warn("[Refresh] getAxios failed during clash refresh:", error); - } - - mutate("getProxies"); - mutate("getVersion"); - mutate("getClashConfig"); - mutate("getProxyProviders"); -}; - -export const refreshVergeData = () => { - mutate("getVergeConfig"); - mutate("getSystemProxy"); - mutate("getAutotemProxy"); - mutate("getRunningMode"); - mutate("isServiceAvailable"); -}; diff --git a/src/stores/profile-store.ts b/src/stores/profile-store.ts deleted file mode 100644 index 446b6226..00000000 --- a/src/stores/profile-store.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { create } from "zustand"; - -import type { SwitchResultStatus } from "@/services/cmds"; - -interface ProfileStoreState { - data: IProfilesConfig | null; - optimisticCurrent: string | null; - isHydrating: boolean; - lastEventSeq: number; - lastResult: SwitchResultStatus | null; - applySwitchResult: (result: SwitchResultStatus) => void; - commitHydrated: (data: IProfilesConfig) => void; - setLastEventSeq: (sequence: number) => void; -} - -export const useProfileStore = create((set) => ({ - data: null, - optimisticCurrent: null, - isHydrating: false, - lastEventSeq: 0, - lastResult: null, - applySwitchResult(result) { - // Record the optimistic switch outcome so the UI reflects the desired profile immediately. - set((state) => ({ - lastResult: result, - optimisticCurrent: result.success ? result.profileId : null, - isHydrating: result.success ? 
true : state.isHydrating, - })); - }, - commitHydrated(data) { - set({ - data, - optimisticCurrent: null, - isHydrating: false, - }); - }, - setLastEventSeq(sequence) { - set({ lastEventSeq: sequence }); - }, -})); - -export const selectEffectiveProfiles = (state: ProfileStoreState) => { - if (!state.data) { - return null; - } - // Prefer the optimistic selection while hydration is pending. - const current = state.optimisticCurrent ?? state.data.current; - if ( - state.optimisticCurrent && - state.optimisticCurrent !== state.data.current - ) { - return { ...state.data, current } as IProfilesConfig; - } - return state.data; -}; - -export const selectIsHydrating = (state: ProfileStoreState) => - state.isHydrating; -export const selectLastResult = (state: ProfileStoreState) => state.lastResult; diff --git a/src/stores/proxy-store.ts b/src/stores/proxy-store.ts deleted file mode 100644 index b8c87ef8..00000000 --- a/src/stores/proxy-store.ts +++ /dev/null @@ -1,298 +0,0 @@ -import type { getProxies } from "tauri-plugin-mihomo-api"; -import { create } from "zustand"; - -import { - ProxiesView, - ProxyProviderRecord, - buildProxyView, - calcuProxies, - getCachedProxyProviders, - setCachedProxyProviders, -} from "@/services/cmds"; -import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue"; - -type ProxyHydration = "none" | "snapshot" | "live"; -type RawProxiesResponse = Awaited>; - -export interface ProxiesUpdatedPayload { - proxies: RawProxiesResponse; - providers?: Record | null; - emittedAt?: number; - profileId?: string | null; -} - -interface ProxyStoreState { - data: ProxiesView | null; - hydration: ProxyHydration; - lastUpdated: number | null; - lastProfileId: string | null; - liveFetchRequestId: number; - lastAppliedFetchId: number; - pendingProfileId: string | null; - pendingSnapshotFetchId: number | null; - setSnapshot: (snapshot: ProxiesView, profileId: string) => void; - setLive: (payload: ProxiesUpdatedPayload) => void; - startLiveFetch: () => number; - 
completeLiveFetch: (requestId: number, view: ProxiesView) => void; - clearPendingProfile: () => void; - reset: () => void; -} - -const normalizeProviderPayload = ( - raw: ProxiesUpdatedPayload["providers"], -): ProxyProviderRecord | null => { - if (!raw || typeof raw !== "object") return null; - - const rawRecord = raw as Record; - const source = - rawRecord.providers && typeof rawRecord.providers === "object" - ? (rawRecord.providers as Record) - : rawRecord; - - const entries = Object.entries(source) - .sort(([a], [b]) => a.localeCompare(b)) - .filter(([, value]) => { - if (!value || typeof value !== "object") { - return false; - } - const vehicleType = value.vehicleType; - return vehicleType === "HTTP" || vehicleType === "File"; - }) - .map(([name, value]) => { - const normalized: IProxyProviderItem = { - name: value.name ?? name, - type: value.type ?? "", - proxies: Array.isArray(value.proxies) ? value.proxies : [], - updatedAt: value.updatedAt ?? "", - vehicleType: value.vehicleType ?? "", - subscriptionInfo: - value.subscriptionInfo && typeof value.subscriptionInfo === "object" - ? { - Upload: Number(value.subscriptionInfo.Upload ?? 0), - Download: Number(value.subscriptionInfo.Download ?? 0), - Total: Number(value.subscriptionInfo.Total ?? 0), - Expire: Number(value.subscriptionInfo.Expire ?? 
0), - } - : undefined, - }; - - return [name, normalized] as const; - }); - - return Object.fromEntries(entries) as ProxyProviderRecord; -}; - -export const useProxyStore = create((set, get) => ({ - data: null, - hydration: "none", - lastUpdated: null, - lastProfileId: null, - liveFetchRequestId: 0, - lastAppliedFetchId: 0, - pendingProfileId: null, - pendingSnapshotFetchId: null, - setSnapshot(snapshot, profileId) { - const stateBefore = get(); - - set((state) => ({ - data: snapshot, - hydration: "snapshot", - lastUpdated: null, - pendingProfileId: profileId, - pendingSnapshotFetchId: state.liveFetchRequestId, - })); - - const hasLiveHydration = - stateBefore.hydration === "live" && - stateBefore.lastProfileId === profileId; - - if (profileId && !hasLiveHydration) { - void fetchLiveProxies().catch((error) => { - console.warn( - "[ProxyStore] Failed to bootstrap live proxies from snapshot:", - error, - ); - scheduleBootstrapLiveFetch(800); - }); - } - }, - setLive(payload) { - const state = get(); - const emittedAt = payload.emittedAt ?? Date.now(); - - if ( - state.hydration === "live" && - state.lastUpdated !== null && - emittedAt <= state.lastUpdated - ) { - return; - } - - const providersRecord = - normalizeProviderPayload(payload.providers) ?? getCachedProxyProviders(); - - if (providersRecord) { - setCachedProxyProviders(providersRecord); - } - - const view = buildProxyView(payload.proxies, providersRecord); - const nextProfileId = payload.profileId ?? state.lastProfileId; - - set((current) => ({ - data: view, - hydration: "live", - lastUpdated: emittedAt, - lastProfileId: nextProfileId ?? 
null, - lastAppliedFetchId: current.liveFetchRequestId, - pendingProfileId: null, - pendingSnapshotFetchId: null, - })); - }, - startLiveFetch() { - let nextRequestId = 0; - set((state) => { - nextRequestId = state.liveFetchRequestId + 1; - return { - liveFetchRequestId: nextRequestId, - }; - }); - return nextRequestId; - }, - completeLiveFetch(requestId, view) { - const state = get(); - if (requestId <= state.lastAppliedFetchId) { - return; - } - - const shouldAdoptPending = - state.pendingProfileId !== null && - requestId >= (state.pendingSnapshotFetchId ?? 0); - - set({ - data: view, - hydration: "live", - lastUpdated: Date.now(), - lastProfileId: shouldAdoptPending - ? state.pendingProfileId - : state.lastProfileId, - lastAppliedFetchId: requestId, - pendingProfileId: shouldAdoptPending ? null : state.pendingProfileId, - pendingSnapshotFetchId: shouldAdoptPending - ? null - : state.pendingSnapshotFetchId, - }); - }, - clearPendingProfile() { - set({ - pendingProfileId: null, - pendingSnapshotFetchId: null, - }); - }, - reset() { - set({ - data: null, - hydration: "none", - lastUpdated: null, - lastProfileId: null, - liveFetchRequestId: 0, - lastAppliedFetchId: 0, - pendingProfileId: null, - pendingSnapshotFetchId: null, - }); - scheduleBootstrapLiveFetch(200); - }, -})); - -const liveApplyQueue = new AsyncEventQueue(); -let pendingLivePayload: ProxiesUpdatedPayload | null = null; -let liveApplyScheduled = false; - -const scheduleLiveApply = () => { - if (liveApplyScheduled) return; - liveApplyScheduled = true; - - const dispatch = () => { - liveApplyScheduled = false; - const payload = pendingLivePayload; - pendingLivePayload = null; - if (!payload) return; - - liveApplyQueue.enqueue(async () => { - await nextTick(); - useProxyStore.getState().setLive(payload); - }); - }; - - if ( - typeof window !== "undefined" && - typeof window.requestAnimationFrame === "function" - ) { - window.requestAnimationFrame(dispatch); - } else { - setTimeout(dispatch, 16); - } -}; 
- -export const applyLiveProxyPayload = (payload: ProxiesUpdatedPayload) => { - pendingLivePayload = payload; - scheduleLiveApply(); -}; - -export const fetchLiveProxies = async () => { - const requestId = useProxyStore.getState().startLiveFetch(); - const view = await calcuProxies(); - useProxyStore.getState().completeLiveFetch(requestId, view); -}; - -const MAX_BOOTSTRAP_ATTEMPTS = 5; -const BOOTSTRAP_BASE_DELAY_MS = 600; -let bootstrapAttempts = 0; -let bootstrapTimer: number | null = null; - -function attemptBootstrapLiveFetch() { - const state = useProxyStore.getState(); - if (state.hydration === "live") { - bootstrapAttempts = 0; - return; - } - - if (bootstrapAttempts >= MAX_BOOTSTRAP_ATTEMPTS) { - return; - } - - const attemptNumber = ++bootstrapAttempts; - - void fetchLiveProxies() - .then(() => { - bootstrapAttempts = 0; - }) - .catch((error) => { - console.warn( - `[ProxyStore] Bootstrap live fetch attempt ${attemptNumber} failed:`, - error, - ); - if (attemptNumber < MAX_BOOTSTRAP_ATTEMPTS) { - scheduleBootstrapLiveFetch(BOOTSTRAP_BASE_DELAY_MS * attemptNumber); - } - }); -} - -function scheduleBootstrapLiveFetch(delay = 0) { - if (typeof window === "undefined") { - return; - } - - if (bootstrapTimer !== null) { - window.clearTimeout(bootstrapTimer); - bootstrapTimer = null; - } - - bootstrapTimer = window.setTimeout(() => { - bootstrapTimer = null; - attemptBootstrapLiveFetch(); - }, delay); -} - -if (typeof window !== "undefined") { - void nextTick().then(() => scheduleBootstrapLiveFetch(0)); -} diff --git a/src/utils/asyncQueue.ts b/src/utils/asyncQueue.ts deleted file mode 100644 index 927faa83..00000000 --- a/src/utils/asyncQueue.ts +++ /dev/null @@ -1,31 +0,0 @@ -export class AsyncEventQueue { - private tail: Promise = Promise.resolve(); - - enqueue(task: () => Promise | void) { - this.tail = this.tail - .then(async () => { - await task(); - }) - .catch((error) => { - console.error("AsyncEventQueue task failed", error); - }); - } - - clear() { - 
this.tail = Promise.resolve(); - } -} - -export const nextTick = () => - new Promise((resolve) => { - if (typeof queueMicrotask === "function") { - queueMicrotask(resolve); - } else { - Promise.resolve().then(() => resolve()); - } - }); - -export const afterPaint = () => - new Promise((resolve) => { - requestAnimationFrame(() => resolve()); - }); diff --git a/src/utils/proxy-snapshot.ts b/src/utils/proxy-snapshot.ts deleted file mode 100644 index 2e451db2..00000000 --- a/src/utils/proxy-snapshot.ts +++ /dev/null @@ -1,205 +0,0 @@ -import yaml from "js-yaml"; - -const createProxyItem = ( - name: string, - partial: Partial = {}, -): IProxyItem => ({ - name, - type: partial.type ?? "unknown", - udp: partial.udp ?? false, - xudp: partial.xudp ?? false, - tfo: partial.tfo ?? false, - mptcp: partial.mptcp ?? false, - smux: partial.smux ?? false, - history: [], - provider: partial.provider, - testUrl: partial.testUrl, - hidden: partial.hidden, - icon: partial.icon, - fixed: partial.fixed, -}); - -const createGroupItem = ( - name: string, - all: IProxyItem[], - partial: Partial = {}, -): IProxyGroupItem => { - const rest = { ...partial } as Partial; - delete (rest as Partial).all; - const base = createProxyItem(name, rest); - return { - ...base, - all, - now: partial.now ?? base.now, - }; -}; - -const ensureProxyItem = ( - map: Map, - name: string, - source?: Partial, -) => { - const key = String(name); - if (map.has(key)) return map.get(key)!; - const item = createProxyItem(key, source); - map.set(key, item); - return item; -}; - -const parseProxyEntry = (entry: any): IProxyItem | null => { - if (!entry || typeof entry !== "object") return null; - const name = entry.name || entry.uid || entry.id; - if (!name) return null; - return createProxyItem(String(name), { - type: entry.type ? 
String(entry.type) : undefined, - udp: Boolean(entry.udp), - xudp: Boolean(entry.xudp), - tfo: Boolean(entry.tfo), - mptcp: Boolean(entry.mptcp), - smux: Boolean(entry.smux), - testUrl: entry.test_url || entry.testUrl, - }); -}; - -const isNonEmptyString = (value: unknown): value is string => - typeof value === "string" && value.trim().length > 0; - -const parseProxyGroup = ( - entry: any, - proxyMap: Map, -): IProxyGroupItem | null => { - if (!entry || typeof entry !== "object") return null; - const name = entry.name; - if (!name) return null; - - const rawProxies: unknown[] = Array.isArray(entry.proxies) - ? entry.proxies - : []; - - const proxyRefs: string[] = rawProxies - .filter(isNonEmptyString) - .map((item) => item.trim()); - - const uniqueNames: string[] = Array.from(new Set(proxyRefs)); - - const all = uniqueNames.map((proxyName) => - ensureProxyItem(proxyMap, proxyName), - ); - - return createGroupItem(String(name), all, { - type: entry.type ? String(entry.type) : "Selector", - provider: entry.provider, - testUrl: entry.testUrl || entry.test_url, - now: typeof entry.now === "string" ? 
entry.now : undefined, - }); -}; - -const mapRecords = ( - proxies: Map, - groups: IProxyGroupItem[], - extra: IProxyItem[] = [], -): Record => { - const result: Record = {}; - proxies.forEach((item, key) => { - result[key] = item; - }); - groups.forEach((group) => { - result[group.name] = group as unknown as IProxyItem; - }); - extra.forEach((item) => { - result[item.name] = item; - }); - return result; -}; - -export const createProxySnapshotFromProfile = ( - yamlContent: string, -): { - global: IProxyGroupItem; - direct: IProxyItem; - groups: IProxyGroupItem[]; - records: Record; - proxies: IProxyItem[]; -} | null => { - let parsed: any; - try { - parsed = yaml.load(yamlContent); - } catch (error) { - console.warn("[ProxySnapshot] Failed to parse YAML:", error); - return null; - } - - if (!parsed || typeof parsed !== "object") { - return null; - } - - const proxyMap = new Map(); - - if (Array.isArray((parsed as any).proxies)) { - for (const entry of (parsed as any).proxies) { - const item = parseProxyEntry(entry); - if (item) { - proxyMap.set(item.name, item); - } - } - } - - const proxyProviders = (parsed as any)["proxy-providers"]; - if (proxyProviders && typeof proxyProviders === "object") { - for (const key of Object.keys(proxyProviders)) { - const provider = proxyProviders[key]; - if (provider && Array.isArray(provider.proxies)) { - provider.proxies - .filter( - (proxyName: unknown): proxyName is string => - typeof proxyName === "string", - ) - .forEach((proxyName: string) => ensureProxyItem(proxyMap, proxyName)); - } - } - } - - const groups: IProxyGroupItem[] = []; - if (Array.isArray((parsed as any)["proxy-groups"])) { - for (const entry of (parsed as any)["proxy-groups"]) { - const groupItem = parseProxyGroup(entry, proxyMap); - if (groupItem) { - groups.push(groupItem); - } - } - } - - const direct = createProxyItem("DIRECT", { type: "Direct" }); - const reject = createProxyItem("REJECT", { type: "Reject" }); - - ensureProxyItem(proxyMap, direct.name, 
direct); - ensureProxyItem(proxyMap, reject.name, reject); - - let global = groups.find((group) => group.name === "GLOBAL"); - if (!global) { - const globalRefs = groups.flatMap((group) => - group.all.map((proxy) => proxy.name), - ); - const unique = Array.from(new Set(globalRefs)); - const all = unique.map((name) => ensureProxyItem(proxyMap, name)); - global = createGroupItem("GLOBAL", all, { - type: "Selector", - hidden: true, - }); - groups.unshift(global); - } - - const proxies = Array.from(proxyMap.values()).filter( - (item) => !groups.some((group) => group.name === item.name), - ); - - const records = mapRecords(proxyMap, groups, [direct, reject]); - - return { - global, - direct, - groups, - records, - proxies, - }; -}; From 6d7efbbf2835fbf786b0a6ede5ce8444cb001b43 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 20:08:57 +0800 Subject: [PATCH 32/70] fix: reorder import statements and enhance normalizeDetailsTags function --- scripts/telegram.mjs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/scripts/telegram.mjs b/scripts/telegram.mjs index d7c741fc..f1240494 100644 --- a/scripts/telegram.mjs +++ b/scripts/telegram.mjs @@ -1,6 +1,6 @@ import axios from "axios"; import { readFileSync } from "fs"; -import { log_success, log_error, log_info } from "./utils.mjs"; +import { log_error, log_info, log_success } from "./utils.mjs"; const CHAT_ID_RELEASE = "@clash_verge_re"; // 正式发布频道 const CHAT_ID_TEST = "@vergetest"; // 测试频道 @@ -71,6 +71,19 @@ async function sendTelegramNotification() { .join("\n"); } + function normalizeDetailsTags(content) { + return content + .replace( + /\s*\s*(.*?)\s*<\/strong>\s*<\/summary>/g, + "\n$1\n", + ) + .replace(/\s*(.*?)\s*<\/summary>/g, "\n$1\n") + .replace(/<\/?details>/g, "") + .replace(/<\/?strong>/g, (m) => (m === "" ? 
"" : "")) + .replace(//g, "\n"); + } + + releaseContent = normalizeDetailsTags(releaseContent); const formattedContent = convertMarkdownToTelegramHTML(releaseContent); const releaseTitle = isAutobuild ? "滚动更新版发布" : "正式发布"; From 999830aaf590964965fa49155644f8954b4e708c Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 20:18:23 +0800 Subject: [PATCH 33/70] fix: correct download link for ARM64 Windows setup in autobuild workflow --- .github/workflows/autobuild.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/autobuild.yml b/.github/workflows/autobuild.yml index f4dc74e4..793a1c59 100644 --- a/.github/workflows/autobuild.yml +++ b/.github/workflows/autobuild.yml @@ -538,7 +538,7 @@ jobs: ### Windows (不再支持Win7) #### 正常版本(推荐) - - [64位(常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup_windows.exe) | [ARM64(不常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup.exe) + - [64位(常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64-setup_windows.exe) | [ARM64(不常用)](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64-setup_windows.exe) #### 内置Webview2版(体积较大,仅在企业版系统或无法安装webview2时使用) - [64位](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_x64_fixed_webview2-setup.exe) | [ARM64](${{ env.DOWNLOAD_URL }}/Clash.Verge_${{ env.VERSION }}_arm64_fixed_webview2-setup.exe) From 4d5f1f432790a346212749f982a1c80eaeea3758 Mon Sep 17 00:00:00 2001 From: oomeow Date: Thu, 30 Oct 2025 20:28:56 +0800 Subject: [PATCH 34/70] fix: incorrect proxies route --- src/components/home/current-proxy-card.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/home/current-proxy-card.tsx b/src/components/home/current-proxy-card.tsx index ceea82d7..c5061938 100644 --- a/src/components/home/current-proxy-card.tsx +++ b/src/components/home/current-proxy-card.tsx @@ -506,7 +506,7 @@ export const CurrentProxyCard = () => { // 
导航到代理页面 const goToProxies = useCallback(() => { - navigate("/"); + navigate("/proxies"); }, [navigate]); // 获取要显示的代理节点 From ffb7400a221b5686b0e1ef57f7e83514eb1cbee5 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 00:11:55 +0800 Subject: [PATCH 35/70] fix: add updateCargoLock to postUpdateOptions in renovate.json --- renovate.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index 7df27b15..3aa47244 100644 --- a/renovate.json +++ b/renovate.json @@ -42,6 +42,6 @@ "groupName": "github actions" } ], - "postUpdateOptions": ["pnpmDedupe"], + "postUpdateOptions": ["pnpmDedupe", "updateCargoLock"], "ignoreDeps": ["criterion"] } From ef35752d84cb2a8d88d3803fcce5bdd49d988e8a Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 00:33:11 +0800 Subject: [PATCH 36/70] fix: specify version for sysproxy dependency in Cargo.toml --- src-tauri/Cargo.lock | 332 +++++++++++++++++++++---------------------- src-tauri/Cargo.toml | 2 +- 2 files changed, 165 insertions(+), 169 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 73a8a381..7c7f2dd2 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -58,9 +58,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -147,7 +147,7 @@ dependencies = [ "objc2-foundation 0.3.2", "parking_lot 0.12.5", "percent-encoding", - "windows-sys 0.59.0", + "windows-sys 0.60.2", "wl-clipboard-rs", "x11rb", ] @@ -1068,18 +1068,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.50" +version = "4.5.51" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623" +checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.50" +version = "4.5.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0" +checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" dependencies = [ "anstyle", "clap_lex", @@ -1243,7 +1243,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -1935,7 +1935,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2191,7 +2191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3356,7 +3356,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.1", "system-configuration", "tokio", "tower-service", @@ -3376,7 +3376,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.58.0", + "windows-core 0.62.2", ] [[package]] @@ -3413,12 +3413,13 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", + "serde", "tinystr", "writeable", "zerovec", @@ -3426,9 +3427,9 
@@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "8b24a59706036ba941c9476a55cd57b82b77f38a3c667d637ee7cabbc85eaedc" dependencies = [ "displaydoc", "icu_collections", @@ -3449,9 +3450,9 @@ checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "f5a97b8ac6235e69506e8dacfb2adf38461d2ce6d3e9bd9c94c4cbc3cd4400a4" dependencies = [ "displaydoc", "icu_collections", @@ -3471,14 +3472,14 @@ checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", + "serde", "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -3792,9 +3793,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -3987,9 +3988,9 @@ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "litrs" @@ -4226,9 +4227,9 @@ dependencies = [ [[package]] name = "moxcms" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c588e11a3082784af229e23e8e4ecf5bcc6fbe4f69101e0421ce8d79da7f0b40" +checksum = "0fbdd3d7436f8b5e892b8b7ea114271ff0fa00bc5acae845d53b07d498616ef6" dependencies = [ "num-traits", "pxfm", @@ -4456,7 +4457,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4950,7 +4951,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d8fae84b431384b68627d0f9b3b1245fcf9f46f6c0e3dc902e9dce64edd1967" dependencies = [ "libc", - "windows-sys 0.45.0", + "windows-sys 0.61.2", ] [[package]] @@ -5503,9 +5504,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -5751,7 +5752,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.5.10", + "socket2 0.6.1", "thiserror 2.0.17", "tokio", "tracing", @@ -5788,9 +5789,9 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -6050,11 +6051,11 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "regress" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" +checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.0", "memchr", ] @@ -6296,7 +6297,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -6315,9 +6316,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" dependencies = [ "web-time", "zeroize", @@ -6325,9 +6326,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.7" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -7137,16 +7138,16 @@ dependencies = [ [[package]] name = "sysproxy" -version = "0.3.0" -source = "git+https://github.com/clash-verge-rev/sysproxy-rs#9fe61ca25dc5808cb6d7f13ae73a7a250ab56173" +version = "0.3.1" +source = "git+https://github.com/clash-verge-rev/sysproxy-rs#f1f5ac38614669d03d56821192dc6a1a512d368e" dependencies = [ "interfaces", "iptools", "log", - "thiserror 1.0.69", + "thiserror 2.0.17", "url", - "windows 0.58.0", - "winreg 0.52.0", + "windows 0.62.2", + "winreg 0.55.0", "xdg", ] @@ -7266,9 +7267,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tauri" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9871670c6711f50fddd4e20350be6b9dd6e6c2b5d77d8ee8900eb0d58cd837a" +checksum = 
"8bceb52453e507c505b330afe3398510e87f428ea42b6e76ecb6bd63b15965b5" dependencies = [ "anyhow", "bytes", @@ -7799,7 +7800,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -7990,11 +7991,12 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", + "serde_core", "zerovec", ] @@ -8616,9 +8618,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -8854,9 +8856,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", @@ -8865,25 +8867,11 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.108", - "wasm-bindgen-shared", -] - [[package]] name = "wasm-bindgen-futures" -version = "0.4.54" +version = "0.4.55" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" dependencies = [ "cfg-if", "js-sys", @@ -8894,9 +8882,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8904,22 +8892,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", "syn 2.0.108", - "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] @@ -9012,9 +9000,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -9093,8 +9081,8 @@ dependencies = [ "webview2-com-sys", "windows 0.61.3", "windows-core 0.61.2", - "windows-implement 0.60.2", - "windows-interface 0.59.3", + "windows-implement", + "windows-interface", ] 
[[package]] @@ -9165,7 +9153,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] @@ -9189,27 +9177,29 @@ dependencies = [ "windows-version", ] -[[package]] -name = "windows" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" -dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows" version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ - "windows-collections", + "windows-collections 0.2.0", "windows-core 0.61.2", - "windows-future", + "windows-future 0.2.1", "windows-link 0.1.3", - "windows-numerics", + "windows-numerics 0.2.0", +] + +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections 0.3.2", + "windows-core 0.62.2", + "windows-future 0.3.2", + "windows-numerics 0.3.1", ] [[package]] @@ -9222,16 +9212,12 @@ dependencies = [ ] [[package]] -name = "windows-core" -version = "0.58.0" +name = "windows-collections" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" dependencies = [ - "windows-implement 0.58.0", - "windows-interface 0.58.0", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-core 0.62.2", ] [[package]] @@ -9240,13 +9226,26 @@ version = "0.61.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-implement 0.60.2", - "windows-interface 0.59.3", + "windows-implement", + "windows-interface", "windows-link 0.1.3", "windows-result 0.3.4", "windows-strings 0.4.2", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + [[package]] name = "windows-future" version = "0.2.1" @@ -9255,18 +9254,18 @@ checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core 0.61.2", "windows-link 0.1.3", - "windows-threading", + "windows-threading 0.1.0", ] [[package]] -name = "windows-implement" -version = "0.58.0" +name = "windows-future" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading 0.2.1", ] [[package]] @@ -9280,17 +9279,6 @@ dependencies = [ "syn 2.0.108", ] -[[package]] -name = "windows-interface" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", -] - [[package]] name = "windows-interface" version = "0.59.3" @@ -9324,6 +9312,16 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", +] + [[package]] name = "windows-registry" version = "0.5.3" @@ -9335,15 +9333,6 @@ dependencies = [ "windows-strings 0.4.2", ] -[[package]] -name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-result" version = "0.3.4" @@ -9354,13 +9343,12 @@ dependencies = [ ] [[package]] -name = "windows-strings" -version = "0.1.0" +name = "windows-result" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", + "windows-link 0.2.1", ] [[package]] @@ -9372,6 +9360,15 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -9498,6 +9495,15 @@ dependencies = [ "windows-link 0.1.3", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows-version" version = "0.1.7" @@ -9714,16 +9720,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "winreg" version = "0.55.0" @@ -9767,9 +9763,9 @@ checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wry" @@ -9867,9 +9863,9 @@ dependencies = [ [[package]] name = "xdg" -version = "2.5.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" +checksum = "2fb433233f2df9344722454bc7e96465c9d03bff9d77c248f9e7523fe79585b5" [[package]] name = "xkeysym" @@ -9879,9 +9875,9 @@ checksum = "b9cc00251562a284751c9973bace760d86c0276c471b4be569fe6b068ee97a56" [[package]] name = "xml-rs" -version = "0.8.27" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" [[package]] name = "xsum" @@ -9891,11 +9887,10 @@ checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -9903,9 +9898,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = 
"0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", @@ -10038,9 +10033,9 @@ dependencies = [ [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -10049,10 +10044,11 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ + "serde", "yoke", "zerofrom", "zerovec-derive", @@ -10060,9 +10056,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", @@ -10116,9 +10112,9 @@ checksum = "2f06ae92f42f5e5c42443fd094f245eb656abf56dd7cce9b8b263236565e00f2" [[package]] name = "zopfli" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfc5ee405f504cd4984ecc6f14d02d55cfda60fa4b689434ef4102aae150cd7" +checksum = "f05cd8797d63865425ff89b5c4a48804f35ba0ce8d125800027ad6017d2b5249" dependencies = [ "bumpalo", "crc32fast", diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 1583013a..edf941f6 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml 
@@ -42,7 +42,7 @@ tokio = { version = "1.48.0", features = [ serde = { version = "1.0.228", features = ["derive"] } reqwest = { version = "0.12.24", features = ["json", "cookies"] } regex = "1.12.2" -sysproxy = { git = "https://github.com/clash-verge-rev/sysproxy-rs" } +sysproxy = { version = "0.3.1", git = "https://github.com/clash-verge-rev/sysproxy-rs" } tauri = { version = "2.9.1", features = [ "protocol-asset", "devtools", From 1e9df69ffc9c3136db78ee01ee677405400fb9b0 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 00:55:50 +0800 Subject: [PATCH 37/70] fix: remove unused dependencies from Cargo.toml and Cargo.lock --- src-tauri/Cargo.lock | 5 ----- src-tauri/Cargo.toml | 5 ----- 2 files changed, 10 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 7c7f2dd2..2fd3d8c1 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -1107,17 +1107,13 @@ dependencies = [ "compact_str", "console-subscriber", "criterion", - "dashmap 6.1.0", "deelevate", "delay_timer", - "dirs 6.0.0", "dunce", "flexi_logger", "futures", "gethostname", "getrandom 0.3.4", - "hex", - "hmac", "isahc", "libc", "log", @@ -1136,7 +1132,6 @@ dependencies = [ "serde", "serde_json", "serde_yaml_ng", - "sha2 0.10.9", "signal-hook 0.3.18", "smartstring", "sys-locale", diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index edf941f6..f3253251 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -18,7 +18,6 @@ tauri-build = { version = "2.5.1", features = [] } [dependencies] warp = { version = "0.4.2", features = ["server"] } anyhow = "1.0.100" -dirs = "6.0" open = "5.3.2" log = "0.4.28" dunce = "1.0.5" @@ -67,11 +66,7 @@ futures = "0.3.31" sys-locale = "0.3.2" libc = "0.2.177" gethostname = "1.1.0" -hmac = "0.12.1" -sha2 = "0.10.9" -hex = "0.4.3" scopeguard = "1.2.0" -dashmap = "6.1.0" tauri-plugin-notification = "2.3.3" tokio-stream = "0.1.17" isahc = { version = "1.7.2", default-features = 
false, features = [ From 648c93c066cb0a089a8649062246681577d77a6a Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 05:15:39 +0800 Subject: [PATCH 38/70] chore(i18n): update localization files sorting and add i18n contribution guidline - Add i18n contribution guidline - Chinese (zh.json): fix TPROXY port missing translation --- CONTRIBUTING.md | 4 + CONTRIBUTING_i18n.md | 81 +++++++++++ src/locales/ar.json | 38 ++--- src/locales/de.json | 20 +-- src/locales/es.json | 20 +-- src/locales/fa.json | 20 +-- src/locales/id.json | 82 +++++------ src/locales/jp.json | 26 ++-- src/locales/ko.json | 328 +++++++++++++++++++++--------------------- src/locales/tr.json | 4 +- src/locales/tt.json | 24 ++-- src/locales/zh.json | 9 +- src/locales/zhtw.json | 9 +- 13 files changed, 375 insertions(+), 290 deletions(-) create mode 100644 CONTRIBUTING_i18n.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3b55dd91..debd1009 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,6 +2,10 @@ Thank you for your interest in contributing to Clash Verge Rev! This document provides guidelines and instructions to help you set up your development environment and start contributing. +## Internationalization (i18n) + +We welcome translations and improvements to existing locales. Please follow the detailed guidelines in [CONTRIBUTING_i18n.md](CONTRIBUTING_i18n.md) for instructions on extracting strings, file naming conventions, testing translations, and submitting translation PRs. + ## Development Setup Before you start contributing to the project, you need to set up your development environment. Here are the steps you need to follow: diff --git a/CONTRIBUTING_i18n.md b/CONTRIBUTING_i18n.md new file mode 100644 index 00000000..6e97e463 --- /dev/null +++ b/CONTRIBUTING_i18n.md @@ -0,0 +1,81 @@ +# CONTRIBUTING — i18n + +Thank you for considering contributing to our localization work — your help is appreciated. 
+ +Quick overview + +- cvr-i18 is a CLI that helps manage simple top-level JSON locale files: + - Detect duplicated top-level keys + - Find keys missing versus a base file (default: en.json) + - Export missing entries for translators + - Reorder keys to match the base file for predictable diffs + - Operate on a directory or a single file + +Get the CLI (No binary provided yet) + +```bash +git clone https://github.com/clash-verge-rev/clash-verge-rev-i18n-cli +cd clash-verge-rev-i18n-cli +cargo install --path . +# or +cargo install --git https://github.com/clash-verge-rev/clash-verge-rev-i18n-cli +``` + +Common commands + +- Show help: `cvr-i18` +- Directory (auto-detects `./locales` or `./src/locales`): `cvr-i18 -d /path/to/locales` +- Check duplicates: `cvr-i18 -k` +- Check missing keys: `cvr-i18 -m` +- Export missing keys: `cvr-i18 -m -e ./exports` +- Sort keys to base file: `cvr-i18 -s` +- Use a base file: `cvr-i18 -b base.json` +- Single file: `cvr-i18 -f locales/zh.json` + +Options (short) + +- `-d, --directory ` +- `-f, --file ` +- `-k, --duplicated-key` +- `-m, --missing-key` +- `-e, --export ` +- `-s, --sort` +- `-b, --base ` + +Exit codes + +- `0` — success (no issues) +- `1` — issues found (duplicates/missing) +- `2` — error (IO/parse/runtime) + +How to contribute (recommended steps) + +- Start small: fix typos, improve phrasing, or refine tone and consistency. +- Run the CLI against your locale files to detect duplicates or missing keys. +- Export starter JSONs for translators with `-m -e `. +- Prefer incremental PRs or draft PRs; leave a comment on the issue if you want guidance. +- Open an issue to report missing strings, UI context, or localization bugs. +- Add or improve docs and tests to make future contributions easier. + +PR checklist + +- Keep JSON files UTF-8 encoded. +- Follow the repo’s locale file structure and naming conventions. +- Reorder keys to match the base file (`-s`) for minimal diffs. 
+- Test translations in a local dev build before opening a PR. +- Reference related issues and explain any context for translations or changes. + +Notes + +- The tool expects simple top-level JSON key/value maps. +- Exported JSONs are starter files for translators (fill in values, keep keys). +- Sorting keeps diffs consistent and reviewable. + +Repository +https://github.com/clash-verge-rev/clash-verge-rev-i18n-cli + +## Feedback & Contributions + +- For tool usage issues or feedback: please open an Issue in the [repository](https://github.com/clash-verge-rev/clash-verge-rev-i18n-cli) so it can be tracked and addressed. +- For localization contributions (translations, fixes, context notes, etc.): submit a PR or Issue in this repository and include examples, context, and testing instructions when possible. +- If you need help or a review, leave a comment on your submission requesting assistance. diff --git a/src/locales/ar.json b/src/locales/ar.json index 41138721..ded40404 100644 --- a/src/locales/ar.json +++ b/src/locales/ar.json @@ -21,7 +21,6 @@ "Label-Connections": "الاتصالات", "Label-Rules": "القواعد", "Label-Logs": "السجلات", - "Label-Test": "اختبار", "Label-Settings": "الإعدادات", "Proxies": "الوكلاء", "Proxy Groups": "مجموعات الوكلاء", @@ -166,7 +165,6 @@ "Table View": "عرض الجدول", "List View": "عرض القائمة", "Close All": "إغلاق الكل", - "Default": "افتراضي", "Download Speed": "سرعة التنزيل", "Upload Speed": "سرعة الرفع", "Host": "المضيف", @@ -197,6 +195,8 @@ "Settings": "الإعدادات", "System Setting": "إعدادات النظام", "Tun Mode": "وضع TUN", + "TUN requires Service Mode": "يتطلب وضع TUN خدمة", + "Install Service": "تثبيت الخدمة ", "Reset to Default": "إعادة تعيين إلى الافتراضي", "Tun Mode Info": "وضع TUN (بطاقة شبكة افتراضية): يلتقط كل حركة المرور في النظام. 
عند تمكينه، لا حاجة لتفعيل وكيل النظام.", "Stack": "مكدس TUN", @@ -279,7 +279,8 @@ "Open UWP tool": "فتح أداة UWP", "Open UWP tool Info": "منذ نظام ويندوز 8، يتم تقييد تطبيقات UWP من الوصول المباشر إلى المضيف المحلي. هذه الأداة تتيح تجاوز هذا التقييد", "Update GeoData": "تحديث البيانات الجغرافية", - "Verge Setting": "إعدادات Verge", + "Verge Basic Setting": "الإعدادات الأساسية Verge", + "Verge Advanced Setting": "الإعدادات الأساسية Verge", "Language": "اللغة", "Theme Mode": "وضع السمة", "theme.light": "سمة فاتحة", @@ -365,6 +366,7 @@ "Profile Reactivated": "تم إعادة تنشيط الملف الشخصي", "Only YAML Files Supported": "لا يتم دعم سوى ملفات YAML", "Settings Applied": "تم تطبيق الإعدادات", + "Installing Service...": "جاري تثبيت الخدمة...", "Service Installed Successfully": "تم تثبيت الخدمة بنجاح", "Service Uninstalled Successfully": "تم إلغاء تثبيت الخدمة بنجاح", "Proxy Daemon Duration Cannot be Less than 1 Second": "لا يمكن أن تقل مدة خادم الوكيل عن ثانية واحدة", @@ -407,19 +409,12 @@ "Help": "مساعدة", "About": "حول", "Theme": "السمة", - "TUN Mode": "وضع TUN", "Main Window": "النافذة الرئيسية", "Group Icon": "أيقونة المجموعة", "Menu Icon": "أيقونة القائمة", - "System Proxy Bypass": "تخطي وكيل النظام", "PAC File": "ملف PAC", "Web UI": "واجهة الويب", "Hotkeys": "اختصارات لوحة المفاتيح", - "Auto Close Connection": "إغلاق الاتصال تلقائيًا", - "Enable Built-in Enhanced": "تفعيل التحسين المدمج", - "Proxy Layout Column": "عمود عرض الوكيل", - "Test List": "قائمة الاختبارات", - "Enable Random Port": "تفعيل المنفذ العشوائي", "Verge Mixed Port": "منفذ Verge المختلط", "Verge Socks Port": "منفذ Verge SOCKS", "Verge Redir Port": "منفذ إعادة التوجيه لـ Verge", @@ -429,15 +424,16 @@ "WebDAV URL": "رابط WebDAV", "WebDAV Username": "اسم المستخدم لـ WebDAV", "WebDAV Password": "كلمة مرور WebDAV", + "Dashboard": "لوحة التحكم", + "Restart App": "إعادة تشغيل التطبيق", + "Restart Clash Core": "إعادة تشغيل نواة Clash", + "TUN Mode": "وضع TUN", "Copy Env": "نسخ البيئة", "Conf Dir": "مجلد 
الإعدادات", "Core Dir": "مجلد النواة", "Logs Dir": "مجلد السجلات", "Open Dir": "فتح المجلد", - "Restart Clash Core": "إعادة تشغيل نواة Clash", - "Restart App": "إعادة تشغيل التطبيق", "More": "المزيد", - "Dashboard": "لوحة التحكم", "Rule Mode": "وضع القواعد", "Global Mode": "الوضع العالمي", "Direct Mode": "الوضع المباشر", @@ -454,10 +450,14 @@ "Script File Error": "خطأ في ملف السكريبت، تم التراجع عن التغييرات", "Core Changed Successfully": "تم تغيير النواة بنجاح", "Failed to Change Core": "فشل تغيير النواة", - "Verge Basic Setting": "الإعدادات الأساسية Verge", - "Verge Advanced Setting": "الإعدادات الأساسية Verge", - "TUN requires Service Mode": "يتطلب وضع TUN خدمة", - "Install Service": "تثبيت الخدمة ", - "Installing Service...": "جاري تثبيت الخدمة...", - "Service Administrator Prompt": "يتطلب Clash Verge امتيازات المسؤول لإعادة تثبيت خدمة النظام" + "Service Administrator Prompt": "يتطلب Clash Verge امتيازات المسؤول لإعادة تثبيت خدمة النظام", + "Auto Close Connection": "إغلاق الاتصال تلقائيًا", + "Default": "افتراضي", + "Enable Built-in Enhanced": "تفعيل التحسين المدمج", + "Enable Random Port": "تفعيل المنفذ العشوائي", + "Label-Test": "اختبار", + "Proxy Layout Column": "عمود عرض الوكيل", + "System Proxy Bypass": "تخطي وكيل النظام", + "Test List": "قائمة الاختبارات", + "Verge Setting": "إعدادات Verge" } diff --git a/src/locales/de.json b/src/locales/de.json index e6e6e03a..a13b7181 100644 --- a/src/locales/de.json +++ b/src/locales/de.json @@ -195,10 +195,11 @@ "Settings": "Einstellungen", "System Setting": "Systemeinstellungen", "Tun Mode": "Virtual Network Interface-Modus", - "TUN requires Service Mode or Admin Mode": "TUN-Modus erfordert Service-Modus oder Administrator-Modus", "Install Service": "Service installieren", + "Uninstall Service": "Dienst deinstallieren", "Reset to Default": "Auf Standardwerte zurücksetzen", "Tun Mode Info": "Der TUN-Modus (Virtual Network Interface) übernimmt den gesamten Systemverkehr. 
Wenn dieser Modus aktiviert ist, muss der Systemproxy nicht geöffnet werden.", + "TUN requires Service Mode or Admin Mode": "TUN-Modus erfordert Service-Modus oder Administrator-Modus", "System Proxy Enabled": "Der Systemproxy ist aktiviert. Ihre Anwendungen werden über den Proxy auf das Netzwerk zugreifen.", "System Proxy Disabled": "Der Systemproxy ist deaktiviert. Es wird empfohlen, diesen Eintrag für die meisten Benutzer zu aktivieren.", "TUN Mode Enabled": "Der TUN-Modus ist aktiviert. Die Anwendungen werden über die virtuelle Netzwerkschnittstelle auf das Netzwerk zugreifen.", @@ -254,6 +255,7 @@ "Unified Delay Info": "Wenn die einheitliche Latenz aktiviert ist, werden zwei Latenztests durchgeführt, um die Latenzunterschiede zwischen verschiedenen Knotentypen aufgrund von Verbindungsaufbau und anderen Faktoren zu eliminieren.", "Log Level": "Protokolliergrad", "Log Level Info": "Dies wirkt sich nur auf die Kernprotokolldateien im Verzeichnis Service im Protokollverzeichnis aus.", + "Port Config": "Port-Konfiguration", "Random Port": "Zufälliger Port", "Mixed Port": "Mischter Proxy-Port", "Socks Port": "SOCKS-Proxy-Port", @@ -367,17 +369,16 @@ "Stopping Core...": "Kern wird gestoppt...", "Restarting Core...": "Kern wird neu gestartet...", "Installing Service...": "Service wird installiert...", - "Uninstall Service": "Dienst deinstallieren", - "Service Installed Successfully": "Service erfolgreich installiert", - "Service is ready and core restarted": "Service ist bereit und Kern wurde neu gestartet", - "Core restarted. Service is now available.": "Kern wurde neu gestartet. Service ist jetzt verfügbar", - "Service was ready, but core restart might have issues or service became unavailable. Please check.": "Der Dienst war bereit, aber beim Neustart des Kerns könnten Probleme aufgetreten sein oder der Dienst ist möglicherweise nicht verfügbar. Bitte überprüfen Sie dies.", - "Service installation or core restart encountered issues. Service might not be available. 
Please check system logs.": "Bei der Dienstinstallation oder dem Neustart des Kerns sind Probleme aufgetreten. Der Dienst ist möglicherweise nicht verfügbar. Bitte prüfen Sie die Systemprotokolle.", "Uninstalling Service...": "Service wird deinstalliert...", - "Waiting for service to be ready...": "Auf Service-Bereitschaft gewartet...", + "Service Installed Successfully": "Service erfolgreich installiert", "Service Uninstalled Successfully": "Service erfolgreich deinstalliert", "Proxy Daemon Duration Cannot be Less than 1 Second": "Das Intervall des Proxy-Daemons darf nicht weniger als 1 Sekunde betragen.", "Invalid Bypass Format": "Ungültiges Format für die Proxy-Umgehung", + "Waiting for service to be ready...": "Auf Service-Bereitschaft gewartet...", + "Service was ready, but core restart might have issues or service became unavailable. Please check.": "Der Dienst war bereit, aber beim Neustart des Kerns könnten Probleme aufgetreten sein oder der Dienst ist möglicherweise nicht verfügbar. Bitte überprüfen Sie dies.", + "Service installation or core restart encountered issues. Service might not be available. Please check system logs.": "Bei der Dienstinstallation oder dem Neustart des Kerns sind Probleme aufgetreten. Der Dienst ist möglicherweise nicht verfügbar. Bitte prüfen Sie die Systemprotokolle.", + "Service is ready and core restarted": "Service ist bereit und Kern wurde neu gestartet", + "Core restarted. Service is now available.": "Kern wurde neu gestartet. 
Service ist jetzt verfügbar", "Core Version Updated": "Kernversion wurde aktualisiert", "Clash Core Restarted": "Clash-Kern wurde neu gestartet", "GeoData Updated": "Geo-Daten wurden aktualisiert", @@ -555,10 +556,9 @@ "Disallowed ISP": "Nicht zugelassener Internetdienstanbieter", "Originals Only": "Nur Original", "Unsupported Country/Region": "Nicht unterstütztes Land/Region", + "Configuration saved successfully": "Zufalls-Konfiguration erfolgreich gespeichert", "Controller address copied to clipboard": "API-Port in die Zwischenablage kopiert", "Secret copied to clipboard": "API-Schlüssel in die Zwischenablage kopiert", "Copy to clipboard": "Klicken Sie hier, um zu kopieren", - "Port Config": "Port-Konfiguration", - "Configuration saved successfully": "Zufalls-Konfiguration erfolgreich gespeichert", "Enable one-click random API port and key. Click to randomize the port and key": "Einstellsichere Zufalls-API-Port- und Schlüsselgenerierung aktivieren. Klicken Sie, um Port und Schlüssel zu randomisieren" } diff --git a/src/locales/es.json b/src/locales/es.json index 456a4da4..e13a81c2 100644 --- a/src/locales/es.json +++ b/src/locales/es.json @@ -195,10 +195,11 @@ "Settings": "Ajustes", "System Setting": "Ajustes del sistema", "Tun Mode": "Modo de interfaz virtual (TUN)", - "TUN requires Service Mode or Admin Mode": "El modo TUN requiere el modo de servicio o el modo de administrador", "Install Service": "Instalar servicio", + "Uninstall Service": "Desinstalar servicio", "Reset to Default": "Restablecer a los valores predeterminados", "Tun Mode Info": "El modo TUN (interfaz virtual) gestiona todo el tráfico del sistema. No es necesario habilitar el proxy del sistema cuando está activado.", + "TUN requires Service Mode or Admin Mode": "El modo TUN requiere el modo de servicio o el modo de administrador", "System Proxy Enabled": "El proxy del sistema está habilitado. 
Sus aplicaciones accederán a Internet a través del proxy.", "System Proxy Disabled": "El proxy del sistema está deshabilitado. Se recomienda a la mayoría de los usuarios habilitar esta opción.", "TUN Mode Enabled": "El modo TUN está habilitado. Las aplicaciones accederán a Internet a través de la interfaz virtual.", @@ -254,6 +255,7 @@ "Unified Delay Info": "Al habilitar la latencia unificada, se realizarán dos pruebas de latencia para eliminar las diferencias de latencia entre diferentes tipos de nodos causadas por el handshake de conexión, etc.", "Log Level": "Nivel de registro", "Log Level Info": "Solo se aplica al archivo de registro del núcleo en la carpeta Service del directorio de registros.", + "Port Config": "Configuración de puerto", "Random Port": "Puerto aleatorio", "Mixed Port": "Puerto de proxy mixto", "Socks Port": "Puerto de proxy SOCKS", @@ -367,17 +369,16 @@ "Stopping Core...": "Deteniendo núcleo...", "Restarting Core...": "Reiniciando núcleo...", "Installing Service...": "Instalando servicio...", - "Uninstall Service": "Desinstalar servicio", - "Service Installed Successfully": "Servicio instalado con éxito", - "Service is ready and core restarted": "El servicio está listo y el núcleo se ha reiniciado", - "Core restarted. Service is now available.": "El núcleo se ha reiniciado. El servicio está disponible.", - "Service was ready, but core restart might have issues or service became unavailable. Please check.": "El servicio estaba listo, pero puede haber habido problemas al reiniciar el núcleo o el servicio se volvió inaccesible. Por favor, verifique.", - "Service installation or core restart encountered issues. Service might not be available. Please check system logs.": "Hubo problemas durante la instalación del servicio o al reiniciar el núcleo. El servicio podría no estar disponible. 
Por favor, revise los registros del sistema.", "Uninstalling Service...": "Desinstalando servicio...", - "Waiting for service to be ready...": "Esperando a que el servicio esté listo...", + "Service Installed Successfully": "Servicio instalado con éxito", "Service Uninstalled Successfully": "Servicio desinstalado con éxito", "Proxy Daemon Duration Cannot be Less than 1 Second": "El intervalo de tiempo del daemon de proxy no puede ser menor de 1 segundo", "Invalid Bypass Format": "Formato de omisión de proxy no válido", + "Waiting for service to be ready...": "Esperando a que el servicio esté listo...", + "Service was ready, but core restart might have issues or service became unavailable. Please check.": "El servicio estaba listo, pero puede haber habido problemas al reiniciar el núcleo o el servicio se volvió inaccesible. Por favor, verifique.", + "Service installation or core restart encountered issues. Service might not be available. Please check system logs.": "Hubo problemas durante la instalación del servicio o al reiniciar el núcleo. El servicio podría no estar disponible. Por favor, revise los registros del sistema.", + "Service is ready and core restarted": "El servicio está listo y el núcleo se ha reiniciado", + "Core restarted. Service is now available.": "El núcleo se ha reiniciado. 
El servicio está disponible.", "Core Version Updated": "Versión del núcleo actualizada", "Clash Core Restarted": "Núcleo de Clash reiniciado", "GeoData Updated": "GeoData actualizado", @@ -555,10 +556,9 @@ "Disallowed ISP": "Proveedor de servicios de Internet no permitido", "Originals Only": "Solo originales", "Unsupported Country/Region": "País/región no soportado", + "Configuration saved successfully": "Configuración aleatoria guardada correctamente", "Controller address copied to clipboard": "El puerto API se copió al portapapeles", "Secret copied to clipboard": "La clave API se copió al portapapeles", "Copy to clipboard": "Haz clic aquí para copiar", - "Port Config": "Configuración de puerto", - "Configuration saved successfully": "Configuración aleatoria guardada correctamente", "Enable one-click random API port and key. Click to randomize the port and key": "Habilitar la generación de puerto y clave API aleatorios con un solo clic. Haz clic para randomizar el puerto y la clave" } diff --git a/src/locales/fa.json b/src/locales/fa.json index 3d529254..346394f9 100644 --- a/src/locales/fa.json +++ b/src/locales/fa.json @@ -21,7 +21,6 @@ "Label-Connections": "اتصالات", "Label-Rules": "قوانین", "Label-Logs": "لاگ‌ها", - "Label-Test": "آزمون", "Label-Settings": "تنظیمات", "Proxies": "پراکسی‌ها", "Proxy Groups": "گروه‌های پراکسی", @@ -166,7 +165,6 @@ "Table View": "نمای جدولی", "List View": "نمای لیستی", "Close All": "بستن همه", - "Default": "پیش‌فرض", "Download Speed": "سرعت دانلود", "Upload Speed": "سرعت بارگذاری", "Host": "میزبان", @@ -197,6 +195,8 @@ "Settings": "تنظیمات", "System Setting": "تنظیمات سیستم", "Tun Mode": "Tun (کارت شبکه مجازی)", + "TUN requires Service Mode": "حالت تونل‌زنی نیاز به سرویس دارد", + "Install Service": "نصب سرویس", "Reset to Default": "بازنشانی به پیش‌فرض", "Tun Mode Info": "حالت Tun (NIC مجازی): تمام ترافیک سیستم را ضبط می کند، وقتی فعال باشد، نیازی به فعال کردن پروکسی سیستم نیست.", "Stack": "انباشته Tun", @@ -275,13 +275,13 @@ 
"Restart": "راه‌اندازی مجدد", "Release Version": "نسخه نهایی", "Alpha Version": "نسخه آلفا", - "Please Install and Enable Service Mode First": "لطفاً ابتدا حالت سرویس را نصب و فعال کنید", "Please enter your root password": "لطفاً رمز ریشه خود را وارد کنید", "Grant": "اعطا", "Open UWP tool": "باز کردن ابزار UWP", "Open UWP tool Info": "از ویندوز 8 به بعد، برنامه‌های UWP (مانند Microsoft Store) از دسترسی مستقیم به خدمات شبکه محلی محدود شده‌اند و این ابزار می‌تواند برای دور زدن این محدودیت استفاده شود", "Update GeoData": "به‌روزرسانی GeoData", - "Verge Setting": "تنظیمات Verge", + "Verge Basic Setting": "تنظیمات پایه Verge", + "Verge Advanced Setting": "تنظیمات پیشرفته Verge", "Language": "زبان", "Theme Mode": "حالت تم", "theme.light": "روشن", @@ -368,6 +368,7 @@ "Profile Reactivated": "پروفایل مجدداً فعال شد", "Only YAML Files Supported": "فقط فایل‌های YAML پشتیبانی می‌شوند", "Settings Applied": "تنظیمات اعمال شد", + "Installing Service...": "در حال نصب سرویس...", "Service Installed Successfully": "سرویس با موفقیت نصب شد", "Service Uninstalled Successfully": "سرویس با موفقیت حذف نصب شد", "Proxy Daemon Duration Cannot be Less than 1 Second": "مدت زمان دیمن پراکسی نمی‌تواند کمتر از 1 ثانیه باشد", @@ -451,10 +452,9 @@ "Script File Error": "خطای فایل اسکریپت، تغییرات برگشت داده شد", "Core Changed Successfully": "هسته با موفقیت تغییر کرد", "Failed to Change Core": "تغییر هسته ناموفق بود", - "Verge Basic Setting": "تنظیمات پایه Verge", - "Verge Advanced Setting": "تنظیمات پیشرفته Verge", - "TUN requires Service Mode": "حالت تونل‌زنی نیاز به سرویس دارد", - "Install Service": "نصب سرویس", - "Installing Service...": "در حال نصب سرویس...", - "Service Administrator Prompt": "Clash Verge برای نصب مجدد سرویس سیستم به امتیازات مدیر نیاز دارد" + "Service Administrator Prompt": "Clash Verge برای نصب مجدد سرویس سیستم به امتیازات مدیر نیاز دارد", + "Default": "پیش‌فرض", + "Label-Test": "آزمون", + "Please Install and Enable Service Mode First": "لطفاً ابتدا حالت سرویس را نصب و فعال 
کنید", + "Verge Setting": "تنظیمات Verge" } diff --git a/src/locales/id.json b/src/locales/id.json index 82fc74d8..7d15a601 100644 --- a/src/locales/id.json +++ b/src/locales/id.json @@ -21,40 +21,7 @@ "Label-Connections": "Koneksi", "Label-Rules": "Aturan", "Label-Logs": "Log", - "Label-Test": "Tes", "Label-Settings": "Pengaturan", - "Dashboard": "Dasbor", - "Profile": "Profil", - "Help": "Bantuan", - "About": "Tentang", - "Theme": "Tema", - "Main Window": "Jendela Utama", - "Group Icon": "Ikon Grup", - "Menu Icon": "Ikon Menu", - "PAC File": "Berkas PAC", - "Web UI": "Antarmuka Web", - "Hotkeys": "Pintasan", - "Verge Mixed Port": "Port Campuran Verge", - "Verge Socks Port": "Port Socks Verge", - "Verge Redir Port": "Port Pengalihan Verge", - "Verge Tproxy Port": "Port Tproxy Verge", - "Verge Port": "Port Verge", - "Verge HTTP Enabled": "HTTP Verge Diaktifkan", - "WebDAV URL": "URL WebDAV", - "WebDAV Username": "Nama Pengguna WebDAV", - "WebDAV Password": "Kata Sandi WebDAV", - "Restart App": "Mulai Ulang Aplikasi", - "Restart Clash Core": "Mulai Ulang Core Clash", - "TUN Mode": "Mode TUN", - "Copy Env": "Salin Env", - "Conf Dir": "Direktori Konfigurasi", - "Core Dir": "Direktori Core", - "Logs Dir": "Direktori Log", - "Open Dir": "Buka Direktori", - "More": "Lainnya", - "Rule Mode": "Mode Aturan", - "Global Mode": "Mode Global", - "Direct Mode": "Mode Langsung", "Proxies": "Proksi", "Proxy Groups": "Grup Proksi", "Proxy Provider": "Penyedia Proksi", @@ -198,7 +165,6 @@ "Table View": "Tampilan Tabel", "List View": "Tampilan Daftar", "Close All": "Tutup Semua", - "Default": "Default", "Download Speed": "Kecepatan Unduh", "Upload Speed": "Kecepatan Unggah", "Host": "Host", @@ -229,6 +195,8 @@ "Settings": "Pengaturan", "System Setting": "Pengaturan Sistem", "Tun Mode": "Mode Tun (NIC Virtual)", + "TUN requires Service Mode": "Mode TUN memerlukan layanan", + "Install Service": "Instal Layanan", "Reset to Default": "Setel Ulang ke Default", "Tun Mode Info": "Mode Tun 
(NIC Virtual): Menangkap semua lalu lintas sistem, saat diaktifkan, tidak perlu mengaktifkan proksi sistem.", "Stack": "Tumpukan Tun", @@ -312,7 +280,8 @@ "Open UWP tool": "Buka alat UWP", "Open UWP tool Info": "Sejak Windows 8, aplikasi UWP (seperti Microsoft Store) dibatasi dari mengakses layanan jaringan host lokal secara langsung, dan alat ini dapat digunakan untuk melewati pembatasan ini", "Update GeoData": "Perbarui GeoData", - "Verge Setting": "Pengaturan Verge", + "Verge Basic Setting": "Pengaturan Dasar Verge", + "Verge Advanced Setting": "Pengaturan Lanjutan Verge", "Language": "Bahasa", "Theme Mode": "Mode Tema", "theme.light": "Terang", @@ -399,6 +368,7 @@ "Profile Reactivated": "Profil Diaktifkan Kembali", "Only YAML Files Supported": "Hanya File YAML yang Didukung", "Settings Applied": "Pengaturan Diterapkan", + "Installing Service...": "Memasang Layanan...", "Service Installed Successfully": "Layanan Berhasil Diinstal", "Service Uninstalled Successfully": "Layanan Berhasil Dicopot", "Proxy Daemon Duration Cannot be Less than 1 Second": "Durasi Daemon Proksi Tidak Boleh Kurang dari 1 Detik", @@ -437,6 +407,38 @@ "Confirm to restore this backup file?": "Konfirmasi untuk memulihkan file cadangan ini?", "Restore Success, App will restart in 1s": "Pemulihan Berhasil, Aplikasi akan dimulai ulang dalam 1 detik", "Failed to fetch backup files": "Gagal mengambil file cadangan", + "Profile": "Profil", + "Help": "Bantuan", + "About": "Tentang", + "Theme": "Tema", + "Main Window": "Jendela Utama", + "Group Icon": "Ikon Grup", + "Menu Icon": "Ikon Menu", + "PAC File": "Berkas PAC", + "Web UI": "Antarmuka Web", + "Hotkeys": "Pintasan", + "Verge Mixed Port": "Port Campuran Verge", + "Verge Socks Port": "Port Socks Verge", + "Verge Redir Port": "Port Pengalihan Verge", + "Verge Tproxy Port": "Port Tproxy Verge", + "Verge Port": "Port Verge", + "Verge HTTP Enabled": "HTTP Verge Diaktifkan", + "WebDAV URL": "URL WebDAV", + "WebDAV Username": "Nama Pengguna WebDAV", + 
"WebDAV Password": "Kata Sandi WebDAV", + "Dashboard": "Dasbor", + "Restart App": "Mulai Ulang Aplikasi", + "Restart Clash Core": "Mulai Ulang Core Clash", + "TUN Mode": "Mode TUN", + "Copy Env": "Salin Env", + "Conf Dir": "Direktori Konfigurasi", + "Core Dir": "Direktori Core", + "Logs Dir": "Direktori Log", + "Open Dir": "Buka Direktori", + "More": "Lainnya", + "Rule Mode": "Mode Aturan", + "Global Mode": "Mode Global", + "Direct Mode": "Mode Langsung", "Enable Tray Speed": "Aktifkan Tray Speed", "LightWeight Mode": "Mode Ringan", "LightWeight Mode Info": "Tutup GUI dan biarkan hanya kernel yang berjalan", @@ -450,10 +452,8 @@ "Script File Error": "Kesalahan file skrip, perubahan dibatalkan", "Core Changed Successfully": "Inti berhasil diubah", "Failed to Change Core": "Gagal mengubah inti", - "Verge Basic Setting": "Pengaturan Dasar Verge", - "Verge Advanced Setting": "Pengaturan Lanjutan Verge", - "TUN requires Service Mode": "Mode TUN memerlukan layanan", - "Install Service": "Instal Layanan", - "Installing Service...": "Memasang Layanan...", - "Service Administrator Prompt": "Clash Verge memerlukan hak administrator untuk menginstal ulang layanan sistem" + "Service Administrator Prompt": "Clash Verge memerlukan hak administrator untuk menginstal ulang layanan sistem", + "Default": "Default", + "Label-Test": "Tes", + "Verge Setting": "Pengaturan Verge" } diff --git a/src/locales/jp.json b/src/locales/jp.json index 1d4ad133..ae3f363e 100644 --- a/src/locales/jp.json +++ b/src/locales/jp.json @@ -195,10 +195,11 @@ "Settings": "設定", "System Setting": "システム設定", "Tun Mode": "仮想ネットワークカードモード", - "TUN requires Service Mode or Admin Mode": "TUNモードはサービスモードまたは管理者モードが必要です", "Install Service": "サービスをインストール", + "Uninstall Service": "サービスのアンインストール", "Reset to Default": "デフォルト値にリセット", "Tun Mode Info": "TUN(仮想ネットワークカード)モードはシステムのすべてのトラフィックを制御します。有効にすると、システムプロキシを開く必要はありません。", + "TUN requires Service Mode or Admin Mode": "TUNモードはサービスモードまたは管理者モードが必要です", "System Proxy Enabled": 
"システムプロキシが有効になっています。アプリケーションはプロキシを通じてネットワークにアクセスします。", "System Proxy Disabled": "システムプロキシが無効になっています。ほとんどのユーザーはこのオプションをオンにすることをお勧めします。", "TUN Mode Enabled": "TUNモードが有効になっています。アプリケーションは仮想ネットワークカードを通じてネットワークにアクセスします。", @@ -254,6 +255,7 @@ "Unified Delay Info": "統一遅延を有効にすると、2回の遅延テストが行われ、接続ハンドシェイクなどによる異なるタイプのノードの遅延差を解消します。", "Log Level": "ログレベル", "Log Level Info": "ログディレクトリのServiceフォルダ内のコアログファイルにのみ適用されます。", + "Port Config": "ポート設定", "Random Port": "ランダムポート", "Mixed Port": "混合プロキシポート", "Socks Port": "SOCKSプロキシポート", @@ -370,17 +372,16 @@ "Stopping Core...": "コアを停止中...", "Restarting Core...": "コアを再起動中...", "Installing Service...": "サービスをインストール中...", - "Uninstall Service": "サービスのアンインストール", - "Service Installed Successfully": "サービスのインストールに成功しました。", - "Service is ready and core restarted": "サービスが準備完了し、コアが再起動されました。", - "Core restarted. Service is now available.": "コアが再起動され、サービスが利用可能になりました。", - "Service was ready, but core restart might have issues or service became unavailable. Please check.": "サービスは準備が整っていましたが、コアの再起動に問題が発生したか、サービスが利用できなくなった可能性があります。ご確認ください。", - "Service installation or core restart encountered issues. Service might not be available. Please check system logs.": "サービスのインストールまたはコアの再起動中に問題が発生しました。サービスが利用できない可能性があります。システムログを確認してください。", "Uninstalling Service...": "サービスをアンインストール中...", - "Waiting for service to be ready...": "サービスの準備を待っています...", + "Service Installed Successfully": "サービスのインストールに成功しました。", "Service Uninstalled Successfully": "サービスのアンインストールに成功しました。", "Proxy Daemon Duration Cannot be Less than 1 Second": "プロキシデーモンの間隔は1秒以上に設定する必要があります。", "Invalid Bypass Format": "無効なバイパス形式", + "Waiting for service to be ready...": "サービスの準備を待っています...", + "Service was ready, but core restart might have issues or service became unavailable. Please check.": "サービスは準備が整っていましたが、コアの再起動に問題が発生したか、サービスが利用できなくなった可能性があります。ご確認ください。", + "Service installation or core restart encountered issues. Service might not be available. 
Please check system logs.": "サービスのインストールまたはコアの再起動中に問題が発生しました。サービスが利用できない可能性があります。システムログを確認してください。", + "Service is ready and core restarted": "サービスが準備完了し、コアが再起動されました。", + "Core restarted. Service is now available.": "コアが再起動され、サービスが利用可能になりました。", "Core Version Updated": "コアバージョンが更新されました。", "Clash Core Restarted": "Clashコアが再起動されました。", "GeoData Updated": "GeoDataが更新されました。", @@ -558,12 +559,9 @@ "Disallowed ISP": "許可されていないインターネットサービスプロバイダー", "Originals Only": "オリジナルのみ", "Unsupported Country/Region": "サポートされていない国/地域", + "Configuration saved successfully": "ランダム設定を保存完了", "Controller address copied to clipboard": "API ポートがクリップボードにコピーされました", "Secret copied to clipboard": "API キーがクリップボードにコピーされました", - "Copy to clipboard": "クリックしてコピー", - "Port Config": "ポート設定", - "Configuration saved successfully": "ランダム設定を保存完了", - "Enable one-click random API port and key. Click to randomize the port and key": "ワンクリックでランダム API ポートとキーを有効化。ポートとキーをランダム化するにはクリックしてください", "Batch Operations": "バッチ操作", "Delete Selected Profiles": "選択したプロファイルを削除", "Deselect All": "すべての選択を解除", @@ -571,5 +569,7 @@ "items": "アイテム", "Select All": "すべて選択", "Selected": "選択済み", - "Selected profiles deleted successfully": "選択したプロファイルが正常に削除されました" + "Selected profiles deleted successfully": "選択したプロファイルが正常に削除されました", + "Copy to clipboard": "クリックしてコピー", + "Enable one-click random API port and key. 
Click to randomize the port and key": "ワンクリックでランダム API ポートとキーを有効化。ポートとキーをランダム化するにはクリックしてください" } diff --git a/src/locales/ko.json b/src/locales/ko.json index 6c7a6073..07173504 100644 --- a/src/locales/ko.json +++ b/src/locales/ko.json @@ -127,6 +127,7 @@ "Lazy": "지연 로딩", "Timeout": "타임아웃", "Max Failed Times": "최대 실패 횟수", + "Interface Name": "인터페이스 이름", "Routing Mark": "라우팅 마크", "Include All": "모든 프록시 및 제공자 포함", "Include All Providers": "모든 제공자 포함", @@ -198,176 +199,52 @@ "Edit Test": "테스트 편집", "Icon": "아이콘", "Test URL": "테스트 URL", - "Timeout (ms)": "타임아웃 (ms)", - "Expected": "예상됨", - "URL": "URL", - "Method": "메소드", - "Failed": "실패", - "Succeed": "성공", "Settings": "설정", - "Core Config": "코어 설정", - "Clash Setting": "Clash 설정", - "Verge Setting": "Verge 설정", "System Setting": "시스템 설정", - "Appearance": "외관", - "Experimental Features": "실험적 기능", - "Others": "기타", - "Mixed Port": "혼합 포트", - "Allow LAN": "LAN 허용", - "IPv6": "IPv6", - "Log Level": "로그 레벨", - "Core Type": "코어 유형", - "General": "일반", - "Mode": "모드", "Tun Mode": "Tun 모드", - "Transparent Proxy": "투명 프록시", - "Specify YAML": "YAML 지정", - "Status": "상태", - "Memory Usage": "메모리 사용량", "Stack": "스택", - "Network": "네트워크", - "MTU": "MTU", "Auto Route": "자동 라우팅", "Auto Detect Interface": "인터페이스 자동 감지", - "Interface Name": "인터페이스 이름", - "Endpoint Independent Nat": "엔드포인트 독립 NAT", - "Include Reserved": "예약된 IP 포함", - "Enable Default DNS Hijack": "기본 DNS 하이재킹 활성화", - "TCP Fast Open": "TCP 빠른 열기", - "Silent Start": "자동 시작", - "TcpConcurrent": "TCP 동시성", + "MTU": "MTU", "Service Mode": "서비스 모드", "System Proxy": "시스템 프록시", - "Start With System": "시스템과 함께 시작", - "Set System Proxy": "시스템 프록시 설정", - "Set as System Proxy": "시스템 프록시로 설정", - "System Proxy Status": "시스템 프록시 상태", - "Start Option": "시작 옵션", - "Start Core on Start": "시작 시 코어 시작", - "Start Core with System": "시스템과 함께 코어 시작", - "Start Core with System Proxy": "시스템 프록시와 함께 코어 시작", - "Start Core with Tun": "Tun과 함께 코어 시작", - "Silent Start Option": "자동 시작 옵션", - "Hidden 
Window on Start": "시작 시 창 숨기기", - "Log Notice": "로그 알림", - "Warning": "경고", - "Error": "오류", + "Silent Start": "자동 시작", + "Clash Setting": "Clash 설정", + "IPv6": "IPv6", + "Log Level": "로그 레벨", + "Mixed Port": "혼합 포트", "Verge Basic Setting": "Verge 기본 설정", "Language": "언어", "Theme Mode": "테마 모드", "Tray Click Event": "트레이 클릭 이벤트", "Show Main Window": "메인 창 표시", "Show Tray Menu": "트레이 메뉴 표시", - "Open Config Folder": "설정 폴더 열기", - "Open Dashboard": "대시보드 열기", - "Hotkey Setting": "단축키 설정", - "Misc Setting": "기타 설정", - "Layout Setting": "레이아웃 설정", - "Update Setting": "업데이트 설정", - "Enable Hotkeys": "단축키 활성화", - "System Hotkey": "시스템 단축키", - "Hotkey Enable": "단축키 활성화", - "Require Clash Core Running": "Clash 코어 실행 필요", "Copy Env Type": "환경 유형 복사", + "Copy Success": "복사 성공", "Start Page": "시작 페이지", "Startup Script": "시작 스크립트", - "Icon Group Type": "아이콘 그룹 유형", - "Always": "항상", - "On Update": "업데이트 시", - "By Traffic": "트래픽별", - "Web UI List": "웹 UI 목록", - "Installed Web UI": "설치된 웹 UI", - "Built-in Web UI": "내장 웹 UI", - "Current Config": "현재 설정", - "System Config": "시스템 설정", - "Port": "포트", - "WebUI Current Port": "웹 UI 현재 포트", - "Theme": "테마", - "Light": "라이트", - "Dark": "다크", - "Auto": "자동", - "System": "시스템", - "Proxy Item Width": "프록시 항목 너비", - "Proxy Item Height": "프록시 항목 높이", - "Compact Mode": "압축 모드", - "Git Proxy": "Git 프록시", - "Enable API": "API 활성화", - "Enable Lan": "LAN 활성화", - "Select a config file": "설정 파일 선택", - "Open Config Dir": "설정 디렉토리 열기", - "System Proxy Permission": "시스템 프록시 권한", - "System Stack Type": "시스템 스택 유형", - "Undefined stack": "정의되지 않은 스택", - "Auto Start": "자동 시작", - "Mixin": "혼합", - "Set as System Auto Proxy": "시스템 자동 프록시로 설정", - "System Auto Proxy Status": "시스템 자동 프록시 상태", - "Authorization for requests coming through HTTP Proxy (e.g. 
local connections)": "HTTP 프록시를 통한 요청에 대한 인증 (예: 로컬 연결)", + "Primary Color": "기본 색상", + "Layout Setting": "레이아웃 설정", + "Traffic Graph": "트래픽 그래프", + "Memory Usage": "메모리 사용량", + "Auto Delay Detection": "자동 지연 감지", + "Auto Delay Detection Info": "백그라운드에서 현재 노드의 지연을 주기적으로 검사합니다", + "Hotkey Setting": "단축키 설정", + "Filter": "필터", + "Import Subscription Successful": "구독 가져오기 성공", "Username": "사용자 이름", "Password": "비밀번호", - "Auth Proxy": "인증 프록시", - "Geox User": "Geox 사용자", - "Geox Password": "Geox 비밀번호", - "Log File": "로그 파일", - "Enable Clash.Meta Logs": "Clash.Meta 로그 활성화", - "Verge Log": "Verge 로그", - "Enable Verge Logs": "Verge 로그 활성화", - "Traffic Graph": "트래픽 그래프", - "Profile Token": "프로필 토큰", - "Profile User Agent": "프로필 사용자 에이전트", - "The User Agent to use when refreshing a subscription profile.": "구독 프로필을 새로 고칠 때 사용할 사용자 에이전트입니다.", - "Profile Format": "프로필 포맷", - "The expected content type to send in the `Accept` header when refreshing a subscription profile.": "구독 프로필을 새로 고칠 때 `Accept` 헤더에 보낼 예상 컨텐츠 타입입니다.", - "Theme Color": "테마 색상", - "Primary Color": "기본 색상", - "Customize primary color": "기본 색상 사용자 정의", - "Danger Zone": "위험 영역", - "Reset Verge Theme": "Verge 테마 재설정", - "Inject CSS": "CSS 주입", - "Inject a custom CSS content into the GUI": "사용자 정의 CSS 내용을 GUI에 주입", - "Inject HTML": "HTML 주입", - "Inject a custom HTML content into the GUI (appended in body)": "사용자 정의 HTML 내용을 GUI에 주입 (본문에 추가)", - "Capture": "캡처", - "Color Scheme": "색상 구성표", - "Default": "기본값", - "Pink": "분홍색", - "Red": "빨간색", - "Yellow": "노란색", - "Green": "녹색", - "Cyan": "청록색", - "Blue": "파란색", - "Purple": "보라색", - "Proxy Detail": "프록시 상세", - "Address": "주소", - "Filter": "필터", - "Check Updates on Start": "시작 시 업데이트 확인", - "For Alpha Version": "알파 버전용", - "Latest Build Version": "최신 빌드 버전", - "Check Updates": "업데이트 확인", - "Proxy Setting": "프록시 설정", - "WebDav Setting": "WebDav 설정", - "WebDav Upload": "WebDav 업로드", - "WebDav Download": "WebDav 다운로드", - "Clean Cache": "캐시 정리", - "Check Network": "네트워크 
확인", - "WebDav Status": "WebDav 상태", - "WebDav URL": "WebDav URL", - "WebDav Username": "WebDav 사용자 이름", - "WebDav Password": "WebDav 비밀번호", - "Update Interval(minute)": "업데이트 간격(분)", - "Skip Cert Verify": "인증서 확인 건너뛰기", - "Import Subscription Successful": "구독 가져오기 성공", - "Update with Clash proxy successfully": "Clash 프록시로 업데이트 성공", - "Update failed, retrying with Clash proxy...": "업데이트 실패, Clash 프록시로 재시도 중...", - "Update failed even with Clash proxy": "Clash 프록시로도 업데이트 실패", + "Theme": "테마", + "Config Validation Failed": "설정 검증 실패", "Boot Config Validation Failed": "부팅 설정 검증 실패", "Core Change Config Validation Failed": "코어 변경 설정 검증 실패", - "Config Validation Failed": "설정 검증 실패", "Config Validation Process Terminated": "설정 검증 프로세스 종료됨", - "Script File Error": "스크립트 파일 오류", "Script Syntax Error": "스크립트 구문 오류", "Script Missing Main": "스크립트 메인 없음", "File Not Found": "파일을 찾을 수 없음", + "Script File Error": "스크립트 파일 오류", + "Core Changed Successfully": "코어 변경 성공", + "Failed to Change Core": "코어 변경 실패", "YAML Syntax Error": "YAML 구문 오류", "YAML Read Error": "YAML 읽기 오류", "YAML Mapping Error": "YAML 매핑 오류", @@ -377,28 +254,151 @@ "Merge File Mapping Error": "병합 파일 매핑 오류", "Merge File Key Error": "병합 파일 키 오류", "Merge File Error": "병합 파일 오류", - "Core Changed Successfully": "코어 변경 성공", - "Failed to Change Core": "코어 변경 실패", - "Copy Success": "복사 성공", - "Copy Failed": "복사 실패", + "Update failed, retrying with Clash proxy...": "업데이트 실패, Clash 프록시로 재시도 중...", + "Update with Clash proxy successfully": "Clash 프록시로 업데이트 성공", + "Update failed even with Clash proxy": "Clash 프록시로도 업데이트 실패", + "Failed": "실패", + "Address": "주소", + "Allow LAN": "LAN 허용", + "Always": "항상", + "Appearance": "외관", + "Auth Proxy": "인증 프록시", + "Authorization for requests coming through HTTP Proxy (e.g. 
local connections)": "HTTP 프록시를 통한 요청에 대한 인증 (예: 로컬 연결)", + "Auto": "자동", + "Auto Start": "자동 시작", + "Blue": "파란색", + "Built-in Web UI": "내장 웹 UI", + "By Traffic": "트래픽별", "Cannot Import Empty Subscription URL": "빈 구독 URL을 가져올 수 없습니다", - "Profile Already Exists": "프로필이 이미 존재합니다", - "Input Subscription URL": "구독 URL 입력", - "Create Profile Successful": "프로필 생성 성공", + "Capture": "캡처", + "Check Network": "네트워크 확인", + "Check Updates": "업데이트 확인", + "Check Updates on Start": "시작 시 업데이트 확인", + "Clean Cache": "캐시 정리", + "Color Scheme": "색상 구성표", + "Compact Mode": "압축 모드", + "Copy Failed": "복사 실패", + "Core Config": "코어 설정", + "Core Type": "코어 유형", "Create Profile Failed": "프로필 생성 실패", - "Patch Profile Successful": "프로필 패치 성공", - "Patch Profile Failed": "프로필 패치 실패", - "Delete Profile Successful": "프로필 삭제 성공", + "Create Profile Successful": "프로필 생성 성공", + "Current Config": "현재 설정", + "Customize primary color": "기본 색상 사용자 정의", + "Cyan": "청록색", + "Danger Zone": "위험 영역", + "Dark": "다크", + "Default": "기본값", "Delete Profile Failed": "프로필 삭제 실패", - "Select Active Profile Successful": "활성 프로필 선택 성공", + "Delete Profile Successful": "프로필 삭제 성공", + "Enable API": "API 활성화", + "Enable Clash.Meta Logs": "Clash.Meta 로그 활성화", + "Enable Default DNS Hijack": "기본 DNS 하이재킹 활성화", + "Enable Hotkeys": "단축키 활성화", + "Enable Lan": "LAN 활성화", + "Enable Verge Logs": "Verge 로그 활성화", + "Endpoint Independent Nat": "엔드포인트 독립 NAT", + "Error": "오류", + "Expected": "예상됨", + "Experimental Features": "실험적 기능", + "For Alpha Version": "알파 버전용", + "General": "일반", + "Geox Password": "Geox 비밀번호", + "Geox User": "Geox 사용자", + "Git Proxy": "Git 프록시", + "Green": "녹색", + "Hidden Window on Start": "시작 시 창 숨기기", + "Hotkey Enable": "단축키 활성화", + "Icon Group Type": "아이콘 그룹 유형", + "Include Reserved": "예약된 IP 포함", + "Inject CSS": "CSS 주입", + "Inject HTML": "HTML 주입", + "Inject a custom CSS content into the GUI": "사용자 정의 CSS 내용을 GUI에 주입", + "Inject a custom HTML content into the GUI (appended in body)": "사용자 정의 HTML 내용을 GUI에 주입 
(본문에 추가)", + "Input Subscription URL": "구독 URL 입력", + "Installed Web UI": "설치된 웹 UI", + "Latest Build Version": "최신 빌드 버전", + "Light": "라이트", + "Log File": "로그 파일", + "Log Notice": "로그 알림", + "Method": "메소드", + "Misc Setting": "기타 설정", + "Mixin": "혼합", + "Mode": "모드", + "Network": "네트워크", + "On Update": "업데이트 시", + "Open Config Dir": "설정 디렉토리 열기", + "Open Config Folder": "설정 폴더 열기", + "Open Dashboard": "대시보드 열기", + "Others": "기타", + "Patch Profile Failed": "프로필 패치 실패", + "Patch Profile Successful": "프로필 패치 성공", + "Pink": "분홍색", + "Port": "포트", + "Profile Already Exists": "프로필이 이미 존재합니다", + "Profile Format": "프로필 포맷", + "Profile Token": "프로필 토큰", + "Profile User Agent": "프로필 사용자 에이전트", + "Proxy Detail": "프록시 상세", + "Proxy Item Height": "프록시 항목 높이", + "Proxy Item Width": "프록시 항목 너비", + "Proxy Setting": "프록시 설정", + "Purple": "보라색", + "Red": "빨간색", + "Require Clash Core Running": "Clash 코어 실행 필요", + "Reset Verge Theme": "Verge 테마 재설정", "Select Active Profile Failed": "활성 프로필 선택 실패", - "View Profile-Runtime": "프로필-런타임 보기", - "View Profile-Content": "프로필-내용 보기", - "View Profile-Original": "프로필-원본 보기", - "View Profile-Script": "프로필-스크립트 보기", - "View Profile-Merge": "프로필-병합 보기", - "Update Successful": "업데이트 성공", + "Select Active Profile Successful": "활성 프로필 선택 성공", + "Select a config file": "설정 파일 선택", + "Set System Proxy": "시스템 프록시 설정", + "Set as System Auto Proxy": "시스템 자동 프록시로 설정", + "Set as System Proxy": "시스템 프록시로 설정", + "Silent Start Option": "자동 시작 옵션", + "Skip Cert Verify": "인증서 확인 건너뛰기", + "Specify YAML": "YAML 지정", + "Start Core on Start": "시작 시 코어 시작", + "Start Core with System": "시스템과 함께 코어 시작", + "Start Core with System Proxy": "시스템 프록시와 함께 코어 시작", + "Start Core with Tun": "Tun과 함께 코어 시작", + "Start Option": "시작 옵션", + "Start With System": "시스템과 함께 시작", + "Status": "상태", + "Succeed": "성공", + "System": "시스템", + "System Auto Proxy Status": "시스템 자동 프록시 상태", + "System Config": "시스템 설정", + "System Hotkey": "시스템 단축키", + "System Proxy Permission": "시스템 프록시 권한", + 
"System Proxy Status": "시스템 프록시 상태", + "System Stack Type": "시스템 스택 유형", + "TCP Fast Open": "TCP 빠른 열기", + "TcpConcurrent": "TCP 동시성", + "The User Agent to use when refreshing a subscription profile.": "구독 프로필을 새로 고칠 때 사용할 사용자 에이전트입니다.", + "The expected content type to send in the `Accept` header when refreshing a subscription profile.": "구독 프로필을 새로 고칠 때 `Accept` 헤더에 보낼 예상 컨텐츠 타입입니다.", + "Theme Color": "테마 색상", + "Timeout (ms)": "타임아웃 (ms)", + "Transparent Proxy": "투명 프록시", + "URL": "URL", + "Undefined stack": "정의되지 않은 스택", "Update Failed": "업데이트 실패", - "Auto Delay Detection": "자동 지연 감지", - "Auto Delay Detection Info": "백그라운드에서 현재 노드의 지연을 주기적으로 검사합니다" + "Update Interval(minute)": "업데이트 간격(분)", + "Update Setting": "업데이트 설정", + "Update Successful": "업데이트 성공", + "Verge Log": "Verge 로그", + "Verge Setting": "Verge 설정", + "View Profile-Content": "프로필-내용 보기", + "View Profile-Merge": "프로필-병합 보기", + "View Profile-Original": "프로필-원본 보기", + "View Profile-Runtime": "프로필-런타임 보기", + "View Profile-Script": "프로필-스크립트 보기", + "Warning": "경고", + "Web UI List": "웹 UI 목록", + "WebDav Download": "WebDav 다운로드", + "WebDav Password": "WebDav 비밀번호", + "WebDav Setting": "WebDav 설정", + "WebDav Status": "WebDav 상태", + "WebDav URL": "WebDav URL", + "WebDav Upload": "WebDav 업로드", + "WebDav Username": "WebDav 사용자 이름", + "WebUI Current Port": "웹 UI 현재 포트", + "Yellow": "노란색" } diff --git a/src/locales/tr.json b/src/locales/tr.json index 35f8dc0e..79ad6333 100644 --- a/src/locales/tr.json +++ b/src/locales/tr.json @@ -508,7 +508,6 @@ "Fake IP Filter Mode": "Sahte IP Filtre Modu", "Enable IPv6 DNS resolution": "IPv6 DNS çözümlemesini etkinleştir", "Prefer H3": "H3'ü Tercih Et", - "DNS DOH uses HTTP/3": "DNS DOH HTTP/3 kullanır", "Respect Rules": "Kurallara Uy", "DNS connections follow routing rules": "DNS bağlantıları yönlendirme kurallarını takip eder", "Use Hosts": "Hosts Kullan", @@ -616,5 +615,6 @@ "items": "öğeler", "Select All": "Tümünü Seç", "Selected": "Seçildi", - "Selected profiles deleted 
successfully": "Seçili profiller başarıyla silindi" + "Selected profiles deleted successfully": "Seçili profiller başarıyla silindi", + "DNS DOH uses HTTP/3": "DNS DOH HTTP/3 kullanır" } diff --git a/src/locales/tt.json b/src/locales/tt.json index 8f9a96d0..2a4d153f 100644 --- a/src/locales/tt.json +++ b/src/locales/tt.json @@ -21,7 +21,6 @@ "Label-Connections": "Тоташулар", "Label-Rules": "Кагыйдәләр", "Label-Logs": "Логлар", - "Label-Test": "Тест", "Label-Settings": "Көйләүләр", "Proxies": "Прокси", "Proxy Groups": "Прокси төркемнәре", @@ -166,7 +165,6 @@ "Table View": "Таблица күзаллау", "List View": "Исемлек күзаллау", "Close All": "Барысын да ябу", - "Default": "Башлангыч", "Download Speed": "Йөкләү тизлеге", "Upload Speed": "Йөкләү (чыгару) тизлеге", "Host": "Хост", @@ -197,6 +195,8 @@ "Settings": "Көйләүләр", "System Setting": "Система көйләүләре", "Tun Mode": "Tun режимы (виртуаль челтәр адаптеры)", + "TUN requires Service Mode": "TUN режимы хезмәт күрсәтүне таләп итә", + "Install Service": "Хезмәтне урнаштыру", "Reset to Default": "Башлангычка кайтару", "Tun Mode Info": "Tun режимы бөтен системаның трафигын тотып ала. Аны кабызган очракта системалы проксины аерым кабызу таләп ителми.", "Stack": "Стек", @@ -280,7 +280,8 @@ "Open UWP tool": "UWP инструментын ачу", "Open UWP tool Info": "Windows 8'дән башлап UWP кушымталары (Microsoft Store кебек) локаль хосттагы челтәр хезмәтләренә турыдан-туры тоташа алмый. 
Бу инструмент әлеге чикләүне әйләнеп узарга ярдәм итә", "Update GeoData": "GeoData яңарту", - "Verge Setting": "Verge көйләүләре", + "Verge Basic Setting": "Verge Төп көйләүләр", + "Verge Advanced Setting": "Verge Киңәйтелгән көйләүләр", "Language": "Тел", "Theme Mode": "Теманың режимы", "theme.light": "Якты", @@ -296,8 +297,6 @@ "Theme Setting": "Тема көйләүләре", "Primary Color": "Төп төс", "Secondary Color": "Икенче төс", - "Primary Text Color": "Төп текст төсе", - "Secondary Text Color": "Икенче текст төсе", "Info Color": "Мәгълүмат төсе", "Warning Color": "Кисәтү төсе", "Error Color": "Хата төсе", @@ -367,6 +366,7 @@ "Profile Reactivated": "Профиль яңадан активлаштырылды", "Only YAML Files Supported": "Фәкать YAML-файллар гына хуплана", "Settings Applied": "Көйләүләр кулланылды", + "Installing Service...": "Хезмәт урнаштырыла...", "Service Installed Successfully": "Сервис уңышлы урнаштырылды", "Service Uninstalled Successfully": "Сервис уңышлы салдырылды", "Proxy Daemon Duration Cannot be Less than 1 Second": "Прокси-демон эш вакыты 1 секундтан ким була алмый", @@ -380,7 +380,6 @@ "Clash Core Restarted": "Clash ядросы яңадан башланды", "GeoData Updated": "GeoData яңартылды", "Currently on the Latest Version": "Сездә иң соңгы версия урнаштырылган", - "Import subscription successful": "Подписка уңышлы импортланды", "WebDAV Server URL": "WebDAV сервер URL-ы (http(s)://)", "Username": "Кулланучы исеме", "Password": "Пароль", @@ -450,10 +449,11 @@ "Script File Error": "Скрипт файлы хатасы, үзгәрешләр кире кайтарылды", "Core Changed Successfully": "Ядро уңышлы алыштырылды", "Failed to Change Core": "Ядро алыштыру уңышсыз булды", - "Verge Basic Setting": "Verge Төп көйләүләр", - "Verge Advanced Setting": "Verge Киңәйтелгән көйләүләр", - "TUN requires Service Mode": "TUN режимы хезмәт күрсәтүне таләп итә", - "Install Service": "Хезмәтне урнаштыру", - "Installing Service...": "Хезмәт урнаштырыла...", - "Service Administrator Prompt": "Clash Verge система хезмәтен 
яңадан урнаштыру өчен администратор хокукларын таләп итә" + "Service Administrator Prompt": "Clash Verge система хезмәтен яңадан урнаштыру өчен администратор хокукларын таләп итә", + "Default": "Башлангыч", + "Import subscription successful": "Подписка уңышлы импортланды", + "Label-Test": "Тест", + "Primary Text Color": "Төп текст төсе", + "Secondary Text Color": "Икенче текст төсе", + "Verge Setting": "Verge көйләүләре" } diff --git a/src/locales/zh.json b/src/locales/zh.json index e17e33f9..1e320fd8 100644 --- a/src/locales/zh.json +++ b/src/locales/zh.json @@ -305,7 +305,7 @@ "Socks Port": "SOCKS 代理端口", "Http Port": "HTTP(S) 代理端口", "Redir Port": "Redir 透明代理端口", - "TPROXY Port": "TPROXY 透明代理端口", + "Tproxy Port": "Tproxy 透明代理端口", "Port settings saved": "端口设置已保存", "Failed to save port settings": "端口设置保存失败", "External": "外部控制", @@ -426,6 +426,8 @@ "Uninstalling Service...": "卸载服务中...", "Service Installed Successfully": "已成功安装服务", "Service Uninstalled Successfully": "已成功卸载服务", + "Proxy Daemon Duration Cannot be Less than 1 Second": "代理守护间隔时间不得低于 1 秒", + "Invalid Bypass Format": "无效的代理绕过格式", "Waiting for service to be ready...": "等待服务准备就绪...", "Service not ready, retrying attempt {count}/{total}...": "服务未就绪,正在重试 {{count}}/{{total}} 次...", "Failed to check service status, retrying attempt {count}/{total}...": "检查服务状态失败,正在重试 {{count}}/{{total}} 次...", @@ -436,8 +438,6 @@ "Fallback core restart also failed: {message}": "后备内核重启也失败了: {{message}}", "Service is ready and core restarted": "服务已就绪,内核已重启", "Core restarted. 
Service is now available.": "内核已重启,服务现已可用", - "Proxy Daemon Duration Cannot be Less than 1 Second": "代理守护间隔时间不得低于 1 秒", - "Invalid Bypass Format": "无效的代理绕过格式", "Clash Port Modified": "Clash 端口已修改", "Port Conflict": "端口冲突", "Restart Application to Apply Modifications": "重启 Verge 以应用修改", @@ -715,5 +715,6 @@ "Unlock menu order": "解锁菜单排序", "Lock menu order": "锁定菜单排序", "Open App Log": "应用日志", - "Open Core Log": "内核日志" + "Open Core Log": "内核日志", + "TPROXY Port": "TPROXY 透明代理端口" } diff --git a/src/locales/zhtw.json b/src/locales/zhtw.json index 1d4f0941..fb4bc66e 100644 --- a/src/locales/zhtw.json +++ b/src/locales/zhtw.json @@ -217,7 +217,6 @@ "Settings": "設定", "System Setting": "系統設定", "Tun Mode": "虛擬網路介面卡模式", - "TUN requires Service Mode or Admin Mode": "虛擬網路介面卡模式需要服務模式或管理員模式", "Install Service": "安裝服務", "Install Service failed": "安裝服務失敗", "Uninstall Service": "解除安裝服務", @@ -305,7 +304,6 @@ "Socks Port": "SOCKS 代理連接埠", "Http Port": "HTTP(S) 代理連接埠", "Redir Port": "Redir 透明代理連接埠", - "TPROXY Port": "TPROXY 透明代理連接埠", "Port settings saved": "連結埠設定已儲存", "Failed to save port settings": "連結埠設定儲存失敗", "External": "外部控制", @@ -426,6 +424,8 @@ "Uninstalling Service...": "服務解除安裝中...", "Service Installed Successfully": "已成功安裝服務", "Service Uninstalled Successfully": "已成功解除安裝服務", + "Proxy Daemon Duration Cannot be Less than 1 Second": "代理守護間隔時間不得低於 1 秒", + "Invalid Bypass Format": "無效的代理繞過格式", "Waiting for service to be ready...": "等待服務就緒...", "Service not ready, retrying attempt {count}/{total}...": "服務未就緒,正在重試 {{count}}/{{total}} 次...", "Failed to check service status, retrying attempt {count}/{total}...": "檢查服務狀態失敗,正在重試 {{count}}/{{total}} 次...", @@ -436,8 +436,6 @@ "Fallback core restart also failed: {message}": "被園內核重新啟動也失敗了:{{message}}", "Service is ready and core restarted": "服務已就緒,內核已重啟", "Core restarted. 
Service is now available.": "內核已重啟,服務已就緒", - "Proxy Daemon Duration Cannot be Less than 1 Second": "代理守護間隔時間不得低於 1 秒", - "Invalid Bypass Format": "無效的代理繞過格式", "Clash Port Modified": "Clash 連結埠已修改", "Port Conflict": "連結埠衝突", "Restart Application to Apply Modifications": "重新啟動 Verge 以套用修改", @@ -713,5 +711,6 @@ "Allow Auto Update": "允許自動更新", "Menu reorder mode": "選單排序模式", "Unlock menu order": "解鎖選單排序", - "Lock menu order": "鎖定選單排序" + "Lock menu order": "鎖定選單排序", + "TPROXY Port": "TPROXY 透明代理連接埠" } From d094d3885cdb66c18d059d30ff07369ac09726af Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Fri, 31 Oct 2025 10:41:48 +0800 Subject: [PATCH 39/70] chore: move CONTRIBUTING_i18n.md to /docs --- CONTRIBUTING.md | 2 +- CONTRIBUTING_i18n.md => docs/CONTRIBUTING_i18n.md | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename CONTRIBUTING_i18n.md => docs/CONTRIBUTING_i18n.md (100%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index debd1009..e23cd352 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ Thank you for your interest in contributing to Clash Verge Rev! This document pr ## Internationalization (i18n) -We welcome translations and improvements to existing locales. Please follow the detailed guidelines in [CONTRIBUTING_i18n.md](CONTRIBUTING_i18n.md) for instructions on extracting strings, file naming conventions, testing translations, and submitting translation PRs. +We welcome translations and improvements to existing locales. Please follow the detailed guidelines in [CONTRIBUTING_i18n.md](docs/CONTRIBUTING_i18n.md) for instructions on extracting strings, file naming conventions, testing translations, and submitting translation PRs. 
## Development Setup diff --git a/CONTRIBUTING_i18n.md b/docs/CONTRIBUTING_i18n.md similarity index 100% rename from CONTRIBUTING_i18n.md rename to docs/CONTRIBUTING_i18n.md From 5e7adf76ca79b5ebcafef9dabaad992074bb7cd1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 10:46:42 +0800 Subject: [PATCH 40/70] chore(deps): update npm dependencies (#5258) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 4 ++-- pnpm-lock.yaml | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/package.json b/package.json index 07418b07..f29443f6 100644 --- a/package.json +++ b/package.json @@ -68,7 +68,7 @@ "react-dom": "19.2.0", "react-error-boundary": "6.0.0", "react-hook-form": "^7.65.0", - "react-i18next": "16.2.2", + "react-i18next": "16.2.3", "react-markdown": "10.1.0", "react-monaco-editor": "0.59.0", "react-router": "^7.9.5", @@ -112,7 +112,7 @@ "node-fetch": "^3.3.2", "prettier": "^3.6.2", "sass": "^1.93.2", - "tar": "^7.5.1", + "tar": "^7.5.2", "terser": "^5.44.0", "typescript": "^5.9.3", "typescript-eslint": "^8.46.2", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a1d8151c..77969e82 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -111,8 +111,8 @@ importers: specifier: ^7.65.0 version: 7.65.0(react@19.2.0) react-i18next: - specifier: 16.2.2 - version: 16.2.2(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3) + specifier: 16.2.3 + version: 16.2.3(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3) react-markdown: specifier: 10.1.0 version: 10.1.0(@types/react@19.2.2)(react@19.2.0) @@ -238,8 +238,8 @@ importers: specifier: ^1.93.2 version: 1.93.2 tar: - specifier: ^7.5.1 - version: 7.5.1 + specifier: ^7.5.2 + version: 7.5.2 terser: specifier: ^5.44.0 version: 5.44.0 @@ -3627,8 +3627,8 @@ packages: peerDependencies: react: 
^16.8.0 || ^17 || ^18 || ^19 - react-i18next@16.2.2: - resolution: {integrity: sha512-iKgJMloKpkLUCy6w/0BEJSVQ5jG2WbnFiO2w/9wwa8nc+obsEZjQErRCw27O7BHlpscKfpWSu5vTnC+3fBMQfQ==} + react-i18next@16.2.3: + resolution: {integrity: sha512-O0t2zvmIz7nHWKNfIL+O/NTIbpTaOPY0vZov779hegbep3IZ+xcqkeVPKWBSXwzdkiv77q8zmq9toKIUys1x3A==} peerDependencies: i18next: '>= 25.5.2' react: '>= 16.8.0' @@ -3971,8 +3971,8 @@ packages: systemjs@6.15.1: resolution: {integrity: sha512-Nk8c4lXvMB98MtbmjX7JwJRgJOL8fluecYCfCeYBznwmpOs8Bf15hLM6z4z71EDAhQVrQrI+wt1aLWSXZq+hXA==} - tar@7.5.1: - resolution: {integrity: sha512-nlGpxf+hv0v7GkWBK2V9spgactGOp0qvfWRxUMjqHyzrt3SgwE48DIv/FhqPHJYLHpgW1opq3nERbz5Anq7n1g==} + tar@7.5.2: + resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} tauri-plugin-mihomo-api@https://codeload.github.com/clash-verge-rev/tauri-plugin-mihomo/tar.gz/dcb6b5a6753233422e7cea23042239c7994c605c: @@ -8204,7 +8204,7 @@ snapshots: dependencies: react: 19.2.0 - react-i18next@16.2.2(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3): + react-i18next@16.2.3(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3): dependencies: '@babel/runtime': 7.28.4 html-parse-stringify: 3.0.1 @@ -8629,7 +8629,7 @@ snapshots: systemjs@6.15.1: {} - tar@7.5.1: + tar@7.5.2: dependencies: '@isaacs/fs-minipass': 4.0.1 chownr: 3.0.0 From 85ff296912e5acdfb360e0a722e133d9d98fb9f7 Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Fri, 31 Oct 2025 11:28:14 +0800 Subject: [PATCH 41/70] chore(deps): update deps --- src-tauri/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index f3253251..cc6caacc 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -41,7 +41,7 @@ tokio = { version = "1.48.0", features = [ serde = { version = "1.0.228", features = ["derive"] 
} reqwest = { version = "0.12.24", features = ["json", "cookies"] } regex = "1.12.2" -sysproxy = { version = "0.3.1", git = "https://github.com/clash-verge-rev/sysproxy-rs" } +sysproxy = { git = "https://github.com/clash-verge-rev/sysproxy-rs" } tauri = { version = "2.9.1", features = [ "protocol-asset", "devtools", From 7b7fa2239bb562d80106ede27bcf5507200130a4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 13:36:35 +0800 Subject: [PATCH 42/70] chore(deps): update dependency dayjs to v1.11.19 (#5261) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index f29443f6..89800442 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "@types/json-schema": "^7.0.15", "ahooks": "^3.9.6", "axios": "^1.13.1", - "dayjs": "1.11.18", + "dayjs": "1.11.19", "foxact": "^0.2.49", "i18next": "^25.6.0", "js-yaml": "^4.1.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 77969e82..1d244ff3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -72,8 +72,8 @@ importers: specifier: ^1.13.1 version: 1.13.1 dayjs: - specifier: 1.11.18 - version: 1.11.18 + specifier: 1.11.19 + version: 1.11.19 foxact: specifier: ^0.2.49 version: 0.2.49(react@19.2.0) @@ -2363,8 +2363,8 @@ packages: resolution: {integrity: sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==} engines: {node: '>= 0.4'} - dayjs@1.11.18: - resolution: {integrity: sha512-zFBQ7WFRvVRhKcWoUh+ZA1g2HVgUbsZm9sbddh8EC5iv93sui8DVVz1Npvz+r6meo9VKfa8NyLWBsQK1VvIKPA==} + dayjs@1.11.19: + resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} debug@3.2.7: resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} @@ 
-6261,7 +6261,7 @@ snapshots: dependencies: '@babel/runtime': 7.28.4 '@types/js-cookie': 3.0.6 - dayjs: 1.11.18 + dayjs: 1.11.19 intersection-observer: 0.12.2 js-cookie: 3.0.5 lodash: 4.17.21 @@ -6597,7 +6597,7 @@ snapshots: is-data-view: 1.0.2 optional: true - dayjs@1.11.18: {} + dayjs@1.11.19: {} debug@3.2.7: dependencies: From 5187712a717f9eeea2add6d7359cd9b97447cc4e Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 16:55:52 +0800 Subject: [PATCH 43/70] chore(deps): remove zustand and update vite-plugin-monaco-editor to esm version --- package.json | 5 ++--- pnpm-lock.yaml | 39 ++++++--------------------------------- vite.config.mts | 7 +++---- 3 files changed, 11 insertions(+), 40 deletions(-) diff --git a/package.json b/package.json index 89800442..a175ce1d 100644 --- a/package.json +++ b/package.json @@ -75,8 +75,7 @@ "react-virtuoso": "^4.14.1", "swr": "^2.3.6", "tauri-plugin-mihomo-api": "git+https://github.com/clash-verge-rev/tauri-plugin-mihomo", - "types-pac": "^1.0.3", - "zustand": "^5.0.8" + "types-pac": "^1.0.3" }, "devDependencies": { "@actions/github": "^6.0.1", @@ -117,7 +116,7 @@ "typescript": "^5.9.3", "typescript-eslint": "^8.46.2", "vite": "^7.1.12", - "vite-plugin-monaco-editor": "^1.1.0", + "vite-plugin-monaco-editor-esm": "^2.0.2", "vite-plugin-svgr": "^4.5.0", "vitest": "^4.0.5" }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1d244ff3..93569431 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -134,9 +134,6 @@ importers: types-pac: specifier: ^1.0.3 version: 1.0.3 - zustand: - specifier: ^5.0.8 - version: 5.0.8(@types/react@19.2.2)(react@19.2.0)(use-sync-external-store@1.6.0(react@19.2.0)) devDependencies: '@actions/github': specifier: ^6.0.1 @@ -252,9 +249,9 @@ importers: vite: specifier: ^7.1.12 version: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) - vite-plugin-monaco-editor: - specifier: ^1.1.0 - version: 1.1.0(monaco-editor@0.54.0) + 
vite-plugin-monaco-editor-esm: + specifier: ^2.0.2 + version: 2.0.2(monaco-editor@0.54.0) vite-plugin-svgr: specifier: ^4.5.0 version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) @@ -4145,8 +4142,8 @@ packages: vfile@6.0.3: resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} - vite-plugin-monaco-editor@1.1.0: - resolution: {integrity: sha512-IvtUqZotrRoVqwT0PBBDIZPNraya3BxN/bfcNfnxZ5rkJiGcNtO5eAOWWSgT7zullIAEqQwxMU83yL9J5k7gww==} + vite-plugin-monaco-editor-esm@2.0.2: + resolution: {integrity: sha512-XVkOpL/r0rw1NpbO30vUwG4S0THkC9KB1vjjV8olGd49h4/EQsKl3DrxB6KRDwyZNC9mKiiZgk2L6njUYj3oKQ==} peerDependencies: monaco-editor: '>=0.33.0' @@ -4327,24 +4324,6 @@ packages: zod@4.1.12: resolution: {integrity: sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==} - zustand@5.0.8: - resolution: {integrity: sha512-gyPKpIaxY9XcO2vSMrLbiER7QMAMGOQZVRdJ6Zi782jkbzZygq5GI9nG8g+sMgitRtndwaBSl7uiqC49o1SSiw==} - engines: {node: '>=12.20.0'} - peerDependencies: - '@types/react': '>=18.0.0' - immer: '>=9.0.6' - react: '>=18.0.0' - use-sync-external-store: '>=1.2.0' - peerDependenciesMeta: - '@types/react': - optional: true - immer: - optional: true - react: - optional: true - use-sync-external-store: - optional: true - zwitch@2.0.4: resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} @@ -8861,7 +8840,7 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.2 - vite-plugin-monaco-editor@1.1.0(monaco-editor@0.54.0): + vite-plugin-monaco-editor-esm@2.0.2(monaco-editor@0.54.0): dependencies: monaco-editor: 0.54.0 @@ -9040,10 +9019,4 @@ snapshots: zod@4.1.12: {} - zustand@5.0.8(@types/react@19.2.2)(react@19.2.0)(use-sync-external-store@1.6.0(react@19.2.0)): - optionalDependencies: - '@types/react': 19.2.2 - react: 19.2.0 - 
use-sync-external-store: 1.6.0(react@19.2.0) - zwitch@2.0.4: {} diff --git a/vite.config.mts b/vite.config.mts index d7307c6d..71305ec6 100644 --- a/vite.config.mts +++ b/vite.config.mts @@ -4,12 +4,11 @@ import legacy from "@vitejs/plugin-legacy"; import react from "@vitejs/plugin-react-swc"; import monacoEditorPlugin, { type IMonacoEditorOpts, -} from "vite-plugin-monaco-editor"; +} from "vite-plugin-monaco-editor-esm"; import svgr from "vite-plugin-svgr"; import { defineConfig } from "vitest/config"; -const monacoEditorPluginDefault = (monacoEditorPlugin as any).default as ( - options: IMonacoEditorOpts, -) => any; +const monacoEditorPluginDefault = ((monacoEditorPlugin as any).default ?? + monacoEditorPlugin) as (options: IMonacoEditorOpts) => any; export default defineConfig({ root: "src", From 8c734a5a35abbca6991f99620725dfea68fed910 Mon Sep 17 00:00:00 2001 From: oomeow Date: Fri, 31 Oct 2025 17:31:40 +0800 Subject: [PATCH 44/70] fix: disable tun mode menu on tray when tun mode is unavailable (#4975) * fix: check if service installed when toggle tun mode on tray * chore: cargo fmt * fix: auto disable tun mode * docs: update UPDATELOG.md * fix: init Tun mode status * chore: update * feat: disable tun mode tray menu when tun mode is unavailable * fix: restart core when uninstall service is canceled * chore: remove check notification when toggle tun mode * chore: fix updatelog --------- Co-authored-by: Tunglies <77394545+Tunglies@users.noreply.github.com> --- UPDATELOG.md | 2 + src-tauri/src/config/config.rs | 19 ++- src-tauri/src/core/hotkey.rs | 46 +++---- src-tauri/src/core/service.rs | 2 + src-tauri/src/core/tray/mod.rs | 8 +- src-tauri/src/utils/notification.rs | 20 ++- .../shared/ProxyControlSwitches.tsx | 51 ++++--- src/hooks/use-system-state.ts | 124 +++++++++++------- src/hooks/use-verge.ts | 55 -------- src/hooks/useServiceInstaller.ts | 14 +- src/hooks/useServiceUninstaller.ts | 31 +++-- 11 files changed, 175 insertions(+), 197 deletions(-) diff 
--git a/UPDATELOG.md b/UPDATELOG.md index 071d8ac1..9cd24f9d 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -21,6 +21,7 @@ - 修复 Linux WebKit 网络进程的崩溃 - 修复无法导入订阅 - 修复实际导入成功但显示导入失败的问题 +- 修复服务不可用时,自动关闭 Tun 模式导致应用卡死问题 - 修复删除订阅时未能实际删除相关文件 - 修复 macOS 连接界面显示异常 - 修复规则配置项在不同配置文件间全局共享导致切换被重置的问题 @@ -78,6 +79,7 @@ - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题 +- Tun 模式不可用时,禁用系统托盘的 Tun 模式菜单
diff --git a/src-tauri/src/config/config.rs b/src-tauri/src/config/config.rs index dfbfed7c..4318a3e2 100644 --- a/src-tauri/src/config/config.rs +++ b/src-tauri/src/config/config.rs @@ -1,9 +1,10 @@ use super::{IClashTemp, IProfiles, IRuntime, IVerge}; use crate::{ + cmd, config::{PrfItem, profiles_append_item_safe}, constants::{files, timing}, - core::{CoreManager, handle, validate::CoreConfigValidator}, - enhance, logging, + core::{CoreManager, handle, service, tray, validate::CoreConfigValidator}, + enhance, logging, logging_error, utils::{Draft, dirs, help, logging::Type}, }; use anyhow::{Result, anyhow}; @@ -55,6 +56,20 @@ impl Config { pub async fn init_config() -> Result<()> { Self::ensure_default_profile_items().await?; + // init Tun mode + if !cmd::system::is_admin().unwrap_or_default() + && service::is_service_available().await.is_err() + { + let verge = Config::verge().await; + verge.draft_mut().enable_tun_mode = Some(false); + verge.apply(); + let _ = tray::Tray::global().update_tray_display().await; + + // 分离数据获取和异步调用避免Send问题 + let verge_data = Config::verge().await.latest_ref().clone(); + logging_error!(Type::Core, verge_data.save_file().await); + } + let validation_result = Self::generate_and_validate().await?; if let Some((msg_type, msg_content)) = validation_result { diff --git a/src-tauri/src/core/hotkey.rs b/src-tauri/src/core/hotkey.rs index 1a397438..d9429c76 100755 --- a/src-tauri/src/core/hotkey.rs +++ b/src-tauri/src/core/hotkey.rs @@ -8,7 +8,6 @@ use anyhow::{Result, bail}; use parking_lot::Mutex; use smartstring::alias::String; use std::{collections::HashMap, fmt, str::FromStr, sync::Arc}; -use tauri::{AppHandle, Manager}; use tauri_plugin_global_shortcut::{Code, GlobalShortcutExt, ShortcutState}; /// Enum representing all available hotkey functions @@ -105,66 +104,53 @@ impl Hotkey { } /// Execute the function associated with a hotkey function enum - fn execute_function(function: HotkeyFunction, app_handle: &AppHandle) { - let app_handle 
= app_handle.clone(); + fn execute_function(function: HotkeyFunction) { match function { HotkeyFunction::OpenOrCloseDashboard => { AsyncHandler::spawn(async move || { crate::feat::open_or_close_dashboard().await; - notify_event(app_handle, NotificationEvent::DashboardToggled).await; + notify_event(NotificationEvent::DashboardToggled).await; }); } HotkeyFunction::ClashModeRule => { AsyncHandler::spawn(async move || { feat::change_clash_mode("rule".into()).await; - notify_event( - app_handle, - NotificationEvent::ClashModeChanged { mode: "Rule" }, - ) - .await; + notify_event(NotificationEvent::ClashModeChanged { mode: "Rule" }).await; }); } HotkeyFunction::ClashModeGlobal => { AsyncHandler::spawn(async move || { feat::change_clash_mode("global".into()).await; - notify_event( - app_handle, - NotificationEvent::ClashModeChanged { mode: "Global" }, - ) - .await; + notify_event(NotificationEvent::ClashModeChanged { mode: "Global" }).await; }); } HotkeyFunction::ClashModeDirect => { AsyncHandler::spawn(async move || { feat::change_clash_mode("direct".into()).await; - notify_event( - app_handle, - NotificationEvent::ClashModeChanged { mode: "Direct" }, - ) - .await; + notify_event(NotificationEvent::ClashModeChanged { mode: "Direct" }).await; }); } HotkeyFunction::ToggleSystemProxy => { AsyncHandler::spawn(async move || { feat::toggle_system_proxy().await; - notify_event(app_handle, NotificationEvent::SystemProxyToggled).await; + notify_event(NotificationEvent::SystemProxyToggled).await; }); } HotkeyFunction::ToggleTunMode => { AsyncHandler::spawn(async move || { feat::toggle_tun_mode(None).await; - notify_event(app_handle, NotificationEvent::TunModeToggled).await; + notify_event(NotificationEvent::TunModeToggled).await; }); } HotkeyFunction::EntryLightweightMode => { AsyncHandler::spawn(async move || { entry_lightweight_mode().await; - notify_event(app_handle, NotificationEvent::LightweightModeEntered).await; + 
notify_event(NotificationEvent::LightweightModeEntered).await; }); } HotkeyFunction::Quit => { AsyncHandler::spawn(async move || { - notify_event(app_handle, NotificationEvent::AppQuit).await; + notify_event(NotificationEvent::AppQuit).await; feat::quit().await; }); } @@ -172,7 +158,7 @@ impl Hotkey { HotkeyFunction::Hide => { AsyncHandler::spawn(async move || { feat::hide().await; - notify_event(app_handle, NotificationEvent::AppHidden).await; + notify_event(NotificationEvent::AppHidden).await; }); } } @@ -224,14 +210,12 @@ impl Hotkey { let is_quit = matches!(function, HotkeyFunction::Quit); - manager.on_shortcut(hotkey, move |app_handle, hotkey_event, event| { + manager.on_shortcut(hotkey, move |_app_handle, hotkey_event, event| { let hotkey_event_owned = *hotkey_event; let event_owned = event; let function_owned = function; let is_quit_owned = is_quit; - let app_handle_cloned = app_handle.clone(); - AsyncHandler::spawn(move || async move { if event_owned.state == ShortcutState::Pressed { logging!( @@ -242,11 +226,11 @@ impl Hotkey { ); if hotkey_event_owned.key == Code::KeyQ && is_quit_owned { - if let Some(window) = app_handle_cloned.get_webview_window("main") + if let Some(window) = handle::Handle::get_window() && window.is_focused().unwrap_or(false) { logging!(debug, Type::Hotkey, "Executing quit function"); - Self::execute_function(function_owned, &app_handle_cloned); + Self::execute_function(function_owned); } } else { logging!(debug, Type::Hotkey, "Executing function directly"); @@ -258,14 +242,14 @@ impl Hotkey { .unwrap_or(true); if is_enable_global_hotkey { - Self::execute_function(function_owned, &app_handle_cloned); + Self::execute_function(function_owned); } else { use crate::utils::window_manager::WindowManager; let is_visible = WindowManager::is_main_window_visible(); let is_focused = WindowManager::is_main_window_focused(); if is_focused && is_visible { - Self::execute_function(function_owned, &app_handle_cloned); + 
Self::execute_function(function_owned); } } } diff --git a/src-tauri/src/core/service.rs b/src-tauri/src/core/service.rs index 1361a593..04ded18a 100644 --- a/src-tauri/src/core/service.rs +++ b/src-tauri/src/core/service.rs @@ -1,5 +1,6 @@ use crate::{ config::Config, + core::tray, logging, logging_error, utils::{dirs, init::service_writer_config, logging::Type}, }; @@ -531,6 +532,7 @@ impl ServiceManager { return Err(anyhow::anyhow!("服务不可用: {}", reason)); } } + let _ = tray::Tray::global().update_tray_display().await; Ok(()) } } diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index a2b15ff6..ed9f21e8 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -5,6 +5,7 @@ use tauri_plugin_mihomo::models::Proxies; #[cfg(target_os = "macos")] pub mod speed_rate; use crate::config::PrfSelected; +use crate::core::service; use crate::module::lightweight; use crate::process::AsyncHandler; use crate::utils::window_manager::WindowManager; @@ -297,6 +298,9 @@ impl Tray { let verge = Config::verge().await.latest_ref().clone(); let system_proxy = verge.enable_system_proxy.as_ref().unwrap_or(&false); let tun_mode = verge.enable_tun_mode.as_ref().unwrap_or(&false); + let tun_mode_available = cmd::system::is_admin().unwrap_or_default() + || service::is_service_available().await.is_ok(); + println!("tun_mode_available: {}", tun_mode_available); let mode = { Config::clash() .await @@ -322,6 +326,7 @@ impl Tray { Some(mode.as_str()), *system_proxy, *tun_mode, + tun_mode_available, profile_uid_and_name, is_lightweight_mode, ) @@ -837,6 +842,7 @@ async fn create_tray_menu( mode: Option<&str>, system_proxy_enabled: bool, tun_mode_enabled: bool, + tun_mode_available: bool, profile_uid_and_name: Vec<(String, String)>, is_lightweight_mode: bool, ) -> Result> { @@ -980,7 +986,7 @@ async fn create_tray_menu( app_handle, MenuIds::TUN_MODE, &texts.tun_mode, - true, + tun_mode_available, tun_mode_enabled, 
hotkeys.get("toggle_tun_mode").map(|s| s.as_str()), )?; diff --git a/src-tauri/src/utils/notification.rs b/src-tauri/src/utils/notification.rs index 0b0b7199..f737560e 100644 --- a/src-tauri/src/utils/notification.rs +++ b/src-tauri/src/utils/notification.rs @@ -1,6 +1,5 @@ -use crate::utils::i18n::t; +use crate::{core::handle, utils::i18n::t}; -use tauri::AppHandle; use tauri_plugin_notification::NotificationExt; pub enum NotificationEvent<'a> { @@ -16,8 +15,10 @@ pub enum NotificationEvent<'a> { AppHidden, } -fn notify(app: &AppHandle, title: &str, body: &str) { - app.notification() +fn notify(title: &str, body: &str) { + let app_handle = handle::Handle::app_handle(); + app_handle + .notification() .builder() .title(title) .body(body) @@ -25,49 +26,44 @@ fn notify(app: &AppHandle, title: &str, body: &str) { .ok(); } -pub async fn notify_event<'a>(app: AppHandle, event: NotificationEvent<'a>) { +pub async fn notify_event<'a>(event: NotificationEvent<'a>) { match event { NotificationEvent::DashboardToggled => { notify( - &app, &t("DashboardToggledTitle").await, &t("DashboardToggledBody").await, ); } NotificationEvent::ClashModeChanged { mode } => { notify( - &app, &t("ClashModeChangedTitle").await, &t_with_args("ClashModeChangedBody", mode).await, ); } NotificationEvent::SystemProxyToggled => { notify( - &app, &t("SystemProxyToggledTitle").await, &t("SystemProxyToggledBody").await, ); } NotificationEvent::TunModeToggled => { notify( - &app, &t("TunModeToggledTitle").await, &t("TunModeToggledBody").await, ); } NotificationEvent::LightweightModeEntered => { notify( - &app, &t("LightweightModeEnteredTitle").await, &t("LightweightModeEnteredBody").await, ); } NotificationEvent::AppQuit => { - notify(&app, &t("AppQuitTitle").await, &t("AppQuitBody").await); + notify(&t("AppQuitTitle").await, &t("AppQuitBody").await); } #[cfg(target_os = "macos")] NotificationEvent::AppHidden => { - notify(&app, &t("AppHiddenTitle").await, &t("AppHiddenBody").await); + 
notify(&t("AppHiddenTitle").await, &t("AppHiddenBody").await); } } } diff --git a/src/components/shared/ProxyControlSwitches.tsx b/src/components/shared/ProxyControlSwitches.tsx index ccf3ec7b..5d9304bf 100644 --- a/src/components/shared/ProxyControlSwitches.tsx +++ b/src/components/shared/ProxyControlSwitches.tsx @@ -117,13 +117,8 @@ const ProxyControlSwitches = ({ const { uninstallServiceAndRestartCore } = useServiceUninstaller(); const { actualState: systemProxyActualState, toggleSystemProxy } = useSystemProxyState(); - const { - isServiceMode, - isTunModeAvailable, - mutateRunningMode, - mutateServiceOk, - mutateTunModeAvailable, - } = useSystemState(); + const { isServiceOk, isTunModeAvailable, mutateSystemState } = + useSystemState(); const sysproxyRef = useRef(null); const tunRef = useRef(null); @@ -148,9 +143,7 @@ const ProxyControlSwitches = ({ const onInstallService = useLockFn(async () => { try { await installServiceAndRestartCore(); - await mutateRunningMode(); - await mutateServiceOk(); - await mutateTunModeAvailable(); + await mutateSystemState(); } catch (err) { showNotice("error", (err as Error).message || String(err)); } @@ -158,11 +151,11 @@ const ProxyControlSwitches = ({ const onUninstallService = useLockFn(async () => { try { - await handleTunToggle(false); + if (verge?.enable_tun_mode) { + await handleTunToggle(false); + } await uninstallServiceAndRestartCore(); - await mutateRunningMode(); - await mutateServiceOk(); - await mutateTunModeAvailable(); + await mutateSystemState(); } catch (err) { showNotice("error", (err as Error).message || String(err)); } @@ -198,22 +191,22 @@ const ProxyControlSwitches = ({ extraIcons={ <> {!isTunModeAvailable && ( - + <> + + + )} - {!isTunModeAvailable && ( - - )} - {isServiceMode && ( + {isServiceOk && ( { + const [runningMode, isAdminMode, isServiceOk] = await Promise.all([ + getRunningMode(), + isAdmin(), + isServiceAvailable(), + ]); + return { runningMode, isAdminMode, isServiceOk } as SystemState; + }, 
{ - suspense: false, - revalidateOnFocus: false, + suspense: true, + refreshInterval: 30000, + fallback: defaultSystemState, }, ); - const { - data: isServiceOk = false, - mutate: mutateServiceOk, - isLoading: isServiceLoading, - } = useSWR(isServiceMode ? "isServiceAvailable" : null, isServiceAvailable, { - suspense: false, - revalidateOnFocus: false, - onSuccess: (data) => { - console.log("[useSystemState] 服务状态更新:", data); - }, - onError: (error) => { - console.error("[useSystemState] 服务状态检查失败:", error); - }, - // isPaused: () => !isServiceMode, // 仅在非 Service 模式下暂停请求 - }); + const isSidecarMode = systemState.runningMode === "Sidecar"; + const isServiceMode = systemState.runningMode === "Service"; + const isTunModeAvailable = systemState.isAdminMode || systemState.isServiceOk; - const isLoading = - runningModeLoading || isAdminLoading || (isServiceMode && isServiceLoading); + const enable_tun_mode = verge?.enable_tun_mode; + useEffect(() => { + if (enable_tun_mode === undefined) return; - const { data: isTunModeAvailable = false, mutate: mutateTunModeAvailable } = - useSWR( - ["isTunModeAvailable", isAdminMode, isServiceOk], - () => isAdminMode || isServiceOk, - { - suspense: false, - revalidateOnFocus: false, - }, - ); + if ( + !disablingTunMode && + enable_tun_mode && + !isTunModeAvailable && + !isLoading + ) { + disablingTunMode = true; + patchVerge({ enable_tun_mode: false }) + .then(() => { + showNotice( + "info", + t("TUN Mode automatically disabled due to service unavailable"), + ); + }) + .catch((err) => { + console.error("[useVerge] 自动关闭TUN模式失败:", err); + showNotice("error", t("Failed to disable TUN Mode automatically")); + }) + .finally(() => { + const tid = setTimeout(() => { + // 避免 verge 数据更新不及时导致重复执行关闭 Tun 模式 + disablingTunMode = false; + clearTimeout(tid); + }, 1000); + }); + } + }, [enable_tun_mode, isTunModeAvailable, patchVerge, isLoading, t]); return { - runningMode, - isAdminMode, + runningMode: systemState.runningMode, + isAdminMode: 
systemState.isAdminMode, + isServiceOk: systemState.isServiceOk, isSidecarMode, isServiceMode, - isServiceOk, isTunModeAvailable, - mutateRunningMode, - mutateServiceOk, - mutateTunModeAvailable, + mutateSystemState, isLoading, }; } diff --git a/src/hooks/use-verge.ts b/src/hooks/use-verge.ts index 1e7d7afc..f6415ff8 100644 --- a/src/hooks/use-verge.ts +++ b/src/hooks/use-verge.ts @@ -1,16 +1,8 @@ -import { useCallback, useEffect, useRef } from "react"; -import { useTranslation } from "react-i18next"; import useSWR from "swr"; -import { useSystemState } from "@/hooks/use-system-state"; import { getVergeConfig, patchVergeConfig } from "@/services/cmds"; -import { showNotice } from "@/services/noticeService"; export const useVerge = () => { - const { t } = useTranslation(); - const { isTunModeAvailable, isServiceMode, isLoading } = useSystemState(); - const disablingRef = useRef(false); - const { data: verge, mutate: mutateVerge } = useSWR( "getVergeConfig", async () => { @@ -24,53 +16,6 @@ export const useVerge = () => { mutateVerge(); }; - const { enable_tun_mode } = verge ?? 
{}; - - const mutateVergeRef = useRef(mutateVerge); - const tRef = useRef(t); - const enableTunRef = useRef(enable_tun_mode); - const isLoadingRef = useRef(isLoading); - const isServiceModeRef = useRef(isServiceMode); - - mutateVergeRef.current = mutateVerge; - tRef.current = t; - enableTunRef.current = enable_tun_mode; - isLoadingRef.current = isLoading; - isServiceModeRef.current = isServiceMode; - - const doDisable = useCallback(async () => { - try { - if (isServiceModeRef.current === true) return; - await patchVergeConfig({ enable_tun_mode: false }); - await mutateVergeRef.current?.(); - showNotice( - "info", - tRef.current( - "TUN Mode automatically disabled due to service unavailable", - ), - ); - } catch (err) { - console.error("[useVerge] 自动关闭TUN模式失败:", err); - showNotice( - "error", - tRef.current("Failed to disable TUN Mode automatically"), - ); - } finally { - disablingRef.current = false; - } - }, []); - - useEffect(() => { - if (isTunModeAvailable === true) return; - if (isLoadingRef.current === true) return; - if (enableTunRef.current !== true) return; - if (isServiceModeRef.current === true) return; - if (disablingRef.current) return; - - disablingRef.current = true; - void doDisable(); - }, [isTunModeAvailable, doDisable]); - return { verge, mutateVerge, diff --git a/src/hooks/useServiceInstaller.ts b/src/hooks/useServiceInstaller.ts index b074c678..2f216876 100644 --- a/src/hooks/useServiceInstaller.ts +++ b/src/hooks/useServiceInstaller.ts @@ -25,7 +25,7 @@ const executeWithErrorHandling = async ( }; export const useServiceInstaller = () => { - const { mutateRunningMode, mutateServiceOk } = useSystemState(); + const { mutateSystemState } = useSystemState(); const installServiceAndRestartCore = useCallback(async () => { await executeWithErrorHandling( @@ -34,9 +34,13 @@ export const useServiceInstaller = () => { "Service Installed Successfully", ); - await executeWithErrorHandling(() => restartCore(), "Restarting Core..."); - await 
mutateRunningMode(); - await mutateServiceOk(); - }, [mutateRunningMode, mutateServiceOk]); + await executeWithErrorHandling( + () => restartCore(), + "Restarting Core...", + "Clash Core Restarted", + ); + + await mutateSystemState(); + }, [mutateSystemState]); return { installServiceAndRestartCore }; }; diff --git a/src/hooks/useServiceUninstaller.ts b/src/hooks/useServiceUninstaller.ts index bb2c450d..fbcd76a5 100644 --- a/src/hooks/useServiceUninstaller.ts +++ b/src/hooks/useServiceUninstaller.ts @@ -25,21 +25,26 @@ const executeWithErrorHandling = async ( }; export const useServiceUninstaller = () => { - const { mutateRunningMode, mutateServiceOk } = useSystemState(); + const { mutateSystemState } = useSystemState(); const uninstallServiceAndRestartCore = useCallback(async () => { - await executeWithErrorHandling(() => stopCore(), "Stopping Core..."); - - await executeWithErrorHandling( - () => uninstallService(), - "Uninstalling Service...", - "Service Uninstalled Successfully", - ); - - await executeWithErrorHandling(() => restartCore(), "Restarting Core..."); - await mutateRunningMode(); - await mutateServiceOk(); - }, [mutateRunningMode, mutateServiceOk]); + try { + await executeWithErrorHandling(() => stopCore(), "Stopping Core..."); + await executeWithErrorHandling( + () => uninstallService(), + "Uninstalling Service...", + "Service Uninstalled Successfully", + ); + } catch (ignore) { + } finally { + await executeWithErrorHandling( + () => restartCore(), + "Restarting Core...", + "Clash Core Restarted", + ); + await mutateSystemState(); + } + }, [mutateSystemState]); return { uninstallServiceAndRestartCore }; }; From 0f1537ef48d4370177fcbf0542fd448b9eb21f76 Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Fri, 31 Oct 2025 17:36:33 +0800 Subject: [PATCH 45/70] chore: up Cargo.lock --- src-tauri/Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 2fd3d8c1..e12e75eb 100644 --- 
a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -7134,7 +7134,7 @@ dependencies = [ [[package]] name = "sysproxy" version = "0.3.1" -source = "git+https://github.com/clash-verge-rev/sysproxy-rs#f1f5ac38614669d03d56821192dc6a1a512d368e" +source = "git+https://github.com/clash-verge-rev/sysproxy-rs#9334499a455ffb99fcfaf3e368b741ca6f25f1e0" dependencies = [ "interfaces", "iptools", From 8fc8eb1789dd0164b3c246f4c5e01bb1da3b3aa4 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 18:15:21 +0800 Subject: [PATCH 46/70] chore: add acknowledgments for contributors in update log --- UPDATELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/UPDATELOG.md b/UPDATELOG.md index 9cd24f9d..39d129bd 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -1,5 +1,7 @@ ## v2.4.3 +感谢 @Slinetrac, @oomeow 以及 @Lythrilla 的出色贡献 + ### 🐞 修复问题 - 优化服务模式重装逻辑,避免不必要的重复检查 From 59e7095b0ff30bbba1b79c024f448b9ede10776a Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Fri, 31 Oct 2025 19:20:07 +0800 Subject: [PATCH 47/70] chore: up sysproxy git hash --- src-tauri/Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index e12e75eb..b6942264 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -7134,7 +7134,7 @@ dependencies = [ [[package]] name = "sysproxy" version = "0.3.1" -source = "git+https://github.com/clash-verge-rev/sysproxy-rs#9334499a455ffb99fcfaf3e368b741ca6f25f1e0" +source = "git+https://github.com/clash-verge-rev/sysproxy-rs#50100ab03eb802056c381f3c5009e903c67e3bac" dependencies = [ "interfaces", "iptools", From d3386908ff544a44bf7399f7276979e617fef12c Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 19:37:28 +0800 Subject: [PATCH 48/70] fix: improve caching strategy for autobuild jobs --- .github/workflows/autobuild.yml | 33 ++++++++++++++++++++++++++++--- .github/workflows/lint-clippy.yml | 5 
+++-- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/.github/workflows/autobuild.yml b/.github/workflows/autobuild.yml index 793a1c59..50c6617b 100644 --- a/.github/workflows/autobuild.yml +++ b/.github/workflows/autobuild.yml @@ -169,7 +169,8 @@ jobs: workspaces: src-tauri cache-all-crates: true save-if: ${{ github.ref == 'refs/heads/dev' }} - shared-key: autobuild-shared + shared-key: autobuild-${{ runner.os }}-${{ matrix.target }} + key: ${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('src-tauri/Cargo.lock') }} - name: Install dependencies (ubuntu only) if: matrix.os == 'ubuntu-22.04' @@ -197,6 +198,14 @@ jobs: node-version: "22" cache: "pnpm" + - name: Cache pnpm store + uses: actions/cache@v4 + with: + path: ~/.pnpm-store + key: ${{ runner.os }}-pnpm-${{ hashFiles('pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- + - name: Pnpm install and check run: | pnpm i @@ -259,7 +268,8 @@ jobs: workspaces: src-tauri cache-all-crates: true save-if: ${{ github.ref == 'refs/heads/dev' }} - shared-key: autobuild-shared + shared-key: autobuild-${{ runner.os }}-${{ matrix.target }} + key: ${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('src-tauri/Cargo.lock') }} - name: Install pnpm uses: pnpm/action-setup@v4 @@ -272,6 +282,14 @@ jobs: node-version: "22" cache: "pnpm" + - name: Cache pnpm store + uses: actions/cache@v4 + with: + path: ~/.pnpm-store + key: ${{ runner.os }}-pnpm-${{ hashFiles('pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- + - name: Pnpm install and check run: | pnpm i @@ -391,7 +409,8 @@ jobs: workspaces: src-tauri cache-all-crates: true save-if: ${{ github.ref == 'refs/heads/dev' }} - shared-key: autobuild-shared + shared-key: autobuild-${{ runner.os }}-${{ matrix.target }} + key: ${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('src-tauri/Cargo.lock') }} - name: Install pnpm uses: pnpm/action-setup@v4 @@ -404,6 +423,14 @@ jobs: node-version: "22" cache: "pnpm" + - name: Cache pnpm store + uses: 
actions/cache@v4 + with: + path: ~/.pnpm-store + key: ${{ runner.os }}-pnpm-${{ hashFiles('pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm- + - name: Pnpm install and check run: | pnpm i diff --git a/.github/workflows/lint-clippy.yml b/.github/workflows/lint-clippy.yml index 38ca85c0..30ed828b 100644 --- a/.github/workflows/lint-clippy.yml +++ b/.github/workflows/lint-clippy.yml @@ -59,9 +59,10 @@ jobs: uses: Swatinem/rust-cache@v2 with: workspaces: src-tauri + cache-all-crates: true save-if: false - cache-all-crates: false - shared-key: autobuild-shared + shared-key: autobuild-${{ runner.os }}-${{ matrix.target }} + key: ${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('src-tauri/Cargo.lock') }} - name: Install dependencies (ubuntu only) if: matrix.os == 'ubuntu-22.04' From 804641425b84cfddefb8625f92de662196b3f9b1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 22:51:12 +0800 Subject: [PATCH 49/70] chore(deps): update dependency vitest to ^4.0.6 (#5264) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 86 +++++++++++++++++++++++++------------------------- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/package.json b/package.json index a175ce1d..06e5251a 100644 --- a/package.json +++ b/package.json @@ -118,7 +118,7 @@ "vite": "^7.1.12", "vite-plugin-monaco-editor-esm": "^2.0.2", "vite-plugin-svgr": "^4.5.0", - "vitest": "^4.0.5" + "vitest": "^4.0.6" }, "lint-staged": { "*.{ts,tsx,js,jsx}": [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 93569431..32733ca3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -256,8 +256,8 @@ importers: specifier: ^4.5.0 version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) vitest: - specifier: ^4.0.5 - version: 
4.0.5(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + specifier: ^4.0.6 + version: 4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) packages: @@ -2019,11 +2019,11 @@ packages: peerDependencies: vite: ^4 || ^5 || ^6 || ^7 - '@vitest/expect@4.0.5': - resolution: {integrity: sha512-DJctLVlKoddvP/G389oGmKWNG6GD9frm2FPXARziU80Rjo7SIYxQzb2YFzmQ4fVD3Q5utUYY8nUmWrqsuIlIXQ==} + '@vitest/expect@4.0.6': + resolution: {integrity: sha512-5j8UUlBVhOjhj4lR2Nt9sEV8b4WtbcYh8vnfhTNA2Kn5+smtevzjNq+xlBuVhnFGXiyPPNzGrOVvmyHWkS5QGg==} - '@vitest/mocker@4.0.5': - resolution: {integrity: sha512-iYHIy72LfbK+mL5W8zXROp6oOcJKXWeKcNjcPPsqoa18qIEDrhB6/Z08o0wRajTd6SSSDNw8NCSIHVNOMpz0mw==} + '@vitest/mocker@4.0.6': + resolution: {integrity: sha512-3COEIew5HqdzBFEYN9+u0dT3i/NCwppLnO1HkjGfAP1Vs3vti1Hxm/MvcbC4DAn3Szo1M7M3otiAaT83jvqIjA==} peerDependencies: msw: ^2.4.9 vite: ^6.0.0 || ^7.0.0-0 @@ -2033,20 +2033,20 @@ packages: vite: optional: true - '@vitest/pretty-format@4.0.5': - resolution: {integrity: sha512-t1T/sSdsYyNc5AZl0EMeD0jW9cpJe2cODP0R++ZQe1kTkpgrwEfxGFR/yCG4w8ZybizbXRTHU7lE8sTDD/QsGw==} + '@vitest/pretty-format@4.0.6': + resolution: {integrity: sha512-4vptgNkLIA1W1Nn5X4x8rLJBzPiJwnPc+awKtfBE5hNMVsoAl/JCCPPzNrbf+L4NKgklsis5Yp2gYa+XAS442g==} - '@vitest/runner@4.0.5': - resolution: {integrity: sha512-CQVVe+YEeKSiFBD5gBAmRDQglm4PnMBYzeTmt06t5iWtsUN9StQeeKhYCea/oaqBYilf8sARG6fSctUcEL/UmQ==} + '@vitest/runner@4.0.6': + resolution: {integrity: sha512-trPk5qpd7Jj+AiLZbV/e+KiiaGXZ8ECsRxtnPnCrJr9OW2mLB72Cb824IXgxVz/mVU3Aj4VebY+tDTPn++j1Og==} - '@vitest/snapshot@4.0.5': - resolution: {integrity: sha512-jfmSAeR6xYNEvcD+/RxFGA1bzpqHtkVhgxo2cxXia+Q3xX7m6GpZij07rz+WyQcA/xEGn4eIS1OItkMyWsGBmQ==} + '@vitest/snapshot@4.0.6': + resolution: {integrity: sha512-PaYLt7n2YzuvxhulDDu6c9EosiRuIE+FI2ECKs6yvHyhoga+2TBWI8dwBjs+IeuQaMtZTfioa9tj3uZb7nev1g==} - '@vitest/spy@4.0.5': - resolution: {integrity: 
sha512-TUmVQpAQign7r8+EnZsgTF3vY9BdGofTUge1rGNbnHn2IN3FChiQoT9lrPz7A7AVUZJU2LAZXl4v66HhsNMhoA==} + '@vitest/spy@4.0.6': + resolution: {integrity: sha512-g9jTUYPV1LtRPRCQfhbMintW7BTQz1n6WXYQYRQ25qkyffA4bjVXjkROokZnv7t07OqfaFKw1lPzqKGk1hmNuQ==} - '@vitest/utils@4.0.5': - resolution: {integrity: sha512-V5RndUgCB5/AfNvK9zxGCrRs99IrPYtMTIdUzJMMFs9nrmE5JXExIEfjVtUteyTRiLfCm+dCRMHf/Uu7Mm8/dg==} + '@vitest/utils@4.0.6': + resolution: {integrity: sha512-bG43VS3iYKrMIZXBo+y8Pti0O7uNju3KvNn6DrQWhQQKcLavMB+0NZfO1/QBAEbq0MaQ3QjNsnnXlGQvsh0Z6A==} acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} @@ -4192,18 +4192,18 @@ packages: yaml: optional: true - vitest@4.0.5: - resolution: {integrity: sha512-4H+J28MI5oeYgGg3h5BFSkQ1g/2GKK1IR8oorH3a6EQQbb7CwjbnyBjH4PGxw9/6vpwAPNzaeUMp4Js4WJmdXQ==} + vitest@4.0.6: + resolution: {integrity: sha512-gR7INfiVRwnEOkCk47faros/9McCZMp5LM+OMNWGLaDBSvJxIzwjgNFufkuePBNaesGRnLmNfW+ddbUJRZn0nQ==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' '@types/debug': ^4.1.12 '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.5 - '@vitest/browser-preview': 4.0.5 - '@vitest/browser-webdriverio': 4.0.5 - '@vitest/ui': 4.0.5 + '@vitest/browser-playwright': 4.0.6 + '@vitest/browser-preview': 4.0.6 + '@vitest/browser-webdriverio': 4.0.6 + '@vitest/ui': 4.0.6 happy-dom: '*' jsdom: '*' peerDependenciesMeta: @@ -6187,43 +6187,43 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@vitest/expect@4.0.5': + '@vitest/expect@4.0.6': dependencies: '@standard-schema/spec': 1.0.0 '@types/chai': 5.2.2 - '@vitest/spy': 4.0.5 - '@vitest/utils': 4.0.5 + '@vitest/spy': 4.0.6 + '@vitest/utils': 4.0.6 chai: 6.2.0 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.5(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + 
'@vitest/mocker@4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': dependencies: - '@vitest/spy': 4.0.5 + '@vitest/spy': 4.0.6 estree-walker: 3.0.3 magic-string: 0.30.19 optionalDependencies: vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) - '@vitest/pretty-format@4.0.5': + '@vitest/pretty-format@4.0.6': dependencies: tinyrainbow: 3.0.3 - '@vitest/runner@4.0.5': + '@vitest/runner@4.0.6': dependencies: - '@vitest/utils': 4.0.5 + '@vitest/utils': 4.0.6 pathe: 2.0.3 - '@vitest/snapshot@4.0.5': + '@vitest/snapshot@4.0.6': dependencies: - '@vitest/pretty-format': 4.0.5 + '@vitest/pretty-format': 4.0.6 magic-string: 0.30.19 pathe: 2.0.3 - '@vitest/spy@4.0.5': {} + '@vitest/spy@4.0.6': {} - '@vitest/utils@4.0.5': + '@vitest/utils@4.0.6': dependencies: - '@vitest/pretty-format': 4.0.5 + '@vitest/pretty-format': 4.0.6 tinyrainbow: 3.0.3 acorn-jsx@5.3.2(acorn@8.15.0): @@ -8871,15 +8871,15 @@ snapshots: terser: 5.44.0 yaml: 2.8.1 - vitest@4.0.5(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vitest@4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): dependencies: - '@vitest/expect': 4.0.5 - '@vitest/mocker': 4.0.5(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) - '@vitest/pretty-format': 4.0.5 - '@vitest/runner': 4.0.5 - '@vitest/snapshot': 4.0.5 - '@vitest/spy': 4.0.5 - '@vitest/utils': 4.0.5 + '@vitest/expect': 4.0.6 + '@vitest/mocker': 4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitest/pretty-format': 4.0.6 + '@vitest/runner': 4.0.6 + '@vitest/snapshot': 4.0.6 + '@vitest/spy': 4.0.6 + '@vitest/utils': 4.0.6 debug: 4.4.3 es-module-lexer: 1.7.0 expect-type: 1.2.2 From b672dd7055c4903020c2fbbe46a6cd90b88110d1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 
Oct 2025 22:54:52 +0800 Subject: [PATCH 50/70] chore(deps): update dependency sass to ^1.93.3 (#5265) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 46 +++++++++++++++++++++++----------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/package.json b/package.json index 06e5251a..e9e7406e 100644 --- a/package.json +++ b/package.json @@ -110,7 +110,7 @@ "meta-json-schema": "^1.19.14", "node-fetch": "^3.3.2", "prettier": "^3.6.2", - "sass": "^1.93.2", + "sass": "^1.93.3", "tar": "^7.5.2", "terser": "^5.44.0", "typescript": "^5.9.3", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 32733ca3..6e293b60 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -164,10 +164,10 @@ importers: version: 19.2.2(@types/react@19.2.2) '@vitejs/plugin-legacy': specifier: ^7.2.1 - version: 7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1)) '@vitejs/plugin-react-swc': specifier: ^4.2.0 - version: 4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1)) adm-zip: specifier: ^0.5.16 version: 0.5.16 @@ -232,8 +232,8 @@ importers: specifier: ^3.6.2 version: 3.6.2 sass: - specifier: ^1.93.2 - version: 1.93.2 + specifier: ^1.93.3 + version: 1.93.3 tar: specifier: ^7.5.2 version: 7.5.2 @@ -248,16 +248,16 @@ importers: version: 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) vite: specifier: ^7.1.12 - version: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + version: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) vite-plugin-monaco-editor-esm: specifier: ^2.0.2 version: 2.0.2(monaco-editor@0.54.0) vite-plugin-svgr: specifier: 
^4.5.0 - version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + version: 4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1)) vitest: specifier: ^4.0.6 - version: 4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + version: 4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) packages: @@ -3773,8 +3773,8 @@ packages: resolution: {integrity: sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==} engines: {node: '>= 0.4'} - sass@1.93.2: - resolution: {integrity: sha512-t+YPtOQHpGW1QWsh1CHQ5cPIr9lbbGZLZnbihP/D/qZj/yuV68m8qarcV17nvkOX81BCrvzAlq2klCQFZghyTg==} + sass@1.93.3: + resolution: {integrity: sha512-elOcIZRTM76dvxNAjqYrucTSI0teAF/L2Lv0s6f6b7FOwcwIuA357bIE871580AjHJuSvLIRUosgV+lIWx6Rgg==} engines: {node: '>=14.0.0'} hasBin: true @@ -6160,7 +6160,7 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vitejs/plugin-legacy@7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitejs/plugin-legacy@7.2.1(terser@5.44.0)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@babel/core': 7.28.4 '@babel/plugin-transform-dynamic-import': 7.27.1(@babel/core@7.28.4) @@ -6175,15 +6175,15 @@ snapshots: regenerator-runtime: 0.14.1 systemjs: 6.15.1 terser: 5.44.0 - vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - supports-color - '@vitejs/plugin-react-swc@4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + 
'@vitejs/plugin-react-swc@4.2.0(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@rolldown/pluginutils': 1.0.0-beta.43 '@swc/core': 1.14.0 - vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - '@swc/helpers' @@ -6196,13 +6196,13 @@ snapshots: chai: 6.2.0 tinyrainbow: 3.0.3 - '@vitest/mocker@4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1))': + '@vitest/mocker@4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1))': dependencies: '@vitest/spy': 4.0.6 estree-walker: 3.0.3 magic-string: 0.30.19 optionalDependencies: - vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) '@vitest/pretty-format@4.0.6': dependencies: @@ -8385,7 +8385,7 @@ snapshots: is-regex: 1.2.1 optional: true - sass@1.93.2: + sass@1.93.3: dependencies: chokidar: 4.0.3 immutable: 5.1.2 @@ -8844,18 +8844,18 @@ snapshots: dependencies: monaco-editor: 0.54.0 - vite-plugin-svgr@4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)): + vite-plugin-svgr@4.5.0(rollup@4.46.2)(typescript@5.9.3)(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1)): dependencies: '@rollup/pluginutils': 5.2.0(rollup@4.46.2) '@svgr/core': 8.1.0(typescript@5.9.3) '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@5.9.3)) - vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) transitivePeerDependencies: - rollup - supports-color - typescript - 
vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1): dependencies: esbuild: 0.25.4 fdir: 6.5.0(picomatch@4.0.3) @@ -8867,14 +8867,14 @@ snapshots: '@types/node': 24.9.2 fsevents: 2.3.3 jiti: 2.6.1 - sass: 1.93.2 + sass: 1.93.3 terser: 5.44.0 yaml: 2.8.1 - vitest@4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1): + vitest@4.0.6(@types/debug@4.1.12)(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1): dependencies: '@vitest/expect': 4.0.6 - '@vitest/mocker': 4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1)) + '@vitest/mocker': 4.0.6(vite@7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1)) '@vitest/pretty-format': 4.0.6 '@vitest/runner': 4.0.6 '@vitest/snapshot': 4.0.6 @@ -8891,7 +8891,7 @@ snapshots: tinyexec: 0.3.2 tinyglobby: 0.2.15 tinyrainbow: 3.0.3 - vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.2)(terser@5.44.0)(yaml@2.8.1) + vite: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) why-is-node-running: 2.3.0 optionalDependencies: '@types/debug': 4.1.12 From 518875acdea72fb4bc8611086a547b67cb77d274 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Fri, 31 Oct 2025 23:31:04 +0800 Subject: [PATCH 51/70] refactor: update draft handling and improve benchmark structure --- src-tauri/benches/draft_benchmark.rs | 189 +++++++++++---------------- src-tauri/src/config/config.rs | 4 +- src-tauri/src/config/verge.rs | 13 +- src-tauri/src/core/manager/config.rs | 4 +- src-tauri/src/core/timer.rs | 16 ++- src-tauri/src/feat/profile.rs | 2 +- src-tauri/src/feat/window.rs | 2 +- src-tauri/src/utils/draft.rs | 32 ++--- 8 files changed, 114 insertions(+), 148 deletions(-) diff --git a/src-tauri/benches/draft_benchmark.rs 
b/src-tauri/benches/draft_benchmark.rs index 694fc82a..3942382f 100644 --- a/src-tauri/benches/draft_benchmark.rs +++ b/src-tauri/benches/draft_benchmark.rs @@ -3,7 +3,6 @@ use std::hint::black_box; use std::process; use tokio::runtime::Runtime; -// 引入业务模型 & Draft 实现 use app_lib::config::IVerge; use app_lib::utils::Draft as DraftNew; @@ -17,108 +16,86 @@ fn make_draft() -> DraftNew> { DraftNew::from(verge) } -/// 基准:只读 data_ref(正式数据) -fn bench_data_ref(c: &mut Criterion) { - c.bench_function("draft_data_ref", |b| { - b.iter(|| { - let draft = make_draft(); - let data = draft.data_ref(); - black_box(data.enable_auto_launch); - }); - }); -} - -/// 基准:可写 data_mut(正式数据) -fn bench_data_mut(c: &mut Criterion) { - c.bench_function("draft_data_mut", |b| { - b.iter(|| { - let draft = make_draft(); - let mut data = draft.data_mut(); - data.enable_tun_mode = Some(true); - black_box(data.enable_tun_mode); - }); - }); -} - -/// 基准:首次创建草稿(会触发 clone) -fn bench_draft_mut_first(c: &mut Criterion) { - c.bench_function("draft_draft_mut_first", |b| { - b.iter(|| { - let draft = make_draft(); - let mut d = draft.draft_mut(); - d.enable_auto_launch = Some(false); - black_box(d.enable_auto_launch); - }); - }); -} - -/// 基准:重复 draft_mut(已存在草稿,不再 clone) -fn bench_draft_mut_existing(c: &mut Criterion) { - c.bench_function("draft_draft_mut_existing", |b| { - b.iter(|| { - let draft = make_draft(); - { - let mut first = draft.draft_mut(); - first.enable_tun_mode = Some(true); - } - let mut second = draft.draft_mut(); - second.enable_tun_mode = Some(false); - black_box(second.enable_tun_mode); - }); - }); -} - -/// 基准:零拷贝读取最新视图(latest_ref) -fn bench_latest_ref(c: &mut Criterion) { - c.bench_function("draft_latest_ref", |b| { - b.iter(|| { - let draft = make_draft(); - let latest = draft.latest_ref(); - black_box(latest.enable_auto_launch); - }); - }); -} - -/// 基准:apply(提交草稿) -fn bench_apply(c: &mut Criterion) { - c.bench_function("draft_apply", |b| { - b.iter(|| { - let draft = make_draft(); 
- { - let mut d = draft.draft_mut(); - d.enable_auto_launch = Some(false); - } - let _ = draft.apply(); - }); - }); -} - -/// 基准:discard(丢弃草稿) -fn bench_discard(c: &mut Criterion) { - c.bench_function("draft_discard", |b| { - b.iter(|| { - let draft = make_draft(); - { - let mut d = draft.draft_mut(); - d.enable_auto_launch = Some(false); - } - let _ = draft.discard(); - }); - }); -} - -/// 基准:异步 with_data_modify -fn bench_with_data_modify(c: &mut Criterion) { - let rt = Runtime::new().unwrap_or_else(|error| { - eprintln!("draft benchmarks require a Tokio runtime: {error}"); +pub fn bench_draft(c: &mut Criterion) { + let rt = Runtime::new().unwrap_or_else(|e| { + eprintln!("Tokio runtime init failed: {e}"); process::exit(1); }); - c.bench_function("draft_with_data_modify", |b| { + let mut group = c.benchmark_group("draft"); + group.sample_size(100); + group.warm_up_time(std::time::Duration::from_millis(300)); + group.measurement_time(std::time::Duration::from_secs(1)); + + group.bench_function("data_mut", |b| { + b.iter(|| { + let draft = black_box(make_draft()); + let mut data = draft.data_mut(); + data.enable_tun_mode = Some(true); + black_box(&data.enable_tun_mode); + }); + }); + + group.bench_function("draft_mut_first", |b| { + b.iter(|| { + let draft = black_box(make_draft()); + let mut d = draft.draft_mut(); + d.enable_auto_launch = Some(false); + black_box(&d.enable_auto_launch); + }); + }); + + group.bench_function("draft_mut_existing", |b| { + b.iter(|| { + let draft = black_box(make_draft()); + { + let mut first = draft.draft_mut(); + first.enable_tun_mode = Some(true); + black_box(&first.enable_tun_mode); + } + let mut second = draft.draft_mut(); + second.enable_tun_mode = Some(false); + black_box(&second.enable_tun_mode); + }); + }); + + group.bench_function("latest_ref", |b| { + b.iter(|| { + let draft = black_box(make_draft()); + let latest = draft.latest_ref(); + black_box(&latest.enable_auto_launch); + }); + }); + + group.bench_function("apply", |b| 
{ + b.iter(|| { + let draft = black_box(make_draft()); + { + let mut d = draft.draft_mut(); + d.enable_auto_launch = Some(false); + } + draft.apply(); + black_box(&draft); + }); + }); + + group.bench_function("discard", |b| { + b.iter(|| { + let draft = black_box(make_draft()); + { + let mut d = draft.draft_mut(); + d.enable_auto_launch = Some(false); + } + draft.discard(); + black_box(&draft); + }); + }); + + group.bench_function("with_data_modify_async", |b| { b.to_async(&rt).iter(|| async { - let draft = make_draft(); - let _res: Result<(), anyhow::Error> = draft - .with_data_modify(|mut box_data| async move { + let draft = black_box(make_draft()); + let _: Result<(), anyhow::Error> = draft + .with_data_modify::<_, _, _, anyhow::Error>(|mut box_data| async move { box_data.enable_auto_launch = Some(!box_data.enable_auto_launch.unwrap_or(false)); Ok((box_data, ())) @@ -126,17 +103,9 @@ fn bench_with_data_modify(c: &mut Criterion) { .await; }); }); + + group.finish(); } -criterion_group!( - benches, - bench_data_ref, - bench_data_mut, - bench_draft_mut_first, - bench_draft_mut_existing, - bench_latest_ref, - bench_apply, - bench_discard, - bench_with_data_modify -); +criterion_group!(benches, bench_draft); criterion_main!(benches); diff --git a/src-tauri/src/config/config.rs b/src-tauri/src/config/config.rs index 4318a3e2..246a4c6b 100644 --- a/src-tauri/src/config/config.rs +++ b/src-tauri/src/config/config.rs @@ -168,11 +168,11 @@ impl Config { pub async fn generate() -> Result<()> { let (config, exists_keys, logs) = enhance::enhance().await; - *Config::runtime().await.draft_mut() = Box::new(IRuntime { + **Config::runtime().await.draft_mut() = IRuntime { config: Some(config), exists_keys, chain_logs: logs, - }); + }; Ok(()) } diff --git a/src-tauri/src/config/verge.rs b/src-tauri/src/config/verge.rs index bfa45816..25edd04b 100644 --- a/src-tauri/src/config/verge.rs +++ b/src-tauri/src/config/verge.rs @@ -1,3 +1,4 @@ +use crate::config::Config; use crate::{ 
config::{DEFAULT_PAC, deserialize_encrypted, serialize_encrypted}, logging, @@ -304,19 +305,17 @@ impl IVerge { /// 配置修正后重新加载配置 async fn reload_config_after_fix(updated_config: IVerge) -> Result<()> { - use crate::config::Config; - - let config_draft = Config::verge().await; - *config_draft.draft_mut() = Box::new(updated_config.clone()); - config_draft.apply(); - logging!( info, Type::Config, "内存配置已强制更新,新的clash_core: {:?}", - updated_config.clash_core + &updated_config.clash_core ); + let config_draft = Config::verge().await; + **config_draft.draft_mut() = updated_config; + config_draft.apply(); + Ok(()) } diff --git a/src-tauri/src/core/manager/config.rs b/src-tauri/src/core/manager/config.rs index 263ddb4b..70b4bc6f 100644 --- a/src-tauri/src/core/manager/config.rs +++ b/src-tauri/src/core/manager/config.rs @@ -19,11 +19,11 @@ impl CoreManager { let runtime_path = dirs::app_home_dir()?.join(RUNTIME_CONFIG); let clash_config = Config::clash().await.latest_ref().0.clone(); - *Config::runtime().await.draft_mut() = Box::new(IRuntime { + **Config::runtime().await.draft_mut() = IRuntime { config: Some(clash_config.clone()), exists_keys: vec![], chain_logs: Default::default(), - }); + }; help::save_yaml(&runtime_path, &clash_config, Some("# Clash Verge Runtime")).await?; handle::Handle::notice_message(error_key, error_msg); diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index 2ceca275..fa232933 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -419,13 +419,15 @@ impl Timer { }; // Get the profile updated timestamp - now safe to await - let config_profiles = Config::profiles().await; - let profiles = config_profiles.data_ref().clone(); - let items = match profiles.get_items() { - Some(i) => i, - None => { - logging!(warn, Type::Timer, "获取配置列表失败"); - return None; + let items = { + let profiles = Config::profiles().await; + let profiles_guard = profiles.latest_ref(); + match profiles_guard.get_items() { + Some(i) => 
i.clone(), + None => { + logging!(warn, Type::Timer, "获取配置列表失败"); + return None; + } } }; diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 948ba3af..ffba85d0 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -102,7 +102,7 @@ async fn perform_profile_update( let profile_name = item.name.clone().unwrap_or_else(|| uid.clone()); handle::Handle::notice_message("update_with_clash_proxy", profile_name); - let is_current = Some(uid.clone()) == profiles.data_ref().get_current(); + let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); Ok(is_current) } diff --git a/src-tauri/src/feat/window.rs b/src-tauri/src/feat/window.rs index 57cfc810..246179e5 100644 --- a/src-tauri/src/feat/window.rs +++ b/src-tauri/src/feat/window.rs @@ -47,7 +47,7 @@ pub async fn clean_async() -> bool { let tun_task = async { let tun_enabled = Config::verge() .await - .data_ref() + .latest_ref() .enable_tun_mode .unwrap_or(false); diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index 044f6f1f..76465db9 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -24,7 +24,6 @@ impl From for Draft { /// /// # Methods /// - `data_mut`: Returns a mutable reference to the committed data. -/// - `data_ref`: Returns an immutable reference to the committed data. /// - `draft_mut`: Creates or retrieves a mutable reference to the draft data, cloning the committed data if no draft exists. /// - `latest_ref`: Returns an immutable reference to the draft data if it exists, otherwise to the committed data. /// - `apply`: Commits the draft data, replacing the committed data and returning the old committed value if a draft existed. 
@@ -35,11 +34,6 @@ impl Draft> { RwLockWriteGuard::map(self.inner.write(), |inner| &mut inner.0) } - /// 返回正式数据的只读视图(不包含草稿) - pub fn data_ref(&self) -> MappedRwLockReadGuard<'_, Box> { - RwLockReadGuard::map(self.inner.read(), |inner| &inner.0) - } - /// 创建或获取草稿并返回可写引用 pub fn draft_mut(&self) -> MappedRwLockWriteGuard<'_, Box> { let guard = self.inner.upgradable_read(); @@ -69,17 +63,21 @@ impl Draft> { } /// 提交草稿,返回旧正式数据 - pub fn apply(&self) -> Option> { - let mut inner = self.inner.write(); - inner - .1 - .take() - .map(|draft| std::mem::replace(&mut inner.0, draft)) + pub fn apply(&self) { + let guard = self.inner.upgradable_read(); + if guard.1.is_none() { + return; + } + + let mut guard = RwLockUpgradableReadGuard::upgrade(guard); + if let Some(draft) = guard.1.take() { + guard.0 = draft; + } } /// 丢弃草稿,返回被丢弃的草稿 - pub fn discard(&self) -> Option> { - self.inner.write().1.take() + pub fn discard(&self) { + self.inner.write().1.take(); } /// 异步修改正式数据,闭包直接获得 Box 所有权 @@ -152,8 +150,7 @@ fn test_draft_box() { } // 5. 提交草稿 - assert!(draft.apply().is_some()); // 第一次提交应有返回 - assert!(draft.apply().is_none()); // 第二次提交返回 None + draft.apply(); // 正式数据已更新 { @@ -170,8 +167,7 @@ fn test_draft_box() { assert_eq!(draft.draft_mut().enable_auto_launch, Some(true)); // 7. 丢弃草稿 - assert!(draft.discard().is_some()); // 第一次丢弃返回 Some - assert!(draft.discard().is_none()); // 再次丢弃返回 None + draft.discard(); // 8. 
草稿已被丢弃,新的 draft_mut() 会重新 clone assert_eq!(draft.draft_mut().enable_auto_launch, Some(false)); From 52545a626cc4c8421185409603bf71c1dc41f08e Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Sat, 1 Nov 2025 09:49:52 +0800 Subject: [PATCH 52/70] chore(lint): enforce no warnings in pre hooks --- package.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/package.json b/package.json index e9e7406e..a93f66b0 100644 --- a/package.json +++ b/package.json @@ -26,8 +26,8 @@ "publish-version": "node scripts/publish-version.mjs", "fmt": "cargo fmt --manifest-path ./src-tauri/Cargo.toml", "clippy": "cargo clippy --all-features --all-targets --manifest-path ./src-tauri/Cargo.toml", - "lint": "eslint -c eslint.config.ts --cache --cache-location .eslintcache src", - "lint:fix": "eslint -c eslint.config.ts --cache --cache-location .eslintcache --fix src", + "lint": "eslint -c eslint.config.ts --max-warnings=0 --cache --cache-location .eslintcache src", + "lint:fix": "eslint -c eslint.config.ts --max-warnings=0 --cache --cache-location .eslintcache --fix src", "format": "prettier --write .", "format:check": "prettier --check .", "typecheck": "tsc --noEmit", @@ -122,7 +122,7 @@ }, "lint-staged": { "*.{ts,tsx,js,jsx}": [ - "eslint --fix", + "eslint --fix --max-warnings=0", "prettier --write", "git add" ], From c0e111e756f56d9dcfdc8a09281cd29317c15268 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sat, 1 Nov 2025 10:07:29 +0800 Subject: [PATCH 53/70] fix: resolve macOS lightweight mode exit synchronization issues and improve logging levels #5241 --- UPDATELOG.md | 1 + src-tauri/src/core/timer.rs | 2 +- src-tauri/src/lib.rs | 7 + src-tauri/src/module/lightweight.rs | 195 ++++++++++---------------- src-tauri/src/utils/resolve/mod.rs | 16 +-- src-tauri/src/utils/window_manager.rs | 1 - 6 files changed, 90 insertions(+), 132 deletions(-) diff --git a/UPDATELOG.md b/UPDATELOG.md index 39d129bd..ebd8f657 100644 --- 
a/UPDATELOG.md +++ b/UPDATELOG.md @@ -33,6 +33,7 @@ - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 +- 修复 macOS 从 Dock 栏退出轻量模式状态不同步
✨ 新增功能 diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index fa232933..16bb0232 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -155,7 +155,7 @@ impl Timer { .set_maximum_parallel_runnable_num(1) .set_frequency_count_down_by_seconds(3, 3) .spawn_async_routine(|| async move { - logging!(info, Type::Timer, "Updating tray menu"); + logging!(debug, Type::Timer, "Updating tray menu"); crate::core::tray::Tray::global() .update_tray_display() .await diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index bbd16795..d7446d9f 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -10,6 +10,8 @@ mod feat; mod module; mod process; pub mod utils; +#[cfg(target_os = "macos")] +use crate::module::lightweight; #[cfg(target_os = "linux")] use crate::utils::linux; #[cfg(target_os = "macos")] @@ -287,6 +289,11 @@ pub fn run() { pub async fn handle_reopen(has_visible_windows: bool) { handle::Handle::global().init(); + if lightweight::is_in_lightweight_mode() { + lightweight::exit_lightweight_mode().await; + return; + } + if !has_visible_windows { handle::Handle::global().set_activation_policy_regular(); let _ = WindowManager::show_main_window().await; diff --git a/src-tauri/src/module/lightweight.rs b/src-tauri/src/module/lightweight.rs index 08ea8575..2252cb65 100644 --- a/src-tauri/src/module/lightweight.rs +++ b/src-tauri/src/module/lightweight.rs @@ -1,6 +1,6 @@ use crate::{ config::Config, - core::{handle, timer::Timer, tray::Tray}, + core::{handle, timer::Timer}, log_err, logging, process::AsyncHandler, utils::logging::Type, @@ -43,49 +43,42 @@ impl LightweightState { static LIGHTWEIGHT_STATE: AtomicU8 = AtomicU8::new(LightweightState::Normal as u8); -static WINDOW_CLOSE_HANDLER: AtomicU32 = AtomicU32::new(0); -static WEBVIEW_FOCUS_HANDLER: AtomicU32 = AtomicU32::new(0); - -fn set_state(new: LightweightState) { - LIGHTWEIGHT_STATE.store(new.as_u8(), Ordering::Release); - match new { - LightweightState::Normal 
=> { - logging!(info, Type::Lightweight, "轻量模式已关闭"); - } - LightweightState::In => { - logging!(info, Type::Lightweight, "轻量模式已开启"); - } - LightweightState::Exiting => { - logging!(info, Type::Lightweight, "正在退出轻量模式"); - } - } -} +static WINDOW_CLOSE_HANDLER_ID: AtomicU32 = AtomicU32::new(0); +static WEBVIEW_FOCUS_HANDLER_ID: AtomicU32 = AtomicU32::new(0); +#[inline] fn get_state() -> LightweightState { LIGHTWEIGHT_STATE.load(Ordering::Acquire).into() } -// 检查是否处于轻量模式 +#[inline] +fn try_transition(from: LightweightState, to: LightweightState) -> bool { + LIGHTWEIGHT_STATE + .compare_exchange( + from.as_u8(), + to.as_u8(), + Ordering::AcqRel, + Ordering::Relaxed, + ) + .is_ok() +} + +#[inline] +fn record_state_and_log(state: LightweightState) { + LIGHTWEIGHT_STATE.store(state.as_u8(), Ordering::Release); + match state { + LightweightState::Normal => logging!(info, Type::Lightweight, "轻量模式已关闭"), + LightweightState::In => logging!(info, Type::Lightweight, "轻量模式已开启"), + LightweightState::Exiting => logging!(info, Type::Lightweight, "正在退出轻量模式"), + } +} + +#[inline] pub fn is_in_lightweight_mode() -> bool { get_state() == LightweightState::In } -// 设置轻量模式状态(仅 Normal <-> In) -async fn set_lightweight_mode(value: bool) { - let current = get_state(); - if value && current != LightweightState::In { - set_state(LightweightState::In); - } else if !value && current != LightweightState::Normal { - set_state(LightweightState::Normal); - } - - // 只有在状态可用时才触发托盘更新 - if let Err(e) = Tray::global().update_part().await { - log::warn!("Failed to update tray: {e}"); - } -} - -pub async fn run_once_auto_lightweight() { +pub async fn auto_lightweight_boot() -> Result<()> { let verge_config = Config::verge().await; let enable_auto = verge_config .data_mut() @@ -96,39 +89,23 @@ pub async fn run_once_auto_lightweight() { .enable_silent_start .unwrap_or(false); - if !(enable_auto && is_silent_start) { - logging!( - info, - Type::Lightweight, - "不满足静默启动且自动进入轻量模式的条件,跳过自动进入轻量模式" - ); - return; + 
if is_silent_start { + logging!(info, Type::Lightweight, "静默启动:直接进入轻量模式"); + let _ = entry_lightweight_mode().await; + return Ok(()); } - set_lightweight_mode(true).await; + if !enable_auto { + logging!(info, Type::Lightweight, "未开启自动轻量模式,跳过初始化"); + return Ok(()); + } + + logging!( + info, + Type::Lightweight, + "非静默启动:注册自动轻量模式监听器" + ); enable_auto_light_weight_mode().await; -} - -pub async fn auto_lightweight_mode_init() -> Result<()> { - let is_silent_start = - { Config::verge().await.latest_ref().enable_silent_start }.unwrap_or(false); - let enable_auto = { - Config::verge() - .await - .latest_ref() - .enable_auto_light_weight_mode - } - .unwrap_or(false); - - if enable_auto && !is_silent_start { - logging!( - info, - Type::Lightweight, - "非静默启动直接挂载自动进入轻量模式监听器!" - ); - set_state(LightweightState::Normal); - enable_auto_light_weight_mode().await; - } Ok(()) } @@ -151,43 +128,18 @@ pub fn disable_auto_light_weight_mode() { } pub async fn entry_lightweight_mode() -> bool { - // 尝试从 Normal -> In - if LIGHTWEIGHT_STATE - .compare_exchange( - LightweightState::Normal as u8, - LightweightState::In as u8, - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_err() - { + if !try_transition(LightweightState::Normal, LightweightState::In) { logging!(info, Type::Lightweight, "无需进入轻量模式,跳过调用"); return false; } - + record_state_and_log(LightweightState::In); WindowManager::destroy_main_window(); - - set_lightweight_mode(true).await; let _ = cancel_light_weight_timer(); - - // 回到 In - set_state(LightweightState::In); - true } -// 添加从轻量模式恢复的函数 pub async fn exit_lightweight_mode() -> bool { - // 尝试从 In -> Exiting - if LIGHTWEIGHT_STATE - .compare_exchange( - LightweightState::In as u8, - LightweightState::Exiting as u8, - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_err() - { + if !try_transition(LightweightState::In, LightweightState::Exiting) { logging!( info, Type::Lightweight, @@ -195,16 +147,10 @@ pub async fn exit_lightweight_mode() -> bool { ); return false; } - + 
record_state_and_log(LightweightState::Exiting); WindowManager::show_main_window().await; - - set_lightweight_mode(false).await; let _ = cancel_light_weight_timer(); - - // 回到 Normal - set_state(LightweightState::Normal); - - logging!(info, Type::Lightweight, "轻量模式退出完成"); + record_state_and_log(LightweightState::Normal); true } @@ -215,7 +161,11 @@ pub async fn add_light_weight_timer() { fn setup_window_close_listener() { if let Some(window) = handle::Handle::get_window() { - let handler = window.listen("tauri://close-requested", move |_event| { + let old_id = WINDOW_CLOSE_HANDLER_ID.swap(0, Ordering::AcqRel); + if old_id != 0 { + window.unlisten(old_id); + } + let handler_id = window.listen("tauri://close-requested", move |_event| { std::mem::drop(AsyncHandler::spawn(|| async { if let Err(e) = setup_light_weight_timer().await { log::warn!("Failed to setup light weight timer: {e}"); @@ -223,16 +173,15 @@ fn setup_window_close_listener() { })); logging!(info, Type::Lightweight, "监听到关闭请求,开始轻量模式计时"); }); - - WINDOW_CLOSE_HANDLER.store(handler, Ordering::Release); + WINDOW_CLOSE_HANDLER_ID.store(handler_id, Ordering::Release); } } fn cancel_window_close_listener() { if let Some(window) = handle::Handle::get_window() { - let handler = WINDOW_CLOSE_HANDLER.swap(0, Ordering::AcqRel); - if handler != 0 { - window.unlisten(handler); + let id = WINDOW_CLOSE_HANDLER_ID.swap(0, Ordering::AcqRel); + if id != 0 { + window.unlisten(id); logging!(info, Type::Lightweight, "取消了窗口关闭监听"); } } @@ -240,7 +189,11 @@ fn cancel_window_close_listener() { fn setup_webview_focus_listener() { if let Some(window) = handle::Handle::get_window() { - let handler = window.listen("tauri://focus", move |_event| { + let old_id = WEBVIEW_FOCUS_HANDLER_ID.swap(0, Ordering::AcqRel); + if old_id != 0 { + window.unlisten(old_id); + } + let handler_id = window.listen("tauri://focus", move |_event| { log_err!(cancel_light_weight_timer()); logging!( info, @@ -248,37 +201,45 @@ fn 
setup_webview_focus_listener() { "监听到窗口获得焦点,取消轻量模式计时" ); }); - - WEBVIEW_FOCUS_HANDLER.store(handler, Ordering::Release); + WEBVIEW_FOCUS_HANDLER_ID.store(handler_id, Ordering::Release); } } fn cancel_webview_focus_listener() { if let Some(window) = handle::Handle::get_window() { - let handler = WEBVIEW_FOCUS_HANDLER.swap(0, Ordering::AcqRel); - if handler != 0 { - window.unlisten(handler); + let id = WEBVIEW_FOCUS_HANDLER_ID.swap(0, Ordering::AcqRel); + if id != 0 { + window.unlisten(id); logging!(info, Type::Lightweight, "取消了窗口焦点监听"); } } } async fn setup_light_weight_timer() -> Result<()> { - Timer::global().init().await?; + if let Err(e) = Timer::global().init().await { + return Err(e).context("failed to initialize timer"); + } + let once_by_minutes = Config::verge() .await .latest_ref() .auto_light_weight_minutes .unwrap_or(10); - // 获取task_id + { + let timer_map = Timer::global().timer_map.read(); + if timer_map.contains_key(LIGHT_WEIGHT_TASK_UID) { + logging!(warn, Type::Timer, "轻量模式计时器已存在,跳过创建"); + return Ok(()); + } + } + let task_id = { Timer::global() .timer_count .fetch_add(1, std::sync::atomic::Ordering::Relaxed) }; - // 创建任务 let task = TaskBuilder::default() .set_task_id(task_id) .set_maximum_parallel_runnable_num(1) @@ -289,7 +250,6 @@ async fn setup_light_weight_timer() -> Result<()> { }) .context("failed to create timer task")?; - // 添加任务到定时器 { let delay_timer = Timer::global().delay_timer.write(); delay_timer @@ -297,7 +257,6 @@ async fn setup_light_weight_timer() -> Result<()> { .context("failed to add timer task")?; } - // 更新任务映射 { let mut timer_map = Timer::global().timer_map.write(); let timer_task = crate::core::timer::TimerTask { diff --git a/src-tauri/src/utils/resolve/mod.rs b/src-tauri/src/utils/resolve/mod.rs index 329c3a86..ac725534 100644 --- a/src-tauri/src/utils/resolve/mod.rs +++ b/src-tauri/src/utils/resolve/mod.rs @@ -11,10 +11,7 @@ use crate::{ tray::Tray, }, logging, logging_error, - module::{ - 
lightweight::{auto_lightweight_mode_init, run_once_auto_lightweight}, - signal, - }, + module::{lightweight::auto_lightweight_boot, signal}, process::AsyncHandler, utils::{init, logging::Type, server, window_manager::WindowManager}, }; @@ -71,8 +68,7 @@ pub fn resolve_setup_async() { tray_init, init_timer(), init_hotkey(), - init_auto_lightweight_mode(), - init_once_auto_lightweight(), + init_auto_lightweight_boot(), ); }); } @@ -128,12 +124,8 @@ pub(super) async fn init_hotkey() { logging_error!(Type::Setup, Hotkey::global().init().await); } -pub(super) async fn init_once_auto_lightweight() { - run_once_auto_lightweight().await; -} - -pub(super) async fn init_auto_lightweight_mode() { - logging_error!(Type::Setup, auto_lightweight_mode_init().await); +pub(super) async fn init_auto_lightweight_boot() { + logging_error!(Type::Setup, auto_lightweight_boot().await); } pub(super) fn init_signal() { diff --git a/src-tauri/src/utils/window_manager.rs b/src-tauri/src/utils/window_manager.rs index 2e9c3054..463f8c39 100644 --- a/src-tauri/src/utils/window_manager.rs +++ b/src-tauri/src/utils/window_manager.rs @@ -359,7 +359,6 @@ impl WindowManager { } return WindowOperationResult::Destroyed; } - logging!(warn, Type::Window, "窗口摧毁失败"); WindowOperationResult::Failed } From ae319279ae86f6b86cae2e7f26ead8d346a184cb Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sat, 1 Nov 2025 10:15:12 +0800 Subject: [PATCH 54/70] chore(deps): update cc, clash_verge_logger, and version-compare to latest versions --- src-tauri/Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index b6942264..d1264cff 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -966,9 +966,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.43" +version = "1.2.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"739eb0f94557554b3ca9a86d2d37bebd49c5e6d0c1d2bda35ba5bdac830befc2" +checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3" dependencies = [ "find-msvc-tools", "jobserver", @@ -1165,8 +1165,8 @@ dependencies = [ [[package]] name = "clash_verge_logger" -version = "0.2.0" -source = "git+https://github.com/clash-verge-rev/clash-verge-logger#9bb189b5b5c4c2eee35168ff4997e8fb10901c81" +version = "0.2.1" +source = "git+https://github.com/clash-verge-rev/clash-verge-logger#955f1b709890640ff01fd30009df0f35816bbca6" dependencies = [ "arraydeque", "compact_str", @@ -8735,9 +8735,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version-compare" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" +checksum = "03c2856837ef78f57382f06b2b8563a2f512f7185d732608fd9176cb3b8edf0e" [[package]] name = "version_check" From 413f29e22ade36100ffff11fcd8d07b5ed34a3d4 Mon Sep 17 00:00:00 2001 From: Sline Date: Sat, 1 Nov 2025 15:28:56 +0800 Subject: [PATCH 55/70] fix: linux theme sync (#5273) --- src/components/layout/use-custom-theme.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/components/layout/use-custom-theme.ts b/src/components/layout/use-custom-theme.ts index 145bc5bd..7682b0a9 100644 --- a/src/components/layout/use-custom-theme.ts +++ b/src/components/layout/use-custom-theme.ts @@ -53,10 +53,13 @@ export const useCustomTheme = () => { return; } - if ( + const preferBrowserMatchMedia = typeof window !== "undefined" && - typeof window.matchMedia === "function" - ) { + typeof window.matchMedia === "function" && + // Skip Tauri flow when running purely in browser. 
+ !("__TAURI__" in window); + + if (preferBrowserMatchMedia) { return; } From b3b8eeb577b82cdfa5e7fc4a962534440570570c Mon Sep 17 00:00:00 2001 From: Tunglies Date: Sat, 1 Nov 2025 16:46:03 +0800 Subject: [PATCH 56/70] refactor: convert file operations to async using tokio fs (#5267) * refactor: convert file operations to async using tokio fs * refactor: integrate AsyncHandler for file operations in backup processes --- src-tauri/src/cmd/app.rs | 11 +- src-tauri/src/cmd/backup.rs | 10 +- src-tauri/src/cmd/clash.rs | 9 +- src-tauri/src/cmd/profile.rs | 13 ++- src-tauri/src/cmd/save_profile.rs | 165 ++++++++++++++++-------------- src-tauri/src/config/prfitem.rs | 19 ++-- src-tauri/src/core/backup.rs | 32 +++--- src-tauri/src/core/tray/mod.rs | 8 +- src-tauri/src/core/validate.rs | 27 +++-- src-tauri/src/enhance/chain.rs | 4 +- src-tauri/src/enhance/mod.rs | 47 ++++----- src-tauri/src/feat/backup.rs | 48 +++++---- 12 files changed, 210 insertions(+), 183 deletions(-) diff --git a/src-tauri/src/cmd/app.rs b/src-tauri/src/cmd/app.rs index 11a9207f..b0b9d480 100644 --- a/src-tauri/src/cmd/app.rs +++ b/src-tauri/src/cmd/app.rs @@ -12,6 +12,7 @@ use smartstring::alias::String; use std::path::Path; use tauri::{AppHandle, Manager}; use tokio::fs; +use tokio::io::AsyncWriteExt; /// 打开应用程序所在目录 #[tauri::command] @@ -116,7 +117,7 @@ pub async fn download_icon_cache(url: String, name: String) -> CmdResult } if !icon_cache_dir.exists() { - let _ = std::fs::create_dir_all(&icon_cache_dir); + let _ = fs::create_dir_all(&icon_cache_dir).await; } let temp_path = icon_cache_dir.join(format!("{}.downloading", name.as_str())); @@ -140,7 +141,7 @@ pub async fn download_icon_cache(url: String, name: String) -> CmdResult if is_image && !is_html { { - let mut file = match std::fs::File::create(&temp_path) { + let mut file = match fs::File::create(&temp_path).await { Ok(file) => file, Err(_) => { if icon_path.exists() { @@ -149,12 +150,12 @@ pub async fn download_icon_cache(url: String, name: 
String) -> CmdResult return Err("Failed to create temporary file".into()); } }; - - std::io::copy(&mut content.as_ref(), &mut file).stringify_err()?; + file.write_all(content.as_ref()).await.stringify_err()?; + file.flush().await.stringify_err()?; } if !icon_path.exists() { - match std::fs::rename(&temp_path, &icon_path) { + match fs::rename(&temp_path, &icon_path).await { Ok(_) => {} Err(_) => { let _ = temp_path.remove_if_exists().await; diff --git a/src-tauri/src/cmd/backup.rs b/src-tauri/src/cmd/backup.rs index 8610b682..a105f4d8 100644 --- a/src-tauri/src/cmd/backup.rs +++ b/src-tauri/src/cmd/backup.rs @@ -11,8 +11,8 @@ pub async fn create_local_backup() -> CmdResult<()> { /// List local backups #[tauri::command] -pub fn list_local_backup() -> CmdResult> { - feat::list_local_backup().stringify_err() +pub async fn list_local_backup() -> CmdResult> { + feat::list_local_backup().await.stringify_err() } /// Delete local backup @@ -29,6 +29,8 @@ pub async fn restore_local_backup(filename: String) -> CmdResult<()> { /// Export local backup to a user selected destination #[tauri::command] -pub fn export_local_backup(filename: String, destination: String) -> CmdResult<()> { - feat::export_local_backup(filename, destination).stringify_err() +pub async fn export_local_backup(filename: String, destination: String) -> CmdResult<()> { + feat::export_local_backup(filename, destination) + .await + .stringify_err() } diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs index cf3675f7..f429e4fd 100644 --- a/src-tauri/src/cmd/clash.rs +++ b/src-tauri/src/cmd/clash.rs @@ -8,6 +8,7 @@ use crate::{config::*, feat, logging, utils::logging::Type}; use compact_str::CompactString; use serde_yaml_ng::Mapping; use smartstring::alias::String; +use tokio::fs; /// 复制Clash环境变量 #[tauri::command] @@ -158,11 +159,9 @@ pub async fn apply_dns_config(apply: bool) -> CmdResult { return Err("DNS config file not found".into()); } - let dns_yaml = tokio::fs::read_to_string(&dns_path) 
- .await - .stringify_err_log(|e| { - logging!(error, Type::Config, "Failed to read DNS config: {e}"); - })?; + let dns_yaml = fs::read_to_string(&dns_path).await.stringify_err_log(|e| { + logging!(error, Type::Config, "Failed to read DNS config: {e}"); + })?; // 解析DNS配置 let patch_config = serde_yaml_ng::from_str::(&dns_yaml) diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 15177936..fc36cf56 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -631,10 +631,15 @@ pub async fn view_profile(index: String) -> CmdResult { /// 读取配置文件内容 #[tauri::command] pub async fn read_profile_file(index: String) -> CmdResult { - let profiles = Config::profiles().await; - let profiles_ref = profiles.latest_ref(); - let item = profiles_ref.get_item(&index).stringify_err()?; - let data = item.read_file().stringify_err()?; + let item = { + let profiles = Config::profiles().await; + let profiles_ref = profiles.latest_ref(); + PrfItem { + file: profiles_ref.get_item(&index).stringify_err()?.file.clone(), + ..Default::default() + } + }; + let data = item.read_file().await.stringify_err()?; Ok(data) } diff --git a/src-tauri/src/cmd/save_profile.rs b/src-tauri/src/cmd/save_profile.rs index a1cd3474..cb216e8b 100644 --- a/src-tauri/src/cmd/save_profile.rs +++ b/src-tauri/src/cmd/save_profile.rs @@ -12,28 +12,37 @@ use tokio::fs; /// 保存profiles的配置 #[tauri::command] pub async fn save_profile_file(index: String, file_data: Option) -> CmdResult { - if file_data.is_none() { - return Ok(()); - } + let file_data = match file_data { + Some(d) => d, + None => return Ok(()), + }; - // 在异步操作前完成所有文件操作 - let (file_path, original_content, is_merge_file) = { + // 在异步操作前获取必要元数据并释放锁 + let (rel_path, is_merge_file) = { let profiles = Config::profiles().await; let profiles_guard = profiles.latest_ref(); let item = profiles_guard.get_item(&index).stringify_err()?; - // 确定是否为merge类型文件 let is_merge = item.itype.as_ref().is_some_and(|t| t == "merge"); - let 
content = item.read_file().stringify_err()?; let path = item.file.clone().ok_or("file field is null")?; - let profiles_dir = dirs::app_profiles_dir().stringify_err()?; - (profiles_dir.join(path.as_str()), content, is_merge) + (path, is_merge) }; + // 读取原始内容(在释放profiles_guard后进行) + let original_content = PrfItem { + file: Some(rel_path.clone()), + ..Default::default() + } + .read_file() + .await + .stringify_err()?; + + let profiles_dir = dirs::app_profiles_dir().stringify_err()?; + let file_path = profiles_dir.join(rel_path.as_str()); + let file_path_str = file_path.to_string_lossy().to_string(); + // 保存新的配置文件 - let file_data = file_data.ok_or("file_data is None")?; fs::write(&file_path, &file_data).await.stringify_err()?; - let file_path_str = file_path.to_string_lossy().to_string(); logging!( info, Type::Config, @@ -42,84 +51,91 @@ pub async fn save_profile_file(index: String, file_data: Option) -> CmdR is_merge_file ); - // 对于 merge 文件,只进行语法验证,不进行后续内核验证 if is_merge_file { - logging!( - info, - Type::Config, - "[cmd配置save] 检测到merge文件,只进行语法验证" - ); - match CoreConfigValidator::validate_config_file(&file_path_str, Some(true)).await { - Ok((true, _)) => { - logging!(info, Type::Config, "[cmd配置save] merge文件语法验证通过"); - // 成功后尝试更新整体配置 - match CoreManager::global().update_config().await { - Ok(_) => { - // 配置更新成功,刷新前端 - handle::Handle::refresh_clash(); - } - Err(e) => { - logging!( - warn, - Type::Config, - "[cmd配置save] 更新整体配置时发生错误: {}", - e - ); - } - } - return Ok(()); - } - Ok((false, error_msg)) => { + return handle_merge_file(&file_path_str, &file_path, &original_content).await; + } + + handle_full_validation(&file_path_str, &file_path, &original_content).await +} + +async fn restore_original( + file_path: &std::path::Path, + original_content: &str, +) -> Result<(), String> { + fs::write(file_path, original_content).await.stringify_err() +} + +fn is_script_error(err: &str, file_path_str: &str) -> bool { + file_path_str.ends_with(".js") + || err.contains("Script 
syntax error") + || err.contains("Script must contain a main function") + || err.contains("Failed to read script file") +} + +async fn handle_merge_file( + file_path_str: &str, + file_path: &std::path::Path, + original_content: &str, +) -> CmdResult { + logging!( + info, + Type::Config, + "[cmd配置save] 检测到merge文件,只进行语法验证" + ); + + match CoreConfigValidator::validate_config_file(file_path_str, Some(true)).await { + Ok((true, _)) => { + logging!(info, Type::Config, "[cmd配置save] merge文件语法验证通过"); + if let Err(e) = CoreManager::global().update_config().await { logging!( warn, Type::Config, - "[cmd配置save] merge文件语法验证失败: {}", - error_msg + "[cmd配置save] 更新整体配置时发生错误: {}", + e ); - // 恢复原始配置文件 - fs::write(&file_path, original_content) - .await - .stringify_err()?; - // 发送合并文件专用错误通知 - let result = (false, error_msg.clone()); - crate::cmd::validate::handle_yaml_validation_notice(&result, "合并配置文件"); - return Ok(()); - } - Err(e) => { - logging!(error, Type::Config, "[cmd配置save] 验证过程发生错误: {}", e); - // 恢复原始配置文件 - fs::write(&file_path, original_content) - .await - .stringify_err()?; - return Err(e.to_string().into()); + } else { + handle::Handle::refresh_clash(); } + Ok(()) + } + Ok((false, error_msg)) => { + logging!( + warn, + Type::Config, + "[cmd配置save] merge文件语法验证失败: {}", + error_msg + ); + restore_original(file_path, original_content).await?; + let result = (false, error_msg.clone()); + crate::cmd::validate::handle_yaml_validation_notice(&result, "合并配置文件"); + Ok(()) + } + Err(e) => { + logging!(error, Type::Config, "[cmd配置save] 验证过程发生错误: {}", e); + restore_original(file_path, original_content).await?; + Err(e.to_string().into()) } } +} - // 非merge文件使用完整验证流程 - match CoreConfigValidator::validate_config_file(&file_path_str, None).await { +async fn handle_full_validation( + file_path_str: &str, + file_path: &std::path::Path, + original_content: &str, +) -> CmdResult { + match CoreConfigValidator::validate_config_file(file_path_str, None).await { Ok((true, _)) => { 
logging!(info, Type::Config, "[cmd配置save] 验证成功"); Ok(()) } Ok((false, error_msg)) => { logging!(warn, Type::Config, "[cmd配置save] 验证失败: {}", error_msg); - // 恢复原始配置文件 - fs::write(&file_path, original_content) - .await - .stringify_err()?; - - // 智能判断错误类型 - let is_script_error = file_path_str.ends_with(".js") - || error_msg.contains("Script syntax error") - || error_msg.contains("Script must contain a main function") - || error_msg.contains("Failed to read script file"); + restore_original(file_path, original_content).await?; if error_msg.contains("YAML syntax error") || error_msg.contains("Failed to read file:") - || (!file_path_str.ends_with(".js") && !is_script_error) + || (!file_path_str.ends_with(".js") && !is_script_error(&error_msg, file_path_str)) { - // 普通YAML错误使用YAML通知处理 logging!( info, Type::Config, @@ -127,8 +143,7 @@ pub async fn save_profile_file(index: String, file_data: Option) -> CmdR ); let result = (false, error_msg.clone()); crate::cmd::validate::handle_yaml_validation_notice(&result, "YAML配置文件"); - } else if is_script_error { - // 脚本错误使用专门的通知处理 + } else if is_script_error(&error_msg, file_path_str) { logging!( info, Type::Config, @@ -137,7 +152,6 @@ pub async fn save_profile_file(index: String, file_data: Option) -> CmdR let result = (false, error_msg.clone()); crate::cmd::validate::handle_script_validation_notice(&result, "脚本文件"); } else { - // 普通配置错误使用一般通知 logging!( info, Type::Config, @@ -150,10 +164,7 @@ pub async fn save_profile_file(index: String, file_data: Option) -> CmdR } Err(e) => { logging!(error, Type::Config, "[cmd配置save] 验证过程发生错误: {}", e); - // 恢复原始配置文件 - fs::write(&file_path, original_content) - .await - .stringify_err()?; + restore_original(file_path, original_content).await?; Err(e.to_string().into()) } } diff --git a/src-tauri/src/config/prfitem.rs b/src-tauri/src/config/prfitem.rs index 46c86478..d26eac70 100644 --- a/src-tauri/src/config/prfitem.rs +++ b/src-tauri/src/config/prfitem.rs @@ -7,7 +7,8 @@ use anyhow::{Context, 
Result, bail}; use serde::{Deserialize, Serialize}; use serde_yaml_ng::Mapping; use smartstring::alias::String; -use std::{fs, time::Duration}; +use std::time::Duration; +use tokio::fs; #[derive(Debug, Clone, Deserialize, Serialize, Default)] pub struct PrfItem { @@ -546,24 +547,28 @@ impl PrfItem { } /// get the file data - pub fn read_file(&self) -> Result { + pub async fn read_file(&self) -> Result { let file = self .file - .clone() + .as_ref() .ok_or_else(|| anyhow::anyhow!("could not find the file"))?; let path = dirs::app_profiles_dir()?.join(file.as_str()); - let content = fs::read_to_string(path).context("failed to read the file")?; + let content = fs::read_to_string(path) + .await + .context("failed to read the file")?; Ok(content.into()) } /// save the file data - pub fn save_file(&self, data: String) -> Result<()> { + pub async fn save_file(&self, data: String) -> Result<()> { let file = self .file - .clone() + .as_ref() .ok_or_else(|| anyhow::anyhow!("could not find the file"))?; let path = dirs::app_profiles_dir()?.join(file.as_str()); - fs::write(path, data.as_bytes()).context("failed to save the file") + fs::write(path, data.as_bytes()) + .await + .context("failed to save the file") } } diff --git a/src-tauri/src/core/backup.rs b/src-tauri/src/core/backup.rs index d9e46c68..345d1f05 100644 --- a/src-tauri/src/core/backup.rs +++ b/src-tauri/src/core/backup.rs @@ -1,4 +1,4 @@ -use crate::{config::Config, utils::dirs}; +use crate::{config::Config, process::AsyncHandler, utils::dirs}; use anyhow::Error; use once_cell::sync::OnceCell; use parking_lot::Mutex; @@ -7,13 +7,12 @@ use smartstring::alias::String; use std::{ collections::HashMap, env::{consts::OS, temp_dir}, - fs, io::Write, path::PathBuf, sync::Arc, time::Duration, }; -use tokio::time::timeout; +use tokio::{fs, time::timeout}; use zip::write::SimpleFileOptions; // 应用版本常量,来自 tauri.conf.json @@ -170,7 +169,7 @@ impl WebDavClient { let webdav_path: String = format!("{}/{}", dirs::BACKUP_DIR, 
file_name).into(); // 读取文件并上传,如果失败尝试一次重试 - let file_content = fs::read(&file_path)?; + let file_content = fs::read(&file_path).await?; // 添加超时保护 let upload_result = timeout( @@ -212,7 +211,7 @@ impl WebDavClient { let fut = async { let response = client.get(path.as_str()).await?; let content = response.bytes().await?; - fs::write(&storage_path, &content)?; + fs::write(&storage_path, &content).await?; Ok::<(), Error>(()) }; @@ -250,18 +249,19 @@ impl WebDavClient { } } -pub fn create_backup() -> Result<(String, PathBuf), Error> { +pub async fn create_backup() -> Result<(String, PathBuf), Error> { let now = chrono::Local::now().format("%Y-%m-%d_%H-%M-%S").to_string(); let zip_file_name: String = format!("{OS}-backup-{now}.zip").into(); let zip_path = temp_dir().join(zip_file_name.as_str()); - let file = fs::File::create(&zip_path)?; + let value = zip_path.clone(); + let file = AsyncHandler::spawn_blocking(move || std::fs::File::create(&value)).await??; let mut zip = zip::ZipWriter::new(file); zip.add_directory("profiles/", SimpleFileOptions::default())?; let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored); - if let Ok(entries) = fs::read_dir(dirs::app_profiles_dir()?) { - for entry in entries { - let entry = entry?; + + if let Ok(mut entries) = fs::read_dir(dirs::app_profiles_dir()?).await { + while let Some(entry) = entries.next_entry().await? 
{ let path = entry.path(); if path.is_file() { let file_name_os = entry.file_name(); @@ -270,16 +270,16 @@ pub fn create_backup() -> Result<(String, PathBuf), Error> { .ok_or_else(|| anyhow::Error::msg("Invalid file name encoding"))?; let backup_path = format!("profiles/{}", file_name); zip.start_file(backup_path, options)?; - let file_content = fs::read(&path)?; + let file_content = fs::read(&path).await?; zip.write_all(&file_content)?; } } } zip.start_file(dirs::CLASH_CONFIG, options)?; - zip.write_all(fs::read(dirs::clash_path()?)?.as_slice())?; + zip.write_all(fs::read(dirs::clash_path()?).await?.as_slice())?; - let mut verge_config: serde_json::Value = - serde_yaml_ng::from_str(&fs::read_to_string(dirs::verge_path()?)?)?; + let verge_text = fs::read_to_string(dirs::verge_path()?).await?; + let mut verge_config: serde_json::Value = serde_yaml_ng::from_str(&verge_text)?; if let Some(obj) = verge_config.as_object_mut() { obj.remove("webdav_username"); obj.remove("webdav_password"); @@ -291,11 +291,11 @@ pub fn create_backup() -> Result<(String, PathBuf), Error> { let dns_config_path = dirs::app_home_dir()?.join(dirs::DNS_CONFIG); if dns_config_path.exists() { zip.start_file(dirs::DNS_CONFIG, options)?; - zip.write_all(fs::read(&dns_config_path)?.as_slice())?; + zip.write_all(fs::read(&dns_config_path).await?.as_slice())?; } zip.start_file(dirs::PROFILE_YAML, options)?; - zip.write_all(fs::read(dirs::profiles_path()?)?.as_slice())?; + zip.write_all(fs::read(dirs::profiles_path()?).await?.as_slice())?; zip.finish()?; Ok((zip_file_name, zip_path)) } diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index ed9f21e8..fe657d11 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -2,6 +2,7 @@ use once_cell::sync::OnceCell; use tauri::Emitter; use tauri::tray::TrayIconBuilder; use tauri_plugin_mihomo::models::Proxies; +use tokio::fs; #[cfg(target_os = "macos")] pub mod speed_rate; use crate::config::PrfSelected; @@ 
-26,7 +27,6 @@ use smartstring::alias::String; use std::collections::HashMap; use std::sync::Arc; use std::{ - fs, sync::atomic::{AtomicBool, Ordering}, time::{Duration, Instant}, }; @@ -86,7 +86,7 @@ impl TrayState { let is_common_tray_icon = verge.common_tray_icon.unwrap_or(false); if is_common_tray_icon && let Ok(Some(common_icon_path)) = find_target_icons("common") - && let Ok(icon_data) = fs::read(common_icon_path) + && let Ok(icon_data) = fs::read(common_icon_path).await { return (true, icon_data); } @@ -123,7 +123,7 @@ impl TrayState { let is_sysproxy_tray_icon = verge.sysproxy_tray_icon.unwrap_or(false); if is_sysproxy_tray_icon && let Ok(Some(sysproxy_icon_path)) = find_target_icons("sysproxy") - && let Ok(icon_data) = fs::read(sysproxy_icon_path) + && let Ok(icon_data) = fs::read(sysproxy_icon_path).await { return (true, icon_data); } @@ -160,7 +160,7 @@ impl TrayState { let is_tun_tray_icon = verge.tun_tray_icon.unwrap_or(false); if is_tun_tray_icon && let Ok(Some(tun_icon_path)) = find_target_icons("tun") - && let Ok(icon_data) = fs::read(tun_icon_path) + && let Ok(icon_data) = fs::read(tun_icon_path).await { return (true, icon_data); } diff --git a/src-tauri/src/core/validate.rs b/src-tauri/src/core/validate.rs index 44673cc0..204497a0 100644 --- a/src-tauri/src/core/validate.rs +++ b/src-tauri/src/core/validate.rs @@ -1,9 +1,9 @@ use anyhow::Result; use scopeguard::defer; use smartstring::alias::String; -use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; use tauri_plugin_shell::ShellExt; +use tokio::fs; use crate::config::{Config, ConfigType}; use crate::core::handle; @@ -33,19 +33,16 @@ impl CoreConfigValidator { impl CoreConfigValidator { /// 检查文件是否为脚本文件 - fn is_script_file

(path: P) -> Result - where - P: AsRef + std::fmt::Display, - { + async fn is_script_file(path: &str) -> Result { // 1. 先通过扩展名快速判断 - if has_ext(&path, "yaml") || has_ext(&path, "yml") { + if has_ext(path, "yaml") || has_ext(path, "yml") { return Ok(false); // YAML文件不是脚本文件 - } else if has_ext(&path, "js") { + } else if has_ext(path, "js") { return Ok(true); // JS文件是脚本文件 } // 2. 读取文件内容 - let content = match std::fs::read_to_string(&path) { + let content = match fs::read_to_string(path).await { Ok(content) => content, Err(err) => { logging!( @@ -115,11 +112,11 @@ impl CoreConfigValidator { } /// 只进行文件语法检查,不进行完整验证 - fn validate_file_syntax(config_path: &str) -> Result<(bool, String)> { + async fn validate_file_syntax(config_path: &str) -> Result<(bool, String)> { logging!(info, Type::Validate, "开始检查文件: {}", config_path); // 读取文件内容 - let content = match std::fs::read_to_string(config_path) { + let content = match fs::read_to_string(config_path).await { Ok(content) => content, Err(err) => { let error_msg = format!("Failed to read file: {err}").into(); @@ -144,9 +141,9 @@ impl CoreConfigValidator { } /// 验证脚本文件语法 - fn validate_script_file(path: &str) -> Result<(bool, String)> { + async fn validate_script_file(path: &str) -> Result<(bool, String)> { // 读取脚本内容 - let content = match std::fs::read_to_string(path) { + let content = match fs::read_to_string(path).await { Ok(content) => content, Err(err) => { let error_msg = format!("Failed to read script file: {err}").into(); @@ -216,14 +213,14 @@ impl CoreConfigValidator { "检测到Merge文件,仅进行语法检查: {}", config_path ); - return Self::validate_file_syntax(config_path); + return Self::validate_file_syntax(config_path).await; } // 检查是否为脚本文件 let is_script = if config_path.ends_with(".js") { true } else { - match Self::is_script_file(config_path) { + match Self::is_script_file(config_path).await { Ok(result) => result, Err(err) => { // 如果无法确定文件类型,尝试使用Clash内核验证 @@ -246,7 +243,7 @@ impl CoreConfigValidator { "检测到脚本文件,使用JavaScript验证: {}", 
config_path ); - return Self::validate_script_file(config_path); + return Self::validate_script_file(config_path).await; } // 对YAML配置文件使用Clash内核验证 diff --git a/src-tauri/src/enhance/chain.rs b/src-tauri/src/enhance/chain.rs index 8238061c..a9a81ac8 100644 --- a/src-tauri/src/enhance/chain.rs +++ b/src-tauri/src/enhance/chain.rs @@ -5,7 +5,7 @@ use crate::{ }; use serde_yaml_ng::Mapping; use smartstring::alias::String; -use std::fs; +use tokio::fs; #[derive(Debug, Clone)] pub struct ChainItem { @@ -83,7 +83,7 @@ impl AsyncChainItemFrom for Option { match itype { "script" => Some(ChainItem { uid, - data: ChainType::Script(fs::read_to_string(path).ok()?.into()), + data: ChainType::Script(fs::read_to_string(path).await.ok()?.into()), }), "merge" => Some(ChainItem { uid, diff --git a/src-tauri/src/enhance/mod.rs b/src-tauri/src/enhance/mod.rs index fc8089c0..7f7d71c8 100644 --- a/src-tauri/src/enhance/mod.rs +++ b/src-tauri/src/enhance/mod.rs @@ -6,10 +6,12 @@ pub mod seq; mod tun; use self::{chain::*, field::*, merge::*, script::*, seq::*, tun::*}; +use crate::utils::dirs; use crate::{config::Config, utils::tmpl}; use serde_yaml_ng::Mapping; use smartstring::alias::String; use std::collections::{HashMap, HashSet}; +use tokio::fs; type ResultLog = Vec<(String, String)>; #[derive(Debug)] @@ -437,34 +439,29 @@ fn apply_builtin_scripts( config } -fn apply_dns_settings(mut config: Mapping, enable_dns_settings: bool) -> Mapping { - if enable_dns_settings { - use crate::utils::dirs; - use std::fs; +async fn apply_dns_settings(mut config: Mapping, enable_dns_settings: bool) -> Mapping { + if enable_dns_settings && let Ok(app_dir) = dirs::app_home_dir() { + let dns_path = app_dir.join("dns_config.yaml"); - if let Ok(app_dir) = dirs::app_home_dir() { - let dns_path = app_dir.join("dns_config.yaml"); - - if dns_path.exists() - && let Ok(dns_yaml) = fs::read_to_string(&dns_path) - && let Ok(dns_config) = serde_yaml_ng::from_str::(&dns_yaml) + if dns_path.exists() + && let 
Ok(dns_yaml) = fs::read_to_string(&dns_path).await + && let Ok(dns_config) = serde_yaml_ng::from_str::(&dns_yaml) + { + if let Some(hosts_value) = dns_config.get("hosts") + && hosts_value.is_mapping() { - if let Some(hosts_value) = dns_config.get("hosts") - && hosts_value.is_mapping() - { - config.insert("hosts".into(), hosts_value.clone()); - log::info!(target: "app", "apply hosts configuration"); - } + config.insert("hosts".into(), hosts_value.clone()); + log::info!(target: "app", "apply hosts configuration"); + } - if let Some(dns_value) = dns_config.get("dns") { - if let Some(dns_mapping) = dns_value.as_mapping() { - config.insert("dns".into(), dns_mapping.clone().into()); - log::info!(target: "app", "apply dns_config.yaml (dns section)"); - } - } else { - config.insert("dns".into(), dns_config.into()); - log::info!(target: "app", "apply dns_config.yaml"); + if let Some(dns_value) = dns_config.get("dns") { + if let Some(dns_mapping) = dns_value.as_mapping() { + config.insert("dns".into(), dns_mapping.clone().into()); + log::info!(target: "app", "apply dns_config.yaml (dns section)"); } + } else { + config.insert("dns".into(), dns_config.into()); + log::info!(target: "app", "apply dns_config.yaml"); } } } @@ -540,7 +537,7 @@ pub async fn enhance() -> (Mapping, Vec, HashMap) { config = use_sort(config); // dns settings - config = apply_dns_settings(config, enable_dns_settings); + config = apply_dns_settings(config, enable_dns_settings).await; let mut exists_set = HashSet::new(); exists_set.extend(exists_keys); diff --git a/src-tauri/src/feat/backup.rs b/src-tauri/src/feat/backup.rs index e2f0957a..7d0bf9a8 100644 --- a/src-tauri/src/feat/backup.rs +++ b/src-tauri/src/feat/backup.rs @@ -2,6 +2,7 @@ use crate::{ config::{Config, IVerge}, core::backup, logging, logging_error, + process::AsyncHandler, utils::{ dirs::{PathBufExec, app_home_dir, local_backup_dir}, logging::Type, @@ -12,7 +13,8 @@ use chrono::Utc; use reqwest_dav::list_cmd::ListFile; use 
serde::Serialize; use smartstring::alias::String; -use std::{fs, path::PathBuf}; +use std::path::PathBuf; +use tokio::fs; #[derive(Debug, Serialize)] pub struct LocalBackupFile { @@ -24,7 +26,7 @@ pub struct LocalBackupFile { /// Create a backup and upload to WebDAV pub async fn create_backup_and_upload_webdav() -> Result<()> { - let (file_name, temp_file_path) = backup::create_backup().map_err(|err| { + let (file_name, temp_file_path) = backup::create_backup().await.map_err(|err| { logging!(error, Type::Backup, "Failed to create backup: {err:#?}"); err })?; @@ -97,7 +99,9 @@ pub async fn restore_webdav_backup(filename: String) -> Result<()> { })?; // extract zip file - let mut zip = zip::ZipArchive::new(fs::File::open(backup_storage_path.clone())?)?; + let value = backup_storage_path.clone(); + let file = AsyncHandler::spawn_blocking(move || std::fs::File::open(&value)).await??; + let mut zip = zip::ZipArchive::new(file)?; zip.extract(app_home_dir()?)?; logging_error!( Type::Backup, @@ -119,7 +123,7 @@ pub async fn restore_webdav_backup(filename: String) -> Result<()> { /// Create a backup and save to local storage pub async fn create_local_backup() -> Result<()> { - let (file_name, temp_file_path) = backup::create_backup().map_err(|err| { + let (file_name, temp_file_path) = backup::create_backup().await.map_err(|err| { logging!( error, Type::Backup, @@ -131,7 +135,7 @@ pub async fn create_local_backup() -> Result<()> { let backup_dir = local_backup_dir()?; let target_path = backup_dir.join(file_name.as_str()); - if let Err(err) = move_file(temp_file_path.clone(), target_path.clone()) { + if let Err(err) = move_file(temp_file_path.clone(), target_path.clone()).await { logging!( error, Type::Backup, @@ -151,12 +155,12 @@ pub async fn create_local_backup() -> Result<()> { Ok(()) } -fn move_file(from: PathBuf, to: PathBuf) -> Result<()> { +async fn move_file(from: PathBuf, to: PathBuf) -> Result<()> { if let Some(parent) = to.parent() { - fs::create_dir_all(parent)?; 
+ fs::create_dir_all(parent).await?; } - match fs::rename(&from, &to) { + match fs::rename(&from, &to).await { Ok(_) => Ok(()), Err(rename_err) => { // Attempt copy + remove as fallback, covering cross-device moves @@ -165,8 +169,11 @@ fn move_file(from: PathBuf, to: PathBuf) -> Result<()> { Type::Backup, "Failed to rename backup file directly, fallback to copy/remove: {rename_err:#?}" ); - fs::copy(&from, &to).map_err(|err| anyhow!("Failed to copy backup file: {err:#?}"))?; + fs::copy(&from, &to) + .await + .map_err(|err| anyhow!("Failed to copy backup file: {err:#?}"))?; fs::remove_file(&from) + .await .map_err(|err| anyhow!("Failed to remove temp backup file: {err:#?}"))?; Ok(()) } @@ -174,24 +181,25 @@ fn move_file(from: PathBuf, to: PathBuf) -> Result<()> { } /// List local backups -pub fn list_local_backup() -> Result> { +pub async fn list_local_backup() -> Result> { let backup_dir = local_backup_dir()?; if !backup_dir.exists() { return Ok(vec![]); } let mut backups = Vec::new(); - for entry in fs::read_dir(&backup_dir)? { - let entry = entry?; + let mut dir = fs::read_dir(&backup_dir).await?; + while let Some(entry) = dir.next_entry().await? 
{ let path = entry.path(); - if !path.is_file() { + let metadata = entry.metadata().await?; + if !metadata.is_file() { continue; } - let Some(file_name) = path.file_name().and_then(|name| name.to_str()) else { - continue; + let file_name = match path.file_name().and_then(|name| name.to_str()) { + Some(name) => name, + None => continue, }; - let metadata = entry.metadata()?; let last_modified = metadata .modified() .map(|time| chrono::DateTime::::from(time).to_rfc3339()) @@ -239,7 +247,8 @@ pub async fn restore_local_backup(filename: String) -> Result<()> { let webdav_username = verge_data.webdav_username.clone(); let webdav_password = verge_data.webdav_password.clone(); - let mut zip = zip::ZipArchive::new(fs::File::open(&target_path)?)?; + let file = AsyncHandler::spawn_blocking(move || std::fs::File::open(&target_path)).await??; + let mut zip = zip::ZipArchive::new(file)?; zip.extract(app_home_dir()?)?; logging_error!( Type::Backup, @@ -258,7 +267,7 @@ pub async fn restore_local_backup(filename: String) -> Result<()> { } /// Export local backup file to user selected destination -pub fn export_local_backup(filename: String, destination: String) -> Result<()> { +pub async fn export_local_backup(filename: String, destination: String) -> Result<()> { let backup_dir = local_backup_dir()?; let source_path = backup_dir.join(filename.as_str()); if !source_path.exists() { @@ -267,10 +276,11 @@ pub fn export_local_backup(filename: String, destination: String) -> Result<()> let dest_path = PathBuf::from(destination.as_str()); if let Some(parent) = dest_path.parent() { - fs::create_dir_all(parent)?; + fs::create_dir_all(parent).await?; } fs::copy(&source_path, &dest_path) + .await .map(|_| ()) .map_err(|err| anyhow!("Failed to export backup file: {err:#?}"))?; Ok(()) From 9dc50da16783b829592dc9c962c67501ea3463aa Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Sat, 1 Nov 2025 17:08:17 +0800 Subject: [PATCH 57/70] fix: profile auto refresh #5274 --- src-tauri/Cargo.lock | 93 
+++++++++++++++---------- src-tauri/Cargo.toml | 4 +- src-tauri/src/cmd/profile.rs | 2 +- src-tauri/src/core/timer.rs | 2 +- src-tauri/src/feat/profile.rs | 20 ++++-- src/components/profile/profile-item.tsx | 3 +- 6 files changed, 74 insertions(+), 50 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index d1264cff..c1bdfecd 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -479,7 +479,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.32", "itoa", - "matchit", + "matchit 0.7.3", "memchr", "mime", "percent-encoding", @@ -494,25 +494,23 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.9" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871" dependencies = [ - "async-trait", - "axum-core 0.4.5", + "axum-core 0.5.5", "bytes", "futures-util", "http 1.3.1", "http-body 1.0.1", "http-body-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "sync_wrapper 1.0.2", "tower 0.5.2", "tower-layer", @@ -538,19 +536,17 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.5" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" dependencies = [ - "async-trait", "bytes", - "futures-util", + "futures-core", "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", - "rustversion", "sync_wrapper 1.0.2", "tower-layer", "tower-service", @@ -1289,22 +1285,23 @@ dependencies = [ [[package]] name = "console-api" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857" +checksum = "e8599749b6667e2f0c910c1d0dff6901163ff698a52d5a39720f61b5be4b20d3" dependencies = [ "futures-core", - "prost 0.13.5", - "prost-types 0.13.5", - "tonic 0.12.3", + "prost 0.14.1", + "prost-types 0.14.1", + "tonic 0.14.2", + "tonic-prost", "tracing-core", ] [[package]] name = "console-subscriber" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01" +checksum = "fb4915b7d8dd960457a1b6c380114c2944f728e7c65294ab247ae6b6f1f37592" dependencies = [ "console-api", "crossbeam-channel", @@ -1313,14 +1310,14 @@ dependencies = [ "hdrhistogram", "humantime", "hyper-util", - "prost 0.13.5", - "prost-types 0.13.5", + "prost 0.14.1", + "prost-types 0.14.1", "serde", "serde_json", "thread_local", "tokio", "tokio-stream", - "tonic 0.12.3", + "tonic 0.14.2", "tracing", "tracing-core", "tracing-subscriber", @@ -4131,6 +4128,12 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "md-5" version = "0.10.6" @@ -5613,12 +5616,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", - "prost-derive 0.13.5", + "prost-derive 0.14.1", ] [[package]] @@ -5636,9 +5639,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -5658,11 +5661,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ - "prost 0.13.5", + "prost 0.14.1", ] [[package]] @@ -8240,13 +8243,12 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ - "async-stream", "async-trait", - "axum 0.7.9", + "axum 0.8.6", "base64 0.22.1", "bytes", "h2 0.4.12", @@ -8258,11 +8260,11 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.5", - "socket2 0.5.10", + "socket2 0.6.1", + "sync_wrapper 1.0.2", "tokio", "tokio-stream", - "tower 0.4.13", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -8281,6 +8283,17 @@ dependencies = [ "tonic 0.10.2", ] +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost 0.14.1", + "tonic 0.14.2", +] + [[package]] name = "tonic-web" version = "0.10.2" @@ -8329,11 +8342,15 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", + "indexmap 2.12.0", "pin-project-lite", + "slab", "sync_wrapper 1.0.2", 
"tokio", + "tokio-util", "tower-layer", "tower-service", + "tracing", ] [[package]] diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index cc6caacc..b5e3a424 100755 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -42,7 +42,7 @@ serde = { version = "1.0.228", features = ["derive"] } reqwest = { version = "0.12.24", features = ["json", "cookies"] } regex = "1.12.2" sysproxy = { git = "https://github.com/clash-verge-rev/sysproxy-rs" } -tauri = { version = "2.9.1", features = [ +tauri = { version = "2.9.2", features = [ "protocol-asset", "devtools", "tray-icon", @@ -77,7 +77,7 @@ backoff = { version = "0.4.0", features = ["tokio"] } compact_str = { version = "0.9.0", features = ["serde"] } tauri-plugin-http = "2.5.4" flexi_logger = "0.31.7" -console-subscriber = { version = "0.4.1", optional = true } +console-subscriber = { version = "0.5.0", optional = true } tauri-plugin-devtools = { version = "2.0.1" } tauri-plugin-mihomo = { git = "https://github.com/clash-verge-rev/tauri-plugin-mihomo" } clash_verge_logger = { git = "https://github.com/clash-verge-rev/clash-verge-logger" } diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index fc36cf56..5ed734d2 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -180,7 +180,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu /// 更新配置文件 #[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { - match feat::update_profile(index, option, Some(true)).await { + match feat::update_profile(index, option, Some(true), Some(true)).await { Ok(_) => Ok(()), Err(e) => { log::error!(target: "app", "{}", e); diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index 16bb0232..47fc7f2f 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -492,7 +492,7 @@ impl Timer { is_current ); - feat::update_profile(uid.clone(), None, Some(is_current)).await + 
feat::update_profile(uid.clone(), None, Some(is_current), None).await }) .await { diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index ffba85d0..7f2ce798 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -23,7 +23,10 @@ pub async fn toggle_proxy_profile(profile_index: String) { } } -async fn should_update_profile(uid: String) -> Result)>> { +async fn should_update_profile( + uid: String, + ignore_auto_update: bool, +) -> Result)>> { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); let item = profiles.get_item(&uid)?; @@ -35,11 +38,12 @@ async fn should_update_profile(uid: String) -> Result, auto_refresh: Option, + ignore_auto_update: Option, ) -> Result<()> { logging!(info, Type::Config, "[订阅更新] 开始更新订阅 {}", uid); let auto_refresh = auto_refresh.unwrap_or(true); + let ignore_auto_update = ignore_auto_update.unwrap_or(false); - let url_opt = should_update_profile(uid.clone()).await?; + let url_opt = should_update_profile(uid.clone(), ignore_auto_update).await?; let should_refresh = match url_opt { Some((url, opt)) => { diff --git a/src/components/profile/profile-item.tsx b/src/components/profile/profile-item.tsx index c9ffeb23..48aa6385 100644 --- a/src/components/profile/profile-item.tsx +++ b/src/components/profile/profile-item.tsx @@ -341,7 +341,8 @@ export const ProfileItem = (props: Props) => { try { // 调用后端更新(后端会自动处理回退逻辑) - await updateProfile(itemData.uid, option); + const payload = Object.keys(option).length > 0 ? 
option : undefined; + await updateProfile(itemData.uid, payload); // 更新成功,刷新列表 mutate("getProfiles"); From 30d1655e076ba4065cef62fca8215e73d3611e0c Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Sat, 1 Nov 2025 17:31:10 +0800 Subject: [PATCH 58/70] docs: UPDATELOG.md --- UPDATELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/UPDATELOG.md b/UPDATELOG.md index ebd8f657..8b7081cf 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -34,6 +34,8 @@ - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 - 修复 macOS 从 Dock 栏退出轻量模式状态不同步 +- 修复 Linux 系统主题切换不生效 +- 修复 `允许自动更新` 字段使手动订阅刷新失效

✨ 新增功能 From 73e53eb33f186a5708ed539c1bc8000e0b9ceb7b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 1 Nov 2025 19:42:34 +0800 Subject: [PATCH 59/70] chore(deps): update npm dependencies (#5278) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 8 +- pnpm-lock.yaml | 312 ++++++++++++++++++++++++------------------------- 2 files changed, 160 insertions(+), 160 deletions(-) diff --git a/package.json b/package.json index a93f66b0..f4cae993 100644 --- a/package.json +++ b/package.json @@ -67,7 +67,7 @@ "react": "19.2.0", "react-dom": "19.2.0", "react-error-boundary": "6.0.0", - "react-hook-form": "^7.65.0", + "react-hook-form": "^7.66.0", "react-i18next": "16.2.3", "react-markdown": "10.1.0", "react-monaco-editor": "0.59.0", @@ -80,7 +80,7 @@ "devDependencies": { "@actions/github": "^6.0.1", "@eslint-react/eslint-plugin": "^2.2.4", - "@eslint/js": "^9.38.0", + "@eslint/js": "^9.39.0", "@tauri-apps/cli": "2.9.2", "@types/js-yaml": "^4.0.9", "@types/lodash-es": "^4.17.12", @@ -93,7 +93,7 @@ "cli-color": "^2.0.4", "commander": "^14.0.2", "cross-env": "^10.1.0", - "eslint": "^9.38.0", + "eslint": "^9.39.0", "eslint-config-prettier": "^10.1.8", "eslint-import-resolver-typescript": "^4.4.4", "eslint-plugin-import-x": "^4.16.1", @@ -102,7 +102,7 @@ "eslint-plugin-react-refresh": "^0.4.24", "eslint-plugin-unused-imports": "^4.3.0", "glob": "^11.0.3", - "globals": "^16.4.0", + "globals": "^16.5.0", "https-proxy-agent": "^7.0.6", "husky": "^9.1.7", "jiti": "^2.6.1", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6e293b60..5c0c6be2 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -108,8 +108,8 @@ importers: specifier: 6.0.0 version: 6.0.0(react@19.2.0) react-hook-form: - specifier: ^7.65.0 - version: 7.65.0(react@19.2.0) + specifier: ^7.66.0 + version: 7.66.0(react@19.2.0) react-i18next: specifier: 16.2.3 version: 
16.2.3(i18next@25.6.0(typescript@5.9.3))(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(typescript@5.9.3) @@ -140,10 +140,10 @@ importers: version: 6.0.1 '@eslint-react/eslint-plugin': specifier: ^2.2.4 - version: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + version: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint/js': - specifier: ^9.38.0 - version: 9.38.0 + specifier: ^9.39.0 + version: 9.39.0 '@tauri-apps/cli': specifier: 2.9.2 version: 2.9.2 @@ -181,35 +181,35 @@ importers: specifier: ^10.1.0 version: 10.1.0 eslint: - specifier: ^9.38.0 - version: 9.38.0(jiti@2.6.1) + specifier: ^9.39.0 + version: 9.39.0(jiti@2.6.1) eslint-config-prettier: specifier: ^10.1.8 - version: 10.1.8(eslint@9.38.0(jiti@2.6.1)) + version: 10.1.8(eslint@9.39.0(jiti@2.6.1)) eslint-import-resolver-typescript: specifier: ^4.4.4 - version: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.38.0(jiti@2.6.1)) + version: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.39.0(jiti@2.6.1)) eslint-plugin-import-x: specifier: ^4.16.1 - version: 4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)) + version: 4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)) eslint-plugin-prettier: specifier: ^5.5.4 - version: 5.5.4(eslint-config-prettier@10.1.8(eslint@9.38.0(jiti@2.6.1)))(eslint@9.38.0(jiti@2.6.1))(prettier@3.6.2) + version: 5.5.4(eslint-config-prettier@10.1.8(eslint@9.39.0(jiti@2.6.1)))(eslint@9.39.0(jiti@2.6.1))(prettier@3.6.2) eslint-plugin-react-hooks: specifier: 
^7.0.1 - version: 7.0.1(eslint@9.38.0(jiti@2.6.1)) + version: 7.0.1(eslint@9.39.0(jiti@2.6.1)) eslint-plugin-react-refresh: specifier: ^0.4.24 - version: 0.4.24(eslint@9.38.0(jiti@2.6.1)) + version: 0.4.24(eslint@9.39.0(jiti@2.6.1)) eslint-plugin-unused-imports: specifier: ^4.3.0 - version: 4.3.0(@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1)) + version: 4.3.0(@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1)) glob: specifier: ^11.0.3 version: 11.0.3 globals: - specifier: ^16.4.0 - version: 16.4.0 + specifier: ^16.5.0 + version: 16.5.0 https-proxy-agent: specifier: ^7.0.6 version: 7.0.6 @@ -245,7 +245,7 @@ importers: version: 5.9.3 typescript-eslint: specifier: ^8.46.2 - version: 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + version: 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) vite: specifier: ^7.1.12 version: 7.1.12(@types/node@24.9.2)(jiti@2.6.1)(sass@1.93.3)(terser@5.44.0)(yaml@2.8.1) @@ -1041,28 +1041,28 @@ packages: resolution: {integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/config-helpers@0.4.1': - resolution: {integrity: sha512-csZAzkNhsgwb0I/UAV6/RGFTbiakPCf0ZrGmrIxQpYvGZ00PhTkSnyKNolphgIvmnJeGw6rcGVEXfTzUnFuEvw==} + '@eslint/config-helpers@0.4.2': + resolution: {integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/core@0.16.0': - resolution: {integrity: sha512-nmC8/totwobIiFcGkDza3GIKfAw1+hLiYVrh3I1nIomQ8PEr5cxg34jnkmGawul/ep52wGRAcyeDCNtWKSOj4Q==} + '@eslint/core@0.17.0': + resolution: {integrity: 
sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@eslint/eslintrc@3.3.1': resolution: {integrity: sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/js@9.38.0': - resolution: {integrity: sha512-UZ1VpFvXf9J06YG9xQBdnzU+kthors6KjhMAl6f4gH4usHyh31rUf2DLGInT8RFYIReYXNSydgPY0V2LuWgl7A==} + '@eslint/js@9.39.0': + resolution: {integrity: sha512-BIhe0sW91JGPiaF1mOuPy5v8NflqfjIcDNpC+LbW9f609WVRX1rArrhi6Z2ymvrAry9jw+5POTj4t2t62o8Bmw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@eslint/object-schema@2.1.7': resolution: {integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/plugin-kit@0.4.0': - resolution: {integrity: sha512-sB5uyeq+dwCWyPi31B2gQlVlo+j5brPlWx4yZBrEaRo/nhdDE8Xke1gsGgtiBdaBTxuTkceLVuVt/pclrasb0A==} + '@eslint/plugin-kit@0.4.1': + resolution: {integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@fastify/busboy@2.1.1': @@ -2670,8 +2670,8 @@ packages: resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - eslint@9.38.0: - resolution: {integrity: sha512-t5aPOpmtJcZcz5UJyY2GbvpDlsK5E8JqRqoKtfiKE3cNh437KIqfJr3A3AKf5k64NPx6d0G3dno6XDY05PqPtw==} + eslint@9.39.0: + resolution: {integrity: sha512-iy2GE3MHrYTL5lrCtMZ0X1KLEKKUjmK0kzwcnefhR66txcEmXZD2YWgR5GNdcEwkNx3a0siYkSvl0vIC+Svjmg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} hasBin: true peerDependencies: @@ -2871,8 +2871,8 @@ packages: resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} engines: {node: 
'>=18'} - globals@16.4.0: - resolution: {integrity: sha512-ob/2LcVVaVGCYN+r14cnwnoDPUufjiYgSqRhiFD0Q1iI4Odora5RE8Iv1D24hAz5oMophRGkGz+yuvQmmUMnMw==} + globals@16.5.0: + resolution: {integrity: sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==} engines: {node: '>=18'} globalthis@1.0.4: @@ -3618,8 +3618,8 @@ packages: react-fast-compare@3.2.2: resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==} - react-hook-form@7.65.0: - resolution: {integrity: sha512-xtOzDz063WcXvGWaHgLNrNzlsdFgtUWcb32E6WFaGTd7kPZG3EeDusjdZfUsPwKCKVXy1ZlntifaHZ4l8pAsmw==} + react-hook-form@7.66.0: + resolution: {integrity: sha512-xXBqsWGKrY46ZqaHDo+ZUYiMUgi8suYu5kdrS20EG8KiL7VRQitEbNjm+UcrDYrNi1YLyfpmAeGjCZYXLT9YBw==} engines: {node: '>=18.0.0'} peerDependencies: react: ^16.8.0 || ^17 || ^18 || ^19 @@ -5199,34 +5199,34 @@ snapshots: '@esbuild/win32-x64@0.25.4': optional: true - '@eslint-community/eslint-utils@4.8.0(eslint@9.38.0(jiti@2.6.1))': + '@eslint-community/eslint-utils@4.8.0(eslint@9.39.0(jiti@2.6.1))': dependencies: - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) eslint-visitor-keys: 3.4.3 '@eslint-community/regexpp@4.12.1': {} - '@eslint-react/ast@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/ast@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-react/eff': 2.2.4 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) string-ts: 2.2.1 transitivePeerDependencies: - eslint - supports-color - typescript - '@eslint-react/core@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/core@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/ast': 
2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) birecord: 0.1.1 ts-pattern: 5.9.0 transitivePeerDependencies: @@ -5236,29 +5236,29 @@ snapshots: '@eslint-react/eff@2.2.4': {} - '@eslint-react/eslint-plugin@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/eslint-plugin@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 - '@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) - eslint-plugin-react-dom: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-hooks-extra: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-naming-convention: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-web-api: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-x: 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 
8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) + eslint-plugin-react-dom: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-hooks-extra: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-naming-convention: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-web-api: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-x: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@eslint-react/shared@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/shared@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-react/eff': 2.2.4 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) ts-pattern: 5.9.0 zod: 4.1.12 transitivePeerDependencies: @@ -5266,13 +5266,13 @@ snapshots: - supports-color - typescript - '@eslint-react/var@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/var@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) ts-pattern: 5.9.0 transitivePeerDependencies: - eslint @@ -5287,11 +5287,11 @@ snapshots: transitivePeerDependencies: - supports-color - '@eslint/config-helpers@0.4.1': + '@eslint/config-helpers@0.4.2': dependencies: - '@eslint/core': 0.16.0 + '@eslint/core': 0.17.0 - '@eslint/core@0.16.0': + '@eslint/core@0.17.0': dependencies: 
'@types/json-schema': 7.0.15 @@ -5309,13 +5309,13 @@ snapshots: transitivePeerDependencies: - supports-color - '@eslint/js@9.38.0': {} + '@eslint/js@9.39.0': {} '@eslint/object-schema@2.1.7': {} - '@eslint/plugin-kit@0.4.0': + '@eslint/plugin-kit@0.4.1': dependencies: - '@eslint/core': 0.16.0 + '@eslint/core': 0.17.0 levn: 0.4.1 '@fastify/busboy@2.1.1': {} @@ -6006,15 +6006,15 @@ snapshots: '@types/unist@3.0.3': {} - '@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/parser': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 - '@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.46.2 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) graphemer: 1.4.0 ignore: 7.0.5 natural-compare: 1.4.0 @@ -6023,14 +6023,14 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.46.2 debug: 4.4.3 - eslint: 9.38.0(jiti@2.6.1) + 
eslint: 9.39.0(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -6053,13 +6053,13 @@ snapshots: dependencies: typescript: 5.9.3 - '@typescript-eslint/type-utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/type-utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: '@typescript-eslint/types': 8.46.2 '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) debug: 4.4.3 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: @@ -6083,13 +6083,13 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3)': + '@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-community/eslint-utils': 4.8.0(eslint@9.38.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.8.0(eslint@9.39.0(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -6805,9 +6805,9 @@ snapshots: escape-string-regexp@4.0.0: {} - eslint-config-prettier@10.1.8(eslint@9.38.0(jiti@2.6.1)): + eslint-config-prettier@10.1.8(eslint@9.39.0(jiti@2.6.1)): dependencies: - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) eslint-import-context@0.1.9(unrs-resolver@1.11.1): dependencies: @@ -6825,10 +6825,10 @@ snapshots: - supports-color optional: true - 
eslint-import-resolver-typescript@4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.38.0(jiti@2.6.1)): + eslint-import-resolver-typescript@4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.39.0(jiti@2.6.1)): dependencies: debug: 4.4.3 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) eslint-import-context: 0.1.9(unrs-resolver@1.11.1) get-tsconfig: 4.10.1 is-bun-module: 2.0.0 @@ -6836,29 +6836,29 @@ snapshots: tinyglobby: 0.2.15 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@4.4.4)(eslint@9.38.0(jiti@2.6.1)) - eslint-plugin-import-x: 4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@4.4.4)(eslint@9.39.0(jiti@2.6.1)) + eslint-plugin-import-x: 4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)) transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.1(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@4.4.4)(eslint@9.38.0(jiti@2.6.1)): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@4.4.4)(eslint@9.39.0(jiti@2.6.1)): dependencies: 
debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/parser': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.38.0(jiti@2.6.1)) + eslint-import-resolver-typescript: 4.4.4(eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)))(eslint-plugin-import@2.32.0)(eslint@9.39.0(jiti@2.6.1)) transitivePeerDependencies: - supports-color optional: true - eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.38.0(jiti@2.6.1)): + eslint-plugin-import-x@4.16.1(@typescript-eslint/utils@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.0(jiti@2.6.1)): dependencies: '@typescript-eslint/types': 8.46.2 comment-parser: 1.4.1 debug: 4.4.3 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) eslint-import-context: 0.1.9(unrs-resolver@1.11.1) is-glob: 4.0.3 minimatch: 10.0.3 @@ -6866,12 +6866,12 @@ snapshots: stable-hash-x: 0.2.0 unrs-resolver: 1.11.1 optionalDependencies: - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) eslint-import-resolver-node: 0.3.9 transitivePeerDependencies: - supports-color - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@4.4.4)(eslint@9.38.0(jiti@2.6.1)): + 
eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-typescript@4.4.4)(eslint@9.39.0(jiti@2.6.1)): dependencies: '@rtsao/scc': 1.1.0 array-includes: 3.1.9 @@ -6880,9 +6880,9 @@ snapshots: array.prototype.flatmap: 1.3.3 debug: 3.2.7 doctrine: 2.1.0 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@4.4.4)(eslint@9.38.0(jiti@2.6.1)) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@4.4.4)(eslint@9.39.0(jiti@2.6.1)) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -6894,122 +6894,122 @@ snapshots: string.prototype.trimend: 1.0.9 tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/parser': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack - supports-color optional: true - eslint-plugin-prettier@5.5.4(eslint-config-prettier@10.1.8(eslint@9.38.0(jiti@2.6.1)))(eslint@9.38.0(jiti@2.6.1))(prettier@3.6.2): + eslint-plugin-prettier@5.5.4(eslint-config-prettier@10.1.8(eslint@9.39.0(jiti@2.6.1)))(eslint@9.39.0(jiti@2.6.1))(prettier@3.6.2): dependencies: - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) prettier: 3.6.2 prettier-linter-helpers: 1.0.0 synckit: 0.11.11 optionalDependencies: - eslint-config-prettier: 10.1.8(eslint@9.38.0(jiti@2.6.1)) + eslint-config-prettier: 10.1.8(eslint@9.39.0(jiti@2.6.1)) - eslint-plugin-react-dom@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + 
eslint-plugin-react-dom@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) compare-versions: 6.1.1 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.9.0 typescript: 5.9.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks-extra@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-hooks-extra@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 - 
'@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.9.0 typescript: 5.9.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks@7.0.1(eslint@9.38.0(jiti@2.6.1)): + eslint-plugin-react-hooks@7.0.1(eslint@9.39.0(jiti@2.6.1)): dependencies: '@babel/core': 7.28.4 '@babel/parser': 7.28.4 - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) hermes-parser: 0.25.1 zod: 4.1.12 zod-validation-error: 4.0.2(zod@4.1.12) transitivePeerDependencies: - supports-color - eslint-plugin-react-naming-convention@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-naming-convention@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 - '@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 
8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.9.0 typescript: 5.9.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-refresh@0.4.24(eslint@9.38.0(jiti@2.6.1)): + eslint-plugin-react-refresh@0.4.24(eslint@9.39.0(jiti@2.6.1)): dependencies: - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) - eslint-plugin-react-web-api@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-web-api@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) string-ts: 2.2.1 ts-pattern: 5.9.0 typescript: 5.9.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-x@2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-x@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 
2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 - '@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) compare-versions: 6.1.1 - eslint: 9.38.0(jiti@2.6.1) - is-immutable-type: 5.0.1(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) + is-immutable-type: 5.0.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) string-ts: 2.2.1 ts-api-utils: 2.1.0(typescript@5.9.3) ts-pattern: 5.9.0 @@ -7017,11 +7017,11 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-unused-imports@4.3.0(@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1)): + eslint-plugin-unused-imports@4.3.0(@typescript-eslint/eslint-plugin@8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1)): dependencies: - eslint: 9.38.0(jiti@2.6.1) + eslint: 9.39.0(jiti@2.6.1) optionalDependencies: - '@typescript-eslint/eslint-plugin': 
8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) eslint-scope@8.4.0: dependencies: @@ -7032,16 +7032,16 @@ snapshots: eslint-visitor-keys@4.2.1: {} - eslint@9.38.0(jiti@2.6.1): + eslint@9.39.0(jiti@2.6.1): dependencies: - '@eslint-community/eslint-utils': 4.8.0(eslint@9.38.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.8.0(eslint@9.39.0(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.1 '@eslint/config-array': 0.21.1 - '@eslint/config-helpers': 0.4.1 - '@eslint/core': 0.16.0 + '@eslint/config-helpers': 0.4.2 + '@eslint/core': 0.17.0 '@eslint/eslintrc': 3.3.1 - '@eslint/js': 9.38.0 - '@eslint/plugin-kit': 0.4.0 + '@eslint/js': 9.39.0 + '@eslint/plugin-kit': 0.4.1 '@humanfs/node': 0.16.6 '@humanwhocodes/module-importer': 1.0.1 '@humanwhocodes/retry': 0.4.3 @@ -7273,7 +7273,7 @@ snapshots: globals@14.0.0: {} - globals@16.4.0: {} + globals@16.5.0: {} globalthis@1.0.4: dependencies: @@ -7478,10 +7478,10 @@ snapshots: is-hexadecimal@2.0.1: {} - is-immutable-type@5.0.1(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): + is-immutable-type@5.0.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/type-utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) ts-api-utils: 2.1.0(typescript@5.9.3) ts-declaration-location: 1.0.7(typescript@5.9.3) typescript: 5.9.3 @@ -8179,7 +8179,7 @@ snapshots: react-fast-compare@3.2.2: {} - react-hook-form@7.65.0(react@19.2.0): + react-hook-form@7.66.0(react@19.2.0): dependencies: react: 19.2.0 @@ -8719,13 +8719,13 @@ snapshots: types-pac@1.0.3: {} - typescript-eslint@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3): 
+ typescript-eslint@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@typescript-eslint/eslint-plugin': 8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - '@typescript-eslint/parser': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/eslint-plugin': 8.46.2(@typescript-eslint/parser@8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@typescript-eslint/parser': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) - '@typescript-eslint/utils': 8.46.2(eslint@9.38.0(jiti@2.6.1))(typescript@5.9.3) - eslint: 9.38.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint: 9.39.0(jiti@2.6.1) typescript: 5.9.3 transitivePeerDependencies: - supports-color From 9370a56337dc117640e385ce884dc9aa128bea37 Mon Sep 17 00:00:00 2001 From: Tunglies Date: Sat, 1 Nov 2025 20:03:56 +0800 Subject: [PATCH 60/70] refactor: reduce clone operation (#5268) * refactor: optimize item handling and improve profile management * refactor: update IVerge references to use references instead of owned values * refactor: update patch_verge to use data_ref for improved data handling * refactor: move handle_copy function to improve resource initialization logic * refactor: update profile handling to use references for improved memory efficiency * refactor: simplify get_item method and update profile item retrieval to use string slices * refactor: update profile validation and patching to use references for improved performance * refactor: update profile functions to use references for improved performance and memory efficiency * refactor: update profile patching functions to use references for improved memory efficiency * refactor: simplify merge function in PrfOption to enhance readability * refactor: update change_core 
function to accept a reference for improved memory efficiency * refactor: update PrfItem and profile functions to use references for improved memory efficiency * refactor: update resolve_scheme function to accept a reference for improved memory efficiency * refactor: update resolve_scheme function to accept a string slice for improved flexibility * refactor: simplify update_profile parameters and logic --- src-tauri/Cargo.lock | 12 +- src-tauri/src/cmd/clash.rs | 5 +- src-tauri/src/cmd/profile.rs | 16 +-- src-tauri/src/cmd/save_profile.rs | 4 +- src-tauri/src/cmd/verge.rs | 4 +- src-tauri/src/cmd/webdav.rs | 5 +- src-tauri/src/config/config.rs | 12 +- src-tauri/src/config/prfitem.rs | 166 +++++++++++++----------- src-tauri/src/config/profiles.rs | 67 +++++----- src-tauri/src/config/verge.rs | 10 +- src-tauri/src/core/manager/lifecycle.rs | 15 +-- src-tauri/src/core/timer.rs | 12 +- src-tauri/src/enhance/mod.rs | 14 +- src-tauri/src/feat/backup.rs | 4 +- src-tauri/src/feat/config.rs | 13 +- src-tauri/src/feat/profile.rs | 39 +++--- src-tauri/src/feat/proxy.rs | 4 +- src-tauri/src/lib.rs | 18 +-- src-tauri/src/utils/draft.rs | 5 + src-tauri/src/utils/init.rs | 42 +++--- src-tauri/src/utils/resolve/mod.rs | 3 +- src-tauri/src/utils/resolve/scheme.rs | 24 ++-- src-tauri/src/utils/resolve/window.rs | 12 +- src-tauri/src/utils/server.rs | 3 +- 24 files changed, 258 insertions(+), 251 deletions(-) diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index c1bdfecd..4c01e031 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -962,9 +962,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.44" +version = "1.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3" +checksum = "739eb0f94557554b3ca9a86d2d37bebd49c5e6d0c1d2bda35ba5bdac830befc2" dependencies = [ "find-msvc-tools", "jobserver", @@ -1161,8 +1161,8 @@ dependencies = [ [[package]] name = 
"clash_verge_logger" -version = "0.2.1" -source = "git+https://github.com/clash-verge-rev/clash-verge-logger#955f1b709890640ff01fd30009df0f35816bbca6" +version = "0.2.0" +source = "git+https://github.com/clash-verge-rev/clash-verge-logger#9bb189b5b5c4c2eee35168ff4997e8fb10901c81" dependencies = [ "arraydeque", "compact_str", @@ -8752,9 +8752,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version-compare" -version = "0.2.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c2856837ef78f57382f06b2b8563a2f512f7185d732608fd9176cb3b8edf0e" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" [[package]] name = "version_check" diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs index f429e4fd..37033f48 100644 --- a/src-tauri/src/cmd/clash.rs +++ b/src-tauri/src/cmd/clash.rs @@ -41,10 +41,7 @@ pub async fn patch_clash_mode(payload: String) -> CmdResult { pub async fn change_clash_core(clash_core: String) -> CmdResult> { logging!(info, Type::Config, "changing core to {clash_core}"); - match CoreManager::global() - .change_core(Some(clash_core.clone())) - .await - { + match CoreManager::global().change_core(&clash_core).await { Ok(_) => { // 切换内核后重启内核 match CoreManager::global().restart_core().await { diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 5ed734d2..8080fe12 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -99,7 +99,7 @@ pub async fn import_profile(url: std::string::String, option: Option) logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url); // 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹 - let item = match PrfItem::from_url(&url, None, None, option).await { + let item = &mut match PrfItem::from_url(&url, None, None, option.as_ref()).await { Ok(it) => { logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置"); it @@ -110,7 +110,7 @@ pub async fn 
import_profile(url: std::string::String, option: Option) } }; - match profiles_append_item_safe(item.clone()).await { + match profiles_append_item_safe(item).await { Ok(_) => match profiles_save_file_safe().await { Ok(_) => { logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功"); @@ -145,7 +145,7 @@ pub async fn import_profile(url: std::string::String, option: Option) /// 调整profile的顺序 #[tauri::command] pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { - match profiles_reorder_safe(active_id, over_id).await { + match profiles_reorder_safe(&active_id, &over_id).await { Ok(_) => { log::info!(target: "app", "重新排序配置文件"); Ok(()) @@ -161,7 +161,7 @@ pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { /// 创建一个新的配置文件 #[tauri::command] pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResult { - match profiles_append_item_with_filedata_safe(item.clone(), file_data).await { + match profiles_append_item_with_filedata_safe(&item, file_data).await { Ok(_) => { // 发送配置变更通知 if let Some(uid) = &item.uid { @@ -180,7 +180,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu /// 更新配置文件 #[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { - match feat::update_profile(index, option, Some(true), Some(true)).await { + match feat::update_profile(&index, option.as_ref(), true, true).await { Ok(_) => Ok(()), Err(e) => { log::error!(target: "app", "{}", e); @@ -194,9 +194,7 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu pub async fn delete_profile(index: String) -> CmdResult { println!("delete_profile: {}", index); // 使用Send-safe helper函数 - let should_update = profiles_delete_item_safe(index.clone()) - .await - .stringify_err()?; + let should_update = profiles_delete_item_safe(&index).await.stringify_err()?; profiles_save_file_safe().await.stringify_err()?; if should_update { @@ -585,7 +583,7 @@ pub async fn patch_profile(index: String, 
profile: PrfItem) -> CmdResult { false }; - profiles_patch_item_safe(index.clone(), profile) + profiles_patch_item_safe(&index, &profile) .await .stringify_err()?; diff --git a/src-tauri/src/cmd/save_profile.rs b/src-tauri/src/cmd/save_profile.rs index cb216e8b..1e905e63 100644 --- a/src-tauri/src/cmd/save_profile.rs +++ b/src-tauri/src/cmd/save_profile.rs @@ -141,7 +141,7 @@ async fn handle_full_validation( Type::Config, "[cmd配置save] YAML配置文件验证失败,发送通知" ); - let result = (false, error_msg.clone()); + let result = (false, error_msg.to_owned()); crate::cmd::validate::handle_yaml_validation_notice(&result, "YAML配置文件"); } else if is_script_error(&error_msg, file_path_str) { logging!( @@ -149,7 +149,7 @@ async fn handle_full_validation( Type::Config, "[cmd配置save] 脚本文件验证失败,发送通知" ); - let result = (false, error_msg.clone()); + let result = (false, error_msg.to_owned()); crate::cmd::validate::handle_script_validation_notice(&result, "脚本文件"); } else { logging!( diff --git a/src-tauri/src/cmd/verge.rs b/src-tauri/src/cmd/verge.rs index eb74339f..cce4043d 100644 --- a/src-tauri/src/cmd/verge.rs +++ b/src-tauri/src/cmd/verge.rs @@ -9,12 +9,12 @@ pub async fn get_verge_config() -> CmdResult { let ref_data = verge.latest_ref(); ref_data.clone() }; - let verge_response = IVergeResponse::from(*verge_data); + let verge_response = IVergeResponse::from(verge_data); Ok(verge_response) } /// 修改Verge配置 #[tauri::command] pub async fn patch_verge_config(payload: IVerge) -> CmdResult { - feat::patch_verge(payload, false).await.stringify_err() + feat::patch_verge(&payload, false).await.stringify_err() } diff --git a/src-tauri/src/cmd/webdav.rs b/src-tauri/src/cmd/webdav.rs index 6c27868d..1de27b70 100644 --- a/src-tauri/src/cmd/webdav.rs +++ b/src-tauri/src/cmd/webdav.rs @@ -12,10 +12,7 @@ pub async fn save_webdav_config(url: String, username: String, password: String) webdav_password: Some(password), ..IVerge::default() }; - Config::verge() - .await - .draft_mut() - 
.patch_config(patch.clone()); + Config::verge().await.draft_mut().patch_config(&patch); Config::verge().await.apply(); // 分离数据获取和异步调用 diff --git a/src-tauri/src/config/config.rs b/src-tauri/src/config/config.rs index 246a4c6b..f9bbcf0a 100644 --- a/src-tauri/src/config/config.rs +++ b/src-tauri/src/config/config.rs @@ -83,13 +83,13 @@ impl Config { // Ensure "Merge" and "Script" profile items exist, adding them if missing. async fn ensure_default_profile_items() -> Result<()> { let profiles = Self::profiles().await; - if profiles.latest_ref().get_item(&"Merge".into()).is_err() { - let merge_item = PrfItem::from_merge(Some("Merge".into()))?; - profiles_append_item_safe(merge_item.clone()).await?; + if profiles.latest_ref().get_item("Merge").is_err() { + let merge_item = &mut PrfItem::from_merge(Some("Merge".into()))?; + profiles_append_item_safe(merge_item).await?; } - if profiles.latest_ref().get_item(&"Script".into()).is_err() { - let script_item = PrfItem::from_script(Some("Script".into()))?; - profiles_append_item_safe(script_item.clone()).await?; + if profiles.latest_ref().get_item("Script").is_err() { + let script_item = &mut PrfItem::from_script(Some("Script".into()))?; + profiles_append_item_safe(script_item).await?; } Ok(()) } diff --git a/src-tauri/src/config/prfitem.rs b/src-tauri/src/config/prfitem.rs index d26eac70..d6b54b56 100644 --- a/src-tauri/src/config/prfitem.rs +++ b/src-tauri/src/config/prfitem.rs @@ -1,7 +1,10 @@ -use crate::utils::{ - dirs, help, - network::{NetworkManager, ProxyType}, - tmpl, +use crate::{ + config::profiles, + utils::{ + dirs, help, + network::{NetworkManager, ProxyType}, + tmpl, + }, }; use anyhow::{Context, Result, bail}; use serde::{Deserialize, Serialize}; @@ -119,26 +122,29 @@ pub struct PrfOption { } impl PrfOption { - pub fn merge(one: Option, other: Option) -> Option { + pub fn merge(one: Option<&Self>, other: Option<&Self>) -> Option { match (one, other) { - (Some(mut a), Some(b)) => { - a.user_agent = 
b.user_agent.or(a.user_agent); - a.with_proxy = b.with_proxy.or(a.with_proxy); - a.self_proxy = b.self_proxy.or(a.self_proxy); - a.danger_accept_invalid_certs = b + (Some(a_ref), Some(b_ref)) => { + let mut result = a_ref.clone(); + result.user_agent = b_ref.user_agent.clone().or(result.user_agent); + result.with_proxy = b_ref.with_proxy.or(result.with_proxy); + result.self_proxy = b_ref.self_proxy.or(result.self_proxy); + result.danger_accept_invalid_certs = b_ref .danger_accept_invalid_certs - .or(a.danger_accept_invalid_certs); - a.allow_auto_update = b.allow_auto_update.or(a.allow_auto_update); - a.update_interval = b.update_interval.or(a.update_interval); - a.merge = b.merge.or(a.merge); - a.script = b.script.or(a.script); - a.rules = b.rules.or(a.rules); - a.proxies = b.proxies.or(a.proxies); - a.groups = b.groups.or(a.groups); - a.timeout_seconds = b.timeout_seconds.or(a.timeout_seconds); - Some(a) + .or(result.danger_accept_invalid_certs); + result.allow_auto_update = b_ref.allow_auto_update.or(result.allow_auto_update); + result.update_interval = b_ref.update_interval.or(result.update_interval); + result.merge = b_ref.merge.clone().or(result.merge); + result.script = b_ref.script.clone().or(result.script); + result.rules = b_ref.rules.clone().or(result.rules); + result.proxies = b_ref.proxies.clone().or(result.proxies); + result.groups = b_ref.groups.clone().or(result.groups); + result.timeout_seconds = b_ref.timeout_seconds.or(result.timeout_seconds); + Some(result) } - t => t.0.or(t.1), + (Some(a_ref), None) => Some(a_ref.clone()), + (None, Some(b_ref)) => Some(b_ref.clone()), + (None, None) => None, } } } @@ -146,13 +152,14 @@ impl PrfOption { impl PrfItem { /// From partial item /// must contain `itype` - pub async fn from(item: PrfItem, file_data: Option) -> Result { + pub async fn from(item: &PrfItem, file_data: Option) -> Result { if item.itype.is_none() { bail!("type should not be null"); } let itype = item .itype + .as_ref() .ok_or_else(|| 
anyhow::anyhow!("type should not be null"))?; match itype.as_str() { "remote" => { @@ -160,14 +167,16 @@ impl PrfItem { .url .as_ref() .ok_or_else(|| anyhow::anyhow!("url should not be null"))?; - let name = item.name; - let desc = item.desc; - PrfItem::from_url(url, name, desc, item.option).await + let name = item.name.as_ref(); + let desc = item.desc.as_ref(); + let option = item.option.as_ref(); + PrfItem::from_url(url, name, desc, option).await } "local" => { - let name = item.name.unwrap_or_else(|| "Local File".into()); - let desc = item.desc.unwrap_or_else(|| "".into()); - PrfItem::from_local(name, desc, file_data, item.option).await + let name = item.name.clone().unwrap_or_else(|| "Local File".into()); + let desc = item.desc.clone().unwrap_or_else(|| "".into()); + let option = item.option.as_ref(); + PrfItem::from_local(name, desc, file_data, option).await } typ => bail!("invalid profile item type \"{typ}\""), } @@ -179,7 +188,7 @@ impl PrfItem { name: String, desc: String, file_data: Option, - option: Option, + option: Option<&PrfOption>, ) -> Result { let uid = help::get_uid("L").into(); let file = format!("{uid}.yaml").into(); @@ -192,29 +201,29 @@ impl PrfItem { let mut groups = opt_ref.and_then(|o| o.groups.clone()); if merge.is_none() { - let merge_item = PrfItem::from_merge(None)?; - crate::config::profiles::profiles_append_item_safe(merge_item.clone()).await?; - merge = merge_item.uid; + let merge_item = &mut PrfItem::from_merge(None)?; + profiles::profiles_append_item_safe(merge_item).await?; + merge = merge_item.uid.clone(); } if script.is_none() { - let script_item = PrfItem::from_script(None)?; - crate::config::profiles::profiles_append_item_safe(script_item.clone()).await?; - script = script_item.uid; + let script_item = &mut PrfItem::from_script(None)?; + profiles::profiles_append_item_safe(script_item).await?; + script = script_item.uid.clone(); } if rules.is_none() { - let rules_item = PrfItem::from_rules()?; - 
crate::config::profiles::profiles_append_item_safe(rules_item.clone()).await?; - rules = rules_item.uid; + let rules_item = &mut PrfItem::from_rules()?; + profiles::profiles_append_item_safe(rules_item).await?; + rules = rules_item.uid.clone(); } if proxies.is_none() { - let proxies_item = PrfItem::from_proxies()?; - crate::config::profiles::profiles_append_item_safe(proxies_item.clone()).await?; - proxies = proxies_item.uid; + let proxies_item = &mut PrfItem::from_proxies()?; + profiles::profiles_append_item_safe(proxies_item).await?; + proxies = proxies_item.uid.clone(); } if groups.is_none() { - let groups_item = PrfItem::from_groups()?; - crate::config::profiles::profiles_append_item_safe(groups_item.clone()).await?; - groups = groups_item.uid; + let groups_item = &mut PrfItem::from_groups()?; + profiles::profiles_append_item_safe(groups_item).await?; + groups = groups_item.uid.clone(); } Ok(PrfItem { uid: Some(uid), @@ -244,24 +253,23 @@ impl PrfItem { /// create a new item from url pub async fn from_url( url: &str, - name: Option, - desc: Option, - option: Option, + name: Option<&String>, + desc: Option<&String>, + option: Option<&PrfOption>, ) -> Result { - let opt_ref = option.as_ref(); - let with_proxy = opt_ref.is_some_and(|o| o.with_proxy.unwrap_or(false)); - let self_proxy = opt_ref.is_some_and(|o| o.self_proxy.unwrap_or(false)); + let with_proxy = option.is_some_and(|o| o.with_proxy.unwrap_or(false)); + let self_proxy = option.is_some_and(|o| o.self_proxy.unwrap_or(false)); let accept_invalid_certs = - opt_ref.is_some_and(|o| o.danger_accept_invalid_certs.unwrap_or(false)); - let allow_auto_update = opt_ref.map(|o| o.allow_auto_update.unwrap_or(true)); - let user_agent = opt_ref.and_then(|o| o.user_agent.clone()); - let update_interval = opt_ref.and_then(|o| o.update_interval); - let timeout = opt_ref.and_then(|o| o.timeout_seconds).unwrap_or(20); - let mut merge = opt_ref.and_then(|o| o.merge.clone()); - let mut script = opt_ref.and_then(|o| 
o.script.clone()); - let mut rules = opt_ref.and_then(|o| o.rules.clone()); - let mut proxies = opt_ref.and_then(|o| o.proxies.clone()); - let mut groups = opt_ref.and_then(|o| o.groups.clone()); + option.is_some_and(|o| o.danger_accept_invalid_certs.unwrap_or(false)); + let allow_auto_update = option.map(|o| o.allow_auto_update.unwrap_or(true)); + let user_agent = option.and_then(|o| o.user_agent.clone()); + let update_interval = option.and_then(|o| o.update_interval); + let timeout = option.and_then(|o| o.timeout_seconds).unwrap_or(20); + let mut merge = option.and_then(|o| o.merge.clone()); + let mut script = option.and_then(|o| o.script.clone()); + let mut rules = option.and_then(|o| o.rules.clone()); + let mut proxies = option.and_then(|o| o.proxies.clone()); + let mut groups = option.and_then(|o| o.groups.clone()); // 选择代理类型 let proxy_type = if self_proxy { @@ -366,7 +374,11 @@ impl PrfItem { let uid = help::get_uid("R").into(); let file = format!("{uid}.yaml").into(); - let name = name.unwrap_or_else(|| filename.unwrap_or_else(|| "Remote File".into()).into()); + let name = name.map(|s| s.to_owned()).unwrap_or_else(|| { + filename + .map(|s| s.into()) + .unwrap_or_else(|| "Remote File".into()) + }); let data = resp.text_with_charset()?; // process the charset "UTF-8 with BOM" @@ -381,36 +393,36 @@ impl PrfItem { } if merge.is_none() { - let merge_item = PrfItem::from_merge(None)?; - crate::config::profiles::profiles_append_item_safe(merge_item.clone()).await?; - merge = merge_item.uid; + let merge_item = &mut PrfItem::from_merge(None)?; + profiles::profiles_append_item_safe(merge_item).await?; + merge = merge_item.uid.clone(); } if script.is_none() { - let script_item = PrfItem::from_script(None)?; - crate::config::profiles::profiles_append_item_safe(script_item.clone()).await?; - script = script_item.uid; + let script_item = &mut PrfItem::from_script(None)?; + profiles::profiles_append_item_safe(script_item).await?; + script = script_item.uid.clone(); } if 
rules.is_none() { - let rules_item = PrfItem::from_rules()?; - crate::config::profiles::profiles_append_item_safe(rules_item.clone()).await?; - rules = rules_item.uid; + let rules_item = &mut PrfItem::from_rules()?; + profiles::profiles_append_item_safe(rules_item).await?; + rules = rules_item.uid.clone(); } if proxies.is_none() { - let proxies_item = PrfItem::from_proxies()?; - crate::config::profiles::profiles_append_item_safe(proxies_item.clone()).await?; - proxies = proxies_item.uid; + let proxies_item = &mut PrfItem::from_proxies()?; + profiles::profiles_append_item_safe(proxies_item).await?; + proxies = proxies_item.uid.clone(); } if groups.is_none() { - let groups_item = PrfItem::from_groups()?; - crate::config::profiles::profiles_append_item_safe(groups_item.clone()).await?; - groups = groups_item.uid; + let groups_item = &mut PrfItem::from_groups()?; + profiles::profiles_append_item_safe(groups_item).await?; + groups = groups_item.uid.clone(); } Ok(PrfItem { uid: Some(uid), itype: Some("remote".into()), name: Some(name), - desc, + desc: desc.cloned(), file: Some(file), url: Some(url.into()), selected: None, diff --git a/src-tauri/src/config/profiles.rs b/src-tauri/src/config/profiles.rs index 1601fb66..d8d9a003 100644 --- a/src-tauri/src/config/profiles.rs +++ b/src-tauri/src/config/profiles.rs @@ -31,7 +31,7 @@ pub struct CleanupResult { macro_rules! 
patch { ($lv: expr, $rv: expr, $key: tt) => { if ($rv.$key).is_some() { - $lv.$key = $rv.$key; + $lv.$key = $rv.$key.clone(); } }; } @@ -122,28 +122,30 @@ impl IProfiles { } /// find the item by the uid - pub fn get_item(&self, uid: &String) -> Result<&PrfItem> { - if let Some(items) = self.items.as_ref() { - let some_uid = Some(uid.clone()); + pub fn get_item(&self, uid: impl AsRef) -> Result<&PrfItem> { + let uid_str = uid.as_ref(); + if let Some(items) = self.items.as_ref() { for each in items.iter() { - if each.uid == some_uid { + if let Some(uid_val) = &each.uid + && uid_val.as_str() == uid_str + { return Ok(each); } } } - bail!("failed to get the profile item \"uid:{uid}\""); + bail!("failed to get the profile item \"uid:{}\"", uid_str); } /// append new item /// if the file_data is some /// then should save the data to file - pub async fn append_item(&mut self, mut item: PrfItem) -> Result<()> { - if item.uid.is_none() { + pub async fn append_item(&mut self, item: &mut PrfItem) -> Result<()> { + let uid = &item.uid; + if uid.is_none() { bail!("the uid should not be null"); } - let uid = item.uid.clone(); // save the file data // move the field value after save @@ -165,7 +167,7 @@ impl IProfiles { if self.current.is_none() && (item.itype == Some("remote".into()) || item.itype == Some("local".into())) { - self.current = uid; + self.current = uid.to_owned(); } if self.items.is_none() { @@ -173,24 +175,23 @@ impl IProfiles { } if let Some(items) = self.items.as_mut() { - items.push(item) + items.push(item.to_owned()); } - // self.save_file().await Ok(()) } /// reorder items - pub async fn reorder(&mut self, active_id: String, over_id: String) -> Result<()> { + pub async fn reorder(&mut self, active_id: &String, over_id: &String) -> Result<()> { let mut items = self.items.take().unwrap_or_default(); let mut old_index = None; let mut new_index = None; for (i, _) in items.iter().enumerate() { - if items[i].uid == Some(active_id.clone()) { + if items[i].uid.as_ref() 
== Some(active_id) { old_index = Some(i); } - if items[i].uid == Some(over_id.clone()) { + if items[i].uid.as_ref() == Some(over_id) { new_index = Some(i); } } @@ -206,11 +207,11 @@ impl IProfiles { } /// update the item value - pub async fn patch_item(&mut self, uid: String, item: PrfItem) -> Result<()> { + pub async fn patch_item(&mut self, uid: &String, item: &PrfItem) -> Result<()> { let mut items = self.items.take().unwrap_or_default(); for each in items.iter_mut() { - if each.uid == Some(uid.clone()) { + if each.uid.as_ref() == Some(uid) { patch!(each, item, itype); patch!(each, item, name); patch!(each, item, desc); @@ -232,13 +233,13 @@ impl IProfiles { /// be used to update the remote item /// only patch `updated` `extra` `file_data` - pub async fn update_item(&mut self, uid: String, mut item: PrfItem) -> Result<()> { + pub async fn update_item(&mut self, uid: &String, item: &mut PrfItem) -> Result<()> { if self.items.is_none() { self.items = Some(vec![]); } // find the item - let _ = self.get_item(&uid)?; + let _ = self.get_item(uid)?; if let Some(items) = self.items.as_mut() { let some_uid = Some(uid.clone()); @@ -247,8 +248,8 @@ impl IProfiles { if each.uid == some_uid { each.extra = item.extra; each.updated = item.updated; - each.home = item.home; - each.option = PrfOption::merge(each.option.clone(), item.option); + each.home = item.home.to_owned(); + each.option = PrfOption::merge(each.option.as_ref(), item.option.as_ref()); // save the file data // move the field value after save if let Some(file_data) = item.file_data.take() { @@ -279,10 +280,10 @@ impl IProfiles { /// delete item /// if delete the current then return true - pub async fn delete_item(&mut self, uid: String) -> Result { - let current = self.current.as_ref().unwrap_or(&uid); + pub async fn delete_item(&mut self, uid: &String) -> Result { + let current = self.current.as_ref().unwrap_or(uid); let current = current.clone(); - let item = self.get_item(&uid)?; + let item = 
self.get_item(uid)?; let merge_uid = item.option.as_ref().and_then(|e| e.merge.clone()); let script_uid = item.option.as_ref().and_then(|e| e.script.clone()); let rules_uid = item.option.as_ref().and_then(|e| e.rules.clone()); @@ -330,7 +331,7 @@ impl IProfiles { .await; } // delete the original uid - if current == uid { + if current == *uid { self.current = None; for item in items.iter() { if item.itype == Some("remote".into()) || item.itype == Some("local".into()) { @@ -342,7 +343,7 @@ impl IProfiles { self.items = Some(items); self.save_file().await?; - Ok(current == uid) + Ok(current == *uid) } /// 获取current指向的订阅内容 @@ -626,14 +627,14 @@ impl IProfiles { use crate::config::Config; pub async fn profiles_append_item_with_filedata_safe( - item: PrfItem, + item: &PrfItem, file_data: Option, ) -> Result<()> { - let item = PrfItem::from(item, file_data).await?; + let item = &mut PrfItem::from(item, file_data).await?; profiles_append_item_safe(item).await } -pub async fn profiles_append_item_safe(item: PrfItem) -> Result<()> { +pub async fn profiles_append_item_safe(item: &mut PrfItem) -> Result<()> { Config::profiles() .await .with_data_modify(|mut profiles| async move { @@ -643,7 +644,7 @@ pub async fn profiles_append_item_safe(item: PrfItem) -> Result<()> { .await } -pub async fn profiles_patch_item_safe(index: String, item: PrfItem) -> Result<()> { +pub async fn profiles_patch_item_safe(index: &String, item: &PrfItem) -> Result<()> { Config::profiles() .await .with_data_modify(|mut profiles| async move { @@ -653,7 +654,7 @@ pub async fn profiles_patch_item_safe(index: String, item: PrfItem) -> Result<() .await } -pub async fn profiles_delete_item_safe(index: String) -> Result { +pub async fn profiles_delete_item_safe(index: &String) -> Result { Config::profiles() .await .with_data_modify(|mut profiles| async move { @@ -663,7 +664,7 @@ pub async fn profiles_delete_item_safe(index: String) -> Result { .await } -pub async fn profiles_reorder_safe(active_id: String, 
over_id: String) -> Result<()> { +pub async fn profiles_reorder_safe(active_id: &String, over_id: &String) -> Result<()> { Config::profiles() .await .with_data_modify(|mut profiles| async move { @@ -683,7 +684,7 @@ pub async fn profiles_save_file_safe() -> Result<()> { .await } -pub async fn profiles_draft_update_item_safe(index: String, item: PrfItem) -> Result<()> { +pub async fn profiles_draft_update_item_safe(index: &String, item: &mut PrfItem) -> Result<()> { Config::profiles() .await .with_data_modify(|mut profiles| async move { diff --git a/src-tauri/src/config/verge.rs b/src-tauri/src/config/verge.rs index 25edd04b..61f69ea9 100644 --- a/src-tauri/src/config/verge.rs +++ b/src-tauri/src/config/verge.rs @@ -437,11 +437,11 @@ impl IVerge { /// patch verge config /// only save to file #[allow(clippy::cognitive_complexity)] - pub fn patch_config(&mut self, patch: IVerge) { + pub fn patch_config(&mut self, patch: &IVerge) { macro_rules! patch { ($key: tt) => { if patch.$key.is_some() { - self.$key = patch.$key; + self.$key = patch.$key.clone(); } }; } @@ -696,3 +696,9 @@ impl From for IVergeResponse { } } } + +impl From> for IVergeResponse { + fn from(verge: Box) -> Self { + IVergeResponse::from(*verge) + } +} diff --git a/src-tauri/src/core/manager/lifecycle.rs b/src-tauri/src/core/manager/lifecycle.rs index a766aeef..8f0c6a88 100644 --- a/src-tauri/src/core/manager/lifecycle.rs +++ b/src-tauri/src/core/manager/lifecycle.rs @@ -1,4 +1,5 @@ use super::{CoreManager, RunningMode}; +use crate::config::{Config, ConfigType, IVerge}; use crate::{ core::{ logger::CLASH_LOGGER, @@ -41,18 +42,12 @@ impl CoreManager { self.start_core().await } - pub async fn change_core(&self, clash_core: Option) -> Result<(), String> { - use crate::config::{Config, ConfigType, IVerge}; - - let core = clash_core - .as_ref() - .ok_or_else(|| "Clash core cannot be None".to_string())?; - - if !IVerge::VALID_CLASH_CORES.contains(&core.as_str()) { - return Err(format!("Invalid clash core: {}", 
core).into()); + pub async fn change_core(&self, clash_core: &String) -> Result<(), String> { + if !IVerge::VALID_CLASH_CORES.contains(&clash_core.as_str()) { + return Err(format!("Invalid clash core: {}", clash_core).into()); } - Config::verge().await.draft_mut().clash_core = clash_core; + Config::verge().await.draft_mut().clash_core = clash_core.to_owned().into(); Config::verge().await.apply(); let verge_data = Config::verge().await.latest_ref().clone(); diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index 47fc7f2f..31ad95cb 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -390,7 +390,7 @@ impl Timer { .spawn_async_routine(move || { let uid = uid.clone(); Box::pin(async move { - Self::async_task(uid).await; + Self::async_task(&uid).await; }) as Pin + Send>> }) .context("failed to create timer task")?; @@ -476,14 +476,14 @@ impl Timer { } /// Async task with better error handling and logging - async fn async_task(uid: String) { + async fn async_task(uid: &String) { let task_start = std::time::Instant::now(); logging!(info, Type::Timer, "Running timer task for profile: {}", uid); match tokio::time::timeout(std::time::Duration::from_secs(40), async { - Self::emit_update_event(&uid, true); + Self::emit_update_event(uid, true); - let is_current = Config::profiles().await.latest_ref().current.as_ref() == Some(&uid); + let is_current = Config::profiles().await.latest_ref().current.as_ref() == Some(uid); logging!( info, Type::Timer, @@ -492,7 +492,7 @@ impl Timer { is_current ); - feat::update_profile(uid.clone(), None, Some(is_current), None).await + feat::update_profile(uid, None, is_current, false).await }) .await { @@ -517,7 +517,7 @@ impl Timer { } // Emit completed event - Self::emit_update_event(&uid, false); + Self::emit_update_event(uid, false); } } diff --git a/src-tauri/src/enhance/mod.rs b/src-tauri/src/enhance/mod.rs index 7f7d71c8..05f2191b 100644 --- a/src-tauri/src/enhance/mod.rs +++ 
b/src-tauri/src/enhance/mod.rs @@ -138,7 +138,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&merge_uid).ok().cloned() + profiles.get_item(merge_uid).ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -155,7 +155,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&script_uid).ok().cloned() + profiles.get_item(script_uid).ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -172,7 +172,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&rules_uid).ok().cloned() + profiles.get_item(rules_uid).ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -189,7 +189,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&proxies_uid).ok().cloned() + profiles.get_item(proxies_uid).ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -206,7 +206,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&groups_uid).ok().cloned() + profiles.get_item(groups_uid).ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -223,7 +223,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&"Merge".into()).ok().cloned() + profiles.get_item("Merge").ok().cloned() }; if let Some(item) = item { >::from_async(&item).await @@ -240,7 +240,7 @@ async fn collect_profile_items() -> ProfileItems { let item = { let 
profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - profiles.get_item(&"Script".into()).ok().cloned() + profiles.get_item("Script").ok().cloned() }; if let Some(item) = item { >::from_async(&item).await diff --git a/src-tauri/src/feat/backup.rs b/src-tauri/src/feat/backup.rs index 7d0bf9a8..fcc8d048 100644 --- a/src-tauri/src/feat/backup.rs +++ b/src-tauri/src/feat/backup.rs @@ -106,7 +106,7 @@ pub async fn restore_webdav_backup(filename: String) -> Result<()> { logging_error!( Type::Backup, super::patch_verge( - IVerge { + &IVerge { webdav_url, webdav_username, webdav_password, @@ -253,7 +253,7 @@ pub async fn restore_local_backup(filename: String) -> Result<()> { logging_error!( Type::Backup, super::patch_verge( - IVerge { + &IVerge { webdav_url, webdav_username, webdav_password, diff --git a/src-tauri/src/feat/config.rs b/src-tauri/src/feat/config.rs index 4f1d190a..1ca9669c 100644 --- a/src-tauri/src/feat/config.rs +++ b/src-tauri/src/feat/config.rs @@ -226,15 +226,12 @@ async fn process_terminated_flags(update_flags: i32, patch: &IVerge) -> Result<( Ok(()) } -pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> { - Config::verge() - .await - .draft_mut() - .patch_config(patch.clone()); +pub async fn patch_verge(patch: &IVerge, not_save_file: bool) -> Result<()> { + Config::verge().await.draft_mut().patch_config(patch); - let update_flags = determine_update_flags(&patch); + let update_flags = determine_update_flags(patch); let process_flag_result: std::result::Result<(), anyhow::Error> = { - process_terminated_flags(update_flags, &patch).await?; + process_terminated_flags(update_flags, patch).await?; Ok(()) }; @@ -245,7 +242,7 @@ pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> { Config::verge().await.apply(); if !not_save_file { // 分离数据获取和异步调用 - let verge_data = Config::verge().await.data_mut().clone(); + let verge_data = Config::verge().await.data_ref().clone(); 
verge_data.save_file().await?; } Ok(()) diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 7f2ce798..4ebd4d06 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -24,12 +24,12 @@ pub async fn toggle_proxy_profile(profile_index: String) { } async fn should_update_profile( - uid: String, + uid: &String, ignore_auto_update: bool, ) -> Result)>> { let profiles = Config::profiles().await; let profiles = profiles.latest_ref(); - let item = profiles.get_item(&uid)?; + let item = profiles.get_item(uid)?; let is_remote = item.itype.as_ref().is_some_and(|s| s == "remote"); if !is_remote { @@ -63,19 +63,19 @@ async fn should_update_profile( } async fn perform_profile_update( - uid: String, - url: String, - opt: Option, - option: Option, + uid: &String, + url: &String, + opt: Option<&PrfOption>, + option: Option<&PrfOption>, ) -> Result { log::info!(target: "app", "[订阅更新] 开始下载新的订阅内容"); - let merged_opt = PrfOption::merge(opt.clone(), option.clone()); + let merged_opt = PrfOption::merge(opt, option); - match PrfItem::from_url(&url, None, None, merged_opt.clone()).await { - Ok(item) => { + match PrfItem::from_url(url, None, None, merged_opt.as_ref()).await { + Ok(mut item) => { log::info!(target: "app", "[订阅更新] 更新订阅配置成功"); let profiles = Config::profiles().await; - profiles_draft_update_item_safe(uid.clone(), item).await?; + profiles_draft_update_item_safe(uid, &mut item).await?; let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); Ok(is_current) @@ -91,7 +91,7 @@ async fn perform_profile_update( fallback_opt.with_proxy = Some(false); fallback_opt.self_proxy = Some(true); - match PrfItem::from_url(&url, None, None, Some(fallback_opt)).await { + match PrfItem::from_url(url, None, None, Some(&fallback_opt)).await { Ok(mut item) => { log::info!(target: "app", "[订阅更新] 使用Clash代理更新成功"); @@ -101,7 +101,7 @@ async fn perform_profile_update( } 
let profiles = Config::profiles().await; - profiles_draft_update_item_safe(uid.clone(), item.clone()).await?; + profiles_draft_update_item_safe(uid, &mut item).await?; let profile_name = item.name.clone().unwrap_or_else(|| uid.clone()); handle::Handle::notice_message("update_with_clash_proxy", profile_name); @@ -124,20 +124,17 @@ async fn perform_profile_update( } pub async fn update_profile( - uid: String, - option: Option, - auto_refresh: Option, - ignore_auto_update: Option, + uid: &String, + option: Option<&PrfOption>, + auto_refresh: bool, + ignore_auto_update: bool, ) -> Result<()> { logging!(info, Type::Config, "[订阅更新] 开始更新订阅 {}", uid); - let auto_refresh = auto_refresh.unwrap_or(true); - let ignore_auto_update = ignore_auto_update.unwrap_or(false); - - let url_opt = should_update_profile(uid.clone(), ignore_auto_update).await?; + let url_opt = should_update_profile(uid, ignore_auto_update).await?; let should_refresh = match url_opt { Some((url, opt)) => { - perform_profile_update(uid.clone(), url, opt, option).await? && auto_refresh + perform_profile_update(uid, &url, opt.as_ref(), option).await? 
&& auto_refresh } None => auto_refresh, }; diff --git a/src-tauri/src/feat/proxy.rs b/src-tauri/src/feat/proxy.rs index 9178bd2d..67d94efb 100644 --- a/src-tauri/src/feat/proxy.rs +++ b/src-tauri/src/feat/proxy.rs @@ -20,7 +20,7 @@ pub async fn toggle_system_proxy() { } let patch_result = super::patch_verge( - IVerge { + &IVerge { enable_system_proxy: Some(!enable), ..IVerge::default() }, @@ -40,7 +40,7 @@ pub async fn toggle_tun_mode(not_save_file: Option) { let enable = enable.unwrap_or(false); match super::patch_verge( - IVerge { + &IVerge { enable_tun_mode: Some(!enable), ..IVerge::default() }, diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index d7446d9f..caa7261d 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -93,14 +93,14 @@ mod app_init { } app.deep_link().on_open_url(|event| { - let url = event.urls().first().map(|u| u.to_string()); - if let Some(url) = url { - AsyncHandler::spawn(|| async { - if let Err(e) = resolve::resolve_scheme(url.into()).await { - logging!(error, Type::Setup, "Failed to resolve scheme: {}", e); - } - }); - } + let urls = event.urls(); + AsyncHandler::spawn(move || async move { + if let Some(url) = urls.first() + && let Err(e) = resolve::resolve_scheme(url.as_ref()).await + { + logging!(error, Type::Setup, "Failed to resolve scheme: {}", e); + } + }); }); Ok(()) @@ -117,7 +117,7 @@ mod app_init { { auto_start_plugin_builder = auto_start_plugin_builder .macos_launcher(MacosLauncher::LaunchAgent) - .app_name(app.config().identifier.clone()); + .app_name(&app.config().identifier); } app.handle().plugin(auto_start_plugin_builder.build())?; Ok(()) diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index 76465db9..c34bbfb6 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -29,6 +29,11 @@ impl From for Draft { /// - `apply`: Commits the draft data, replacing the committed data and returning the old committed value if a draft existed. 
/// - `discard`: Discards the draft data and returns it if it existed. impl Draft> { + /// 正式数据视图 + pub fn data_ref(&self) -> MappedRwLockReadGuard<'_, Box> { + RwLockReadGuard::map(self.inner.read(), |inner| &inner.0) + } + /// 可写正式数据 pub fn data_mut(&self) -> MappedRwLockWriteGuard<'_, Box> { RwLockWriteGuard::map(self.inner.write(), |inner| &mut inner.0) diff --git a/src-tauri/src/utils/init.rs b/src-tauri/src/utils/init.rs index 63b481d9..72f19b08 100644 --- a/src-tauri/src/utils/init.rs +++ b/src-tauri/src/utils/init.rs @@ -429,26 +429,8 @@ pub async fn init_resources() -> Result<()> { let src_path = res_dir.join(file); let dest_path = app_dir.join(file); - let handle_copy = |src: PathBuf, dest: PathBuf, file: String| async move { - match fs::copy(&src, &dest).await { - Ok(_) => { - logging!(debug, Type::Setup, "resources copied '{}'", file); - } - Err(err) => { - logging!( - error, - Type::Setup, - "failed to copy resources '{}' to '{:?}', {}", - file, - dest, - err - ); - } - }; - }; - if src_path.exists() && !dest_path.exists() { - handle_copy(src_path.clone(), dest_path.clone(), (*file).into()).await; + handle_copy(&src_path, &dest_path, file).await; continue; } @@ -458,12 +440,12 @@ pub async fn init_resources() -> Result<()> { match (src_modified, dest_modified) { (Ok(src_modified), Ok(dest_modified)) => { if src_modified > dest_modified { - handle_copy(src_path.clone(), dest_path.clone(), (*file).into()).await; + handle_copy(&src_path, &dest_path, file).await; } } _ => { logging!(debug, Type::Setup, "failed to get modified '{}'", file); - handle_copy(src_path.clone(), dest_path.clone(), (*file).into()).await; + handle_copy(&src_path, &dest_path, file).await; } }; } @@ -563,3 +545,21 @@ pub async fn startup_script() -> Result<()> { Ok(()) } + +async fn handle_copy(src: &PathBuf, dest: &PathBuf, file: &str) { + match fs::copy(src, dest).await { + Ok(_) => { + logging!(debug, Type::Setup, "resources copied '{}'", file); + } + Err(err) => { + logging!( + 
error, + Type::Setup, + "failed to copy resources '{}' to '{:?}', {}", + file, + dest, + err + ); + } + }; +} diff --git a/src-tauri/src/utils/resolve/mod.rs b/src-tauri/src/utils/resolve/mod.rs index ac725534..cb0989b1 100644 --- a/src-tauri/src/utils/resolve/mod.rs +++ b/src-tauri/src/utils/resolve/mod.rs @@ -1,5 +1,4 @@ use anyhow::Result; -use smartstring::alias::String; use crate::{ config::Config, @@ -99,7 +98,7 @@ pub(super) async fn resolve_setup_logger() { logging_error!(Type::Setup, init::init_logger().await); } -pub async fn resolve_scheme(param: String) -> Result<()> { +pub async fn resolve_scheme(param: &str) -> Result<()> { logging_error!(Type::Setup, scheme::resolve_scheme(param).await); Ok(()) } diff --git a/src-tauri/src/utils/resolve/scheme.rs b/src-tauri/src/utils/resolve/scheme.rs index 4c7ace90..487969a3 100644 --- a/src-tauri/src/utils/resolve/scheme.rs +++ b/src-tauri/src/utils/resolve/scheme.rs @@ -3,9 +3,14 @@ use percent_encoding::percent_decode_str; use smartstring::alias::String; use tauri::Url; -use crate::{config::PrfItem, core::handle, logging, logging_error, utils::logging::Type}; +use crate::{ + config::{PrfItem, profiles}, + core::handle, + logging, logging_error, + utils::logging::Type, +}; -pub(super) async fn resolve_scheme(param: String) -> Result<()> { +pub(super) async fn resolve_scheme(param: &str) -> Result<()> { log::info!(target:"app", "received deep link: {param}"); let param_str = if param.starts_with("[") && param.len() > 4 { @@ -13,7 +18,7 @@ pub(super) async fn resolve_scheme(param: String) -> Result<()> { .get(2..param.len() - 2) .ok_or_else(|| anyhow::anyhow!("Invalid string slice boundaries"))? 
} else { - param.as_str() + param }; // 解析 URL @@ -25,10 +30,11 @@ pub(super) async fn resolve_scheme(param: String) -> Result<()> { }; if link_parsed.scheme() == "clash" || link_parsed.scheme() == "clash-verge" { - let name = link_parsed + let name_owned: Option = link_parsed .query_pairs() .find(|(key, _)| key == "name") - .map(|(_, value)| value.into()); + .map(|(_, value)| value.into_owned().into()); + let name = name_owned.as_ref(); let url_param = if let Some(query) = link_parsed.query() { let prefix = "url="; @@ -43,10 +49,10 @@ pub(super) async fn resolve_scheme(param: String) -> Result<()> { }; match url_param { - Some(url) => { + Some(ref url) => { log::info!(target:"app", "decoded subscription url: {url}"); - match PrfItem::from_url(url.as_ref(), name, None, None).await { - Ok(item) => { + match PrfItem::from_url(url, name, None, None).await { + Ok(mut item) => { let uid = match item.uid.clone() { Some(uid) => uid, None => { @@ -58,7 +64,7 @@ pub(super) async fn resolve_scheme(param: String) -> Result<()> { return Ok(()); } }; - let result = crate::config::profiles::profiles_append_item_safe(item).await; + let result = profiles::profiles_append_item_safe(&mut item).await; logging_error!( Type::Config, "failed to import subscription url: {:?}", diff --git a/src-tauri/src/utils/resolve/window.rs b/src-tauri/src/utils/resolve/window.rs index f415f506..387145ec 100644 --- a/src-tauri/src/utils/resolve/window.rs +++ b/src-tauri/src/utils/resolve/window.rs @@ -21,16 +21,14 @@ const MINIMAL_HEIGHT: f64 = 520.0; pub async fn build_new_window() -> Result { let app_handle = handle::Handle::app_handle(); - let start_page = Config::verge() - .await - .latest_ref() - .start_page - .clone() - .unwrap_or_else(|| "/".into()); + let config = Config::verge().await; + let latest = config.latest_ref(); + let start_page = latest.start_page.as_deref().unwrap_or("/"); + match tauri::WebviewWindowBuilder::new( app_handle, "main", /* the unique window label */ - 
tauri::WebviewUrl::App(start_page.as_str().into()), + tauri::WebviewUrl::App(start_page.into()), ) .title("Clash Verge") .center() diff --git a/src-tauri/src/utils/server.rs b/src-tauri/src/utils/server.rs index 991e29de..12985c2a 100644 --- a/src-tauri/src/utils/server.rs +++ b/src-tauri/src/utils/server.rs @@ -107,9 +107,8 @@ pub fn embed_server() { let scheme = warp::path!("commands" / "scheme") .and(warp::query::()) .map(|query: QueryParam| { - let param = query.param.clone(); tokio::task::spawn_local(async move { - logging_error!(Type::Setup, resolve::resolve_scheme(param).await); + logging_error!(Type::Setup, resolve::resolve_scheme(&query.param).await); }); warp::reply::with_status::( "ok".to_string(), From 50567d9b97b626336fb41892eef021d255c6275e Mon Sep 17 00:00:00 2001 From: Slinetrac Date: Sat, 1 Nov 2025 20:09:00 +0800 Subject: [PATCH 61/70] refactor(profiles): remove import verification and simplify post-import refresh --- src/pages/profiles.tsx | 173 +---------------------------------------- 1 file changed, 4 insertions(+), 169 deletions(-) diff --git a/src/pages/profiles.tsx b/src/pages/profiles.tsx index 597ec4ec..0643c3d0 100644 --- a/src/pages/profiles.tsx +++ b/src/pages/profiles.tsx @@ -99,112 +99,6 @@ const isOperationAborted = ( return false; }; -const normalizeProfileUrl = (value?: string) => { - if (!value) return ""; - const trimmed = value.trim(); - - try { - const url = new URL(trimmed); - const auth = - url.username || url.password - ? `${url.username}${url.password ? `:${url.password}` : ""}@` - : ""; - const normalized = - `${url.protocol.toLowerCase()}//${auth}${url.hostname.toLowerCase()}` + - `${url.port ? 
`:${url.port}` : ""}${url.pathname}${url.search}${url.hash}`; - - return normalized.replace(/\/+$/, ""); - } catch { - const schemeNormalized = trimmed.replace( - /^([a-z]+):\/\//i, - (match, scheme: string) => `${scheme.toLowerCase()}://`, - ); - return schemeNormalized.replace(/\/+$/, ""); - } -}; - -const getProfileSignature = (profile?: IProfileItem | null) => { - if (!profile) return ""; - const { extra, selected, option, name, desc } = profile; - return JSON.stringify({ - extra: extra ?? null, - selected: selected ?? null, - option: option ?? null, - name: name ?? null, - desc: desc ?? null, - }); -}; - -type ImportLandingVerifier = { - baselineCount: number; - hasLanding: (config?: IProfilesConfig | null) => boolean; -}; - -const createImportLandingVerifier = ( - items: IProfileItem[] | undefined, - url: string, -): ImportLandingVerifier => { - const normalizedUrl = normalizeProfileUrl(url); - const baselineCount = items?.length ?? 0; - const baselineProfile = normalizedUrl - ? items?.find((item) => normalizeProfileUrl(item?.url) === normalizedUrl) - : undefined; - const baselineSignature = getProfileSignature(baselineProfile); - const baselineUpdated = baselineProfile?.updated ?? 0; - const hadBaselineProfile = Boolean(baselineProfile); - - const hasLanding = (config?: IProfilesConfig | null) => { - const currentItems = config?.items ?? []; - const currentCount = currentItems.length; - - if (currentCount > baselineCount) { - console.log( - `[导入验证] 配置数量已增加: ${baselineCount} -> ${currentCount}`, - ); - return true; - } - - if (!normalizedUrl) { - return false; - } - - const matchingProfile = currentItems.find( - (item) => normalizeProfileUrl(item?.url) === normalizedUrl, - ); - - if (!matchingProfile) { - return false; - } - - if (!hadBaselineProfile) { - console.log("[导入验证] 检测到新的订阅记录,判定为导入成功"); - return true; - } - - const currentSignature = getProfileSignature(matchingProfile); - const currentUpdated = matchingProfile.updated ?? 
0; - - if (currentUpdated > baselineUpdated) { - console.log( - `[导入验证] 订阅更新时间已更新 ${baselineUpdated} -> ${currentUpdated}`, - ); - return true; - } - - if (currentSignature !== baselineSignature) { - console.log("[导入验证] 订阅详情发生变化,判定为导入成功"); - return true; - } - - return false; - }; - - return { - baselineCount, - hasLanding, - }; -}; - const ProfilePage = () => { const { t } = useTranslation(); const location = useLocation(); @@ -382,55 +276,19 @@ const ProfilePage = () => { } setLoading(true); - const importVerifier = createImportLandingVerifier(profiles?.items, url); - const handleImportSuccess = async (noticeKey: string) => { showNotice("success", t(noticeKey)); setUrl(""); - await performRobustRefresh(importVerifier); - }; - - const waitForImportLanding = async () => { - const maxChecks = 2; - for (let attempt = 0; attempt <= maxChecks; attempt++) { - try { - const currentProfiles = await getProfiles(); - if (importVerifier.hasLanding(currentProfiles)) { - return true; - } - - if (attempt < maxChecks) { - await new Promise((resolve) => - setTimeout(resolve, 200 * (attempt + 1)), - ); - } - } catch (verifyErr) { - console.warn("[导入验证] 获取配置状态失败:", verifyErr); - break; - } - } - - return false; + await performRobustRefresh(); }; try { // 尝试正常导入 await importProfile(url); await handleImportSuccess("Profile Imported Successfully"); - return; } catch (initialErr) { console.warn("[订阅导入] 首次导入失败:", initialErr); - const alreadyImported = await waitForImportLanding(); - if (alreadyImported) { - console.warn( - "[订阅导入] 接口返回失败,但检测到订阅已导入,跳过回退导入流程", - ); - await handleImportSuccess("Profile Imported Successfully"); - return; - } - - // 首次导入失败且未检测到数据变更,尝试使用自身代理 showNotice("info", t("Import failed, retrying with Clash proxy...")); try { // 使用自身代理尝试导入 @@ -454,10 +312,7 @@ const ProfilePage = () => { }; // 强化的刷新策略 - const performRobustRefresh = async ( - importVerifier: ImportLandingVerifier, - ) => { - const { baselineCount, hasLanding } = importVerifier; + const 
performRobustRefresh = async () => { let retryCount = 0; const maxRetries = 5; const baseDelay = 200; @@ -477,28 +332,8 @@ const ProfilePage = () => { setTimeout(resolve, baseDelay * (retryCount + 1)), ); - // 验证刷新是否成功 - const currentProfiles = await getProfiles(); - const currentCount = currentProfiles?.items?.length || 0; - - if (currentCount > baselineCount) { - console.log( - `[导入刷新] 配置刷新成功,配置数量 ${baselineCount} -> ${currentCount}`, - ); - await onEnhance(false); - return; - } - - if (hasLanding(currentProfiles)) { - console.log("[导入刷新] 检测到订阅内容更新,判定刷新成功"); - await onEnhance(false); - return; - } - - console.warn( - `[导入刷新] 配置数量未增加 (${currentCount}), 继续重试...`, - ); - retryCount++; + await onEnhance(false); + return; } catch (error) { console.error(`[导入刷新] 第${retryCount + 1}次刷新失败:`, error); retryCount++; From fb260fb33d7caabdc8df75740e30293853731401 Mon Sep 17 00:00:00 2001 From: Tunglies Date: Sat, 1 Nov 2025 20:47:01 +0800 Subject: [PATCH 62/70] Refactor logging to use a centralized logging utility across the application (#5277) - Replaced direct log calls with a new logging macro that includes a logging type for better categorization. - Updated logging in various modules including `merge.rs`, `mod.rs`, `tun.rs`, `clash.rs`, `profile.rs`, `proxy.rs`, `window.rs`, `lightweight.rs`, `guard.rs`, `autostart.rs`, `dirs.rs`, `dns.rs`, `scheme.rs`, `server.rs`, and `window_manager.rs`. - Introduced logging types such as `Core`, `Network`, `ProxyMode`, `Window`, `Lightweight`, `Service`, and `File` to enhance log clarity and filtering. 
--- src-tauri/src/cmd/app.rs | 11 ++- src-tauri/src/cmd/clash.rs | 2 +- src-tauri/src/cmd/network.rs | 22 ++++- src-tauri/src/cmd/profile.rs | 18 ++-- src-tauri/src/config/clash.rs | 5 +- src-tauri/src/config/profiles.rs | 20 ++-- src-tauri/src/config/verge.rs | 4 +- src-tauri/src/core/async_proxy_query.rs | 58 +++++++++--- src-tauri/src/core/backup.rs | 28 +++++- src-tauri/src/core/event_driven_proxy.rs | 58 +++++++----- src-tauri/src/core/sysopt.rs | 26 ++++-- src-tauri/src/core/tray/mod.rs | 113 +++++++++++++++++------ src-tauri/src/enhance/merge.rs | 8 +- src-tauri/src/enhance/mod.rs | 11 ++- src-tauri/src/enhance/tun.rs | 9 +- src-tauri/src/feat/clash.rs | 16 ++-- src-tauri/src/feat/profile.rs | 61 +++++++++--- src-tauri/src/feat/proxy.rs | 20 +++- src-tauri/src/feat/window.rs | 60 +++++++----- src-tauri/src/module/lightweight.rs | 6 +- src-tauri/src/process/guard.rs | 9 +- src-tauri/src/utils/autostart.rs | 20 ++-- src-tauri/src/utils/dirs.rs | 18 +++- src-tauri/src/utils/logging.rs | 2 - src-tauri/src/utils/resolve/dns.rs | 38 +++++--- src-tauri/src/utils/resolve/scheme.rs | 6 +- src-tauri/src/utils/server.rs | 8 +- src-tauri/src/utils/window_manager.rs | 26 ++++-- 28 files changed, 473 insertions(+), 210 deletions(-) diff --git a/src-tauri/src/cmd/app.rs b/src-tauri/src/cmd/app.rs index b0b9d480..959277fe 100644 --- a/src-tauri/src/cmd/app.rs +++ b/src-tauri/src/cmd/app.rs @@ -241,7 +241,7 @@ pub async fn copy_icon_file(path: String, icon_info: IconInfo) -> CmdResult CmdResult<()> { - log::info!(target: "app", "前端UI已准备就绪"); + logging!(info, Type::Cmd, "前端UI已准备就绪"); crate::utils::resolve::ui::mark_ui_ready(); Ok(()) } @@ -249,7 +249,7 @@ pub fn notify_ui_ready() -> CmdResult<()> { /// UI加载阶段 #[tauri::command] pub fn update_ui_stage(stage: String) -> CmdResult<()> { - log::info!(target: "app", "UI加载阶段更新: {}", stage.as_str()); + logging!(info, Type::Cmd, "UI加载阶段更新: {}", stage.as_str()); use crate::utils::resolve::ui::UiReadyStage; @@ -260,7 +260,12 @@ pub fn 
update_ui_stage(stage: String) -> CmdResult<()> { "ResourcesLoaded" => UiReadyStage::ResourcesLoaded, "Ready" => UiReadyStage::Ready, _ => { - log::warn!(target: "app", "未知的UI加载阶段: {}", stage.as_str()); + logging!( + warn, + Type::Cmd, + "Warning: 未知的UI加载阶段: {}", + stage.as_str() + ); return Err(format!("未知的UI加载阶段: {}", stage.as_str()).into()); } }; diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs index 37033f48..b6dbc1fa 100644 --- a/src-tauri/src/cmd/clash.rs +++ b/src-tauri/src/cmd/clash.rs @@ -109,7 +109,7 @@ pub async fn test_delay(url: String) -> CmdResult { let result = match feat::test_delay(url).await { Ok(delay) => delay, Err(e) => { - log::error!(target: "app", "{}", e); + logging!(error, Type::Cmd, "{}", e); 10000u32 } }; diff --git a/src-tauri/src/cmd/network.rs b/src-tauri/src/cmd/network.rs index 120bd1f0..c0dedfde 100644 --- a/src-tauri/src/cmd/network.rs +++ b/src-tauri/src/cmd/network.rs @@ -2,13 +2,14 @@ use super::CmdResult; use crate::cmd::StringifyErr; use crate::core::{EventDrivenProxyManager, async_proxy_query::AsyncProxyQuery}; use crate::process::AsyncHandler; +use crate::{logging, utils::logging::Type}; use network_interface::NetworkInterface; use serde_yaml_ng::Mapping; /// get the system proxy #[tauri::command] pub async fn get_sys_proxy() -> CmdResult { - log::debug!(target: "app", "异步获取系统代理配置"); + logging!(debug, Type::Network, "异步获取系统代理配置"); let current = AsyncProxyQuery::get_system_proxy().await; @@ -20,14 +21,21 @@ pub async fn get_sys_proxy() -> CmdResult { ); map.insert("bypass".into(), current.bypass.into()); - log::debug!(target: "app", "返回系统代理配置: enable={}, {}:{}", current.enable, current.host, current.port); + logging!( + debug, + Type::Network, + "返回系统代理配置: enable={}, {}:{}", + current.enable, + current.host, + current.port + ); Ok(map) } /// 获取自动代理配置 #[tauri::command] pub async fn get_auto_proxy() -> CmdResult { - log::debug!(target: "app", "开始获取自动代理配置(事件驱动)"); + logging!(debug, Type::Network, 
"开始获取自动代理配置(事件驱动)"); let proxy_manager = EventDrivenProxyManager::global(); @@ -41,7 +49,13 @@ pub async fn get_auto_proxy() -> CmdResult { map.insert("enable".into(), current.enable.into()); map.insert("url".into(), current.url.clone().into()); - log::debug!(target: "app", "返回自动代理配置(缓存): enable={}, url={}", current.enable, current.url); + logging!( + debug, + Type::Network, + "返回自动代理配置(缓存): enable={}, url={}", + current.enable, + current.url + ); Ok(map) } diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 8080fe12..9ab8e21a 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -85,7 +85,7 @@ pub async fn enhance_profiles() -> CmdResult { match feat::enhance_profiles().await { Ok(_) => {} Err(e) => { - log::error!(target: "app", "{}", e); + logging!(error, Type::Cmd, "{}", e); return Err(e.to_string().into()); } } @@ -147,11 +147,11 @@ pub async fn import_profile(url: std::string::String, option: Option) pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { match profiles_reorder_safe(&active_id, &over_id).await { Ok(_) => { - log::info!(target: "app", "重新排序配置文件"); + logging!(info, Type::Cmd, "重新排序配置文件"); Ok(()) } Err(err) => { - log::error!(target: "app", "重新排序配置文件失败: {}", err); + logging!(error, Type::Cmd, "重新排序配置文件失败: {}", err); Err(format!("重新排序配置文件失败: {}", err).into()) } } @@ -183,7 +183,7 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu match feat::update_profile(&index, option.as_ref(), true, true).await { Ok(_) => Ok(()), Err(e) => { - log::error!(target: "app", "{}", e); + logging!(error, Type::Cmd, "{}", e); Err(e.to_string().into()) } } @@ -206,7 +206,7 @@ pub async fn delete_profile(index: String) -> CmdResult { handle::Handle::notify_profile_changed(index); } Err(e) => { - log::error!(target: "app", "{}", e); + logging!(error, Type::Cmd, "{}", e); return Err(e.to_string().into()); } } @@ -337,7 +337,7 @@ async fn 
restore_previous_profile(prev_profile: String) -> CmdResult<()> { Config::profiles().await.apply(); crate::process::AsyncHandler::spawn(|| async move { if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); + logging!(warn, Type::Cmd, "Warning: 异步保存恢复配置文件失败: {e}"); } }); logging!(info, Type::Cmd, "成功恢复到之前的配置"); @@ -368,15 +368,15 @@ async fn handle_success(current_sequence: u64, current_value: Option) -> handle::Handle::refresh_clash(); if let Err(e) = Tray::global().update_tooltip().await { - log::warn!(target: "app", "异步更新托盘提示失败: {e}"); + logging!(warn, Type::Cmd, "Warning: 异步更新托盘提示失败: {e}"); } if let Err(e) = Tray::global().update_menu().await { - log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); + logging!(warn, Type::Cmd, "Warning: 异步更新托盘菜单失败: {e}"); } if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存配置文件失败: {e}"); + logging!(warn, Type::Cmd, "Warning: 异步保存配置文件失败: {e}"); } if let Some(current) = ¤t_value { diff --git a/src-tauri/src/config/clash.rs b/src-tauri/src/config/clash.rs index ac216933..9ca6276a 100644 --- a/src-tauri/src/config/clash.rs +++ b/src-tauri/src/config/clash.rs @@ -1,6 +1,7 @@ use crate::config::Config; use crate::utils::dirs::{ipc_path, path_to_str}; use crate::utils::{dirs, help}; +use crate::{logging, utils::logging::Type}; use anyhow::Result; use serde::{Deserialize, Serialize}; use serde_yaml_ng::{Mapping, Value}; @@ -40,7 +41,7 @@ impl IClashTemp { Self(Self::guard(map)) } Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); template } } @@ -330,7 +331,7 @@ impl IClashTemp { .ok() .and_then(|path| path_to_str(&path).ok().map(|s| s.into())) .unwrap_or_else(|| { - log::error!(target: "app", "Failed to get IPC path"); + logging!(error, Type::Config, "Failed to get IPC path"); crate::constants::network::DEFAULT_EXTERNAL_CONTROLLER.into() }) } diff --git a/src-tauri/src/config/profiles.rs b/src-tauri/src/config/profiles.rs 
index d8d9a003..98886afc 100644 --- a/src-tauri/src/config/profiles.rs +++ b/src-tauri/src/config/profiles.rs @@ -3,6 +3,7 @@ use crate::utils::{ dirs::{self, PathBufExec}, help, }; +use crate::{logging, utils::logging::Type}; use anyhow::{Context, Result, bail}; use serde::{Deserialize, Serialize}; use serde_yaml_ng::Mapping; @@ -67,12 +68,12 @@ impl IProfiles { profiles } Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); Self::template() } }, Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); Self::template() } } @@ -492,7 +493,7 @@ impl IProfiles { { // 检查是否为全局扩展文件 if protected_files.contains(file_name) { - log::debug!(target: "app", "保护全局扩展配置文件: {file_name}"); + logging!(debug, Type::Config, "保护全局扩展配置文件: {file_name}"); continue; } @@ -501,11 +502,15 @@ impl IProfiles { match path.to_path_buf().remove_if_exists().await { Ok(_) => { deleted_files.push(file_name.into()); - log::info!(target: "app", "已清理冗余文件: {file_name}"); + logging!(info, Type::Config, "已清理冗余文件: {file_name}"); } Err(e) => { failed_deletions.push(format!("{file_name}: {e}").into()); - log::warn!(target: "app", "清理文件失败: {file_name} - {e}"); + logging!( + warn, + Type::Config, + "Warning: 清理文件失败: {file_name} - {e}" + ); } } } @@ -518,8 +523,9 @@ impl IProfiles { failed_deletions, }; - log::info!( - target: "app", + logging!( + info, + Type::Config, "Profile 文件清理完成: 总文件数={}, 删除文件数={}, 失败数={}", result.total_files, result.deleted_files.len(), diff --git a/src-tauri/src/config/verge.rs b/src-tauri/src/config/verge.rs index 61f69ea9..1e37cee4 100644 --- a/src-tauri/src/config/verge.rs +++ b/src-tauri/src/config/verge.rs @@ -353,12 +353,12 @@ impl IVerge { config } Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); Self::template() } }, Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); Self::template() } } diff --git 
a/src-tauri/src/core/async_proxy_query.rs b/src-tauri/src/core/async_proxy_query.rs index 2fd8b3d8..c9e31c67 100644 --- a/src-tauri/src/core/async_proxy_query.rs +++ b/src-tauri/src/core/async_proxy_query.rs @@ -1,5 +1,6 @@ #[cfg(target_os = "windows")] use crate::process::AsyncHandler; +use crate::{logging, utils::logging::Type}; use anyhow::Result; use serde::{Deserialize, Serialize}; use tokio::time::{Duration, timeout}; @@ -41,15 +42,21 @@ impl AsyncProxyQuery { pub async fn get_auto_proxy() -> AsyncAutoproxy { match timeout(Duration::from_secs(3), Self::get_auto_proxy_impl()).await { Ok(Ok(proxy)) => { - log::debug!(target: "app", "异步获取自动代理成功: enable={}, url={}", proxy.enable, proxy.url); + logging!( + debug, + Type::Network, + "异步获取自动代理成功: enable={}, url={}", + proxy.enable, + proxy.url + ); proxy } Ok(Err(e)) => { - log::warn!(target: "app", "异步获取自动代理失败: {e}"); + logging!(warn, Type::Network, "Warning: 异步获取自动代理失败: {e}"); AsyncAutoproxy::default() } Err(_) => { - log::warn!(target: "app", "异步获取自动代理超时"); + logging!(warn, Type::Network, "Warning: 异步获取自动代理超时"); AsyncAutoproxy::default() } } @@ -59,15 +66,22 @@ impl AsyncProxyQuery { pub async fn get_system_proxy() -> AsyncSysproxy { match timeout(Duration::from_secs(3), Self::get_system_proxy_impl()).await { Ok(Ok(proxy)) => { - log::debug!(target: "app", "异步获取系统代理成功: enable={}, {}:{}", proxy.enable, proxy.host, proxy.port); + logging!( + debug, + Type::Network, + "异步获取系统代理成功: enable={}, {}:{}", + proxy.enable, + proxy.host, + proxy.port + ); proxy } Ok(Err(e)) => { - log::warn!(target: "app", "异步获取系统代理失败: {e}"); + logging!(warn, Type::Network, "Warning: 异步获取系统代理失败: {e}"); AsyncSysproxy::default() } Err(_) => { - log::warn!(target: "app", "异步获取系统代理超时"); + logging!(warn, Type::Network, "Warning: 异步获取系统代理超时"); AsyncSysproxy::default() } } @@ -99,7 +113,7 @@ impl AsyncProxyQuery { RegOpenKeyExW(HKEY_CURRENT_USER, key_path.as_ptr(), 0, KEY_READ, &mut hkey); if result != 0 { - log::debug!(target: "app", "无法打开注册表项"); 
+ logging!(debug, Type::Network, "无法打开注册表项"); return Ok(AsyncAutoproxy::default()); } @@ -125,7 +139,7 @@ impl AsyncProxyQuery { .position(|&x| x == 0) .unwrap_or(url_buffer.len()); pac_url = String::from_utf16_lossy(&url_buffer[..end_pos]); - log::debug!(target: "app", "从注册表读取到PAC URL: {pac_url}"); + logging!(debug, Type::Network, "从注册表读取到PAC URL: {pac_url}"); } // 2. 检查自动检测设置是否启用 @@ -150,7 +164,11 @@ impl AsyncProxyQuery { || (detect_query_result == 0 && detect_value_type == REG_DWORD && auto_detect != 0); if pac_enabled { - log::debug!(target: "app", "PAC配置启用: URL={pac_url}, AutoDetect={auto_detect}"); + logging!( + debug, + Type::Network, + "PAC配置启用: URL={pac_url}, AutoDetect={auto_detect}" + ); if pac_url.is_empty() && auto_detect != 0 { pac_url = "auto-detect".into(); @@ -161,7 +179,7 @@ impl AsyncProxyQuery { url: pac_url, }) } else { - log::debug!(target: "app", "PAC配置未启用"); + logging!(debug, Type::Network, "PAC配置未启用"); Ok(AsyncAutoproxy::default()) } } @@ -177,7 +195,11 @@ impl AsyncProxyQuery { } let stdout = String::from_utf8_lossy(&output.stdout); - log::debug!(target: "app", "scutil output: {stdout}"); + crate::logging!( + debug, + crate::utils::logging::Type::Network, + "scutil output: {stdout}" + ); let mut pac_enabled = false; let mut pac_url = String::new(); @@ -196,7 +218,11 @@ impl AsyncProxyQuery { } } - log::debug!(target: "app", "解析结果: pac_enabled={pac_enabled}, pac_url={pac_url}"); + crate::logging!( + debug, + crate::utils::logging::Type::Network, + "解析结果: pac_enabled={pac_enabled}, pac_url={pac_url}" + ); Ok(AsyncAutoproxy { enable: pac_enabled && !pac_url.is_empty(), @@ -363,7 +389,11 @@ impl AsyncProxyQuery { (proxy_server, 8080) }; - log::debug!(target: "app", "从注册表读取到代理设置: {host}:{port}, bypass: {bypass_list}"); + logging!( + debug, + Type::Network, + "从注册表读取到代理设置: {host}:{port}, bypass: {bypass_list}" + ); Ok(AsyncSysproxy { enable: true, @@ -386,7 +416,7 @@ impl AsyncProxyQuery { } let stdout = String::from_utf8_lossy(&output.stdout); 
- log::debug!(target: "app", "scutil proxy output: {stdout}"); + logging!(debug, Type::Network, "scutil proxy output: {stdout}"); let mut http_enabled = false; let mut http_host = String::new(); diff --git a/src-tauri/src/core/backup.rs b/src-tauri/src/core/backup.rs index 345d1f05..167fbdd1 100644 --- a/src-tauri/src/core/backup.rs +++ b/src-tauri/src/core/backup.rs @@ -1,4 +1,9 @@ -use crate::{config::Config, process::AsyncHandler, utils::dirs}; +use crate::{ + config::Config, + logging, + process::AsyncHandler, + utils::{dirs, logging::Type}, +}; use anyhow::Error; use once_cell::sync::OnceCell; use parking_lot::Mutex; @@ -137,9 +142,14 @@ impl WebDavClient { .is_err() { match client.mkcol(dirs::BACKUP_DIR).await { - Ok(_) => log::info!("Successfully created backup directory"), + Ok(_) => logging!(info, Type::Backup, "Successfully created backup directory"), Err(e) => { - log::warn!("Failed to create backup directory: {}", e); + logging!( + warn, + Type::Backup, + "Warning: Failed to create backup directory: {}", + e + ); // 清除缓存,强制下次重新尝试 self.reset(); return Err(anyhow::Error::msg(format!( @@ -180,7 +190,11 @@ impl WebDavClient { match upload_result { Err(_) => { - log::warn!("Upload timed out, retrying once"); + logging!( + warn, + Type::Backup, + "Warning: Upload timed out, retrying once" + ); tokio::time::sleep(Duration::from_millis(500)).await; timeout( Duration::from_secs(TIMEOUT_UPLOAD), @@ -191,7 +205,11 @@ impl WebDavClient { } Ok(Err(e)) => { - log::warn!("Upload failed, retrying once: {e}"); + logging!( + warn, + Type::Backup, + "Warning: Upload failed, retrying once: {e}" + ); tokio::time::sleep(Duration::from_millis(500)).await; timeout( Duration::from_secs(TIMEOUT_UPLOAD), diff --git a/src-tauri/src/core/event_driven_proxy.rs b/src-tauri/src/core/event_driven_proxy.rs index dd0928c7..18a6f155 100644 --- a/src-tauri/src/core/event_driven_proxy.rs +++ b/src-tauri/src/core/event_driven_proxy.rs @@ -7,6 +7,7 @@ use tokio_stream::{StreamExt, 
wrappers::UnboundedReceiverStream}; use crate::config::{Config, IVerge}; use crate::core::{async_proxy_query::AsyncProxyQuery, handle}; use crate::process::AsyncHandler; +use crate::{logging, utils::logging::Type}; use once_cell::sync::Lazy; use smartstring::alias::String; use sysproxy::{Autoproxy, Sysproxy}; @@ -104,14 +105,14 @@ impl EventDrivenProxyManager { let query = QueryRequest { response_tx: tx }; if self.query_sender.send(query).is_err() { - log::error!(target: "app", "发送查询请求失败,返回缓存数据"); + logging!(error, Type::Network, "发送查询请求失败,返回缓存数据"); return self.get_auto_proxy_cached().await; } match timeout(Duration::from_secs(5), rx).await { Ok(Ok(result)) => result, _ => { - log::warn!(target: "app", "查询超时,返回缓存数据"); + logging!(warn, Type::Network, "Warning: 查询超时,返回缓存数据"); self.get_auto_proxy_cached().await } } @@ -134,7 +135,7 @@ impl EventDrivenProxyManager { fn send_event(&self, event: ProxyEvent) { if let Err(e) = self.event_sender.send(event) { - log::error!(target: "app", "发送代理事件失败: {e}"); + logging!(error, Type::Network, "发送代理事件失败: {e}"); } } @@ -143,7 +144,7 @@ impl EventDrivenProxyManager { event_rx: mpsc::UnboundedReceiver, query_rx: mpsc::UnboundedReceiver, ) { - log::info!(target: "app", "事件驱动代理管理器启动"); + logging!(info, Type::Network, "事件驱动代理管理器启动"); // 将 mpsc 接收器包装成 Stream,避免每次循环创建 future let mut event_stream = UnboundedReceiverStream::new(event_rx); @@ -158,7 +159,7 @@ impl EventDrivenProxyManager { loop { tokio::select! 
{ Some(event) = event_stream.next() => { - log::debug!(target: "app", "处理代理事件: {event:?}"); + logging!(debug, Type::Network, "处理代理事件: {event:?}"); let event_clone = event.clone(); // 保存一份副本用于后续检查 Self::handle_event(&state, event).await; @@ -179,13 +180,13 @@ impl EventDrivenProxyManager { // 定时检查代理设置 let config = Self::get_proxy_config().await; if config.guard_enabled && config.sys_enabled { - log::debug!(target: "app", "定时检查代理设置"); + logging!(debug, Type::Network, "定时检查代理设置"); Self::check_and_restore_proxy(&state).await; } } else => { // 两个通道都关闭时退出 - log::info!(target: "app", "事件或查询通道关闭,代理管理器停止"); + logging!(info, Type::Network, "事件或查询通道关闭,代理管理器停止"); break; } } @@ -201,7 +202,7 @@ impl EventDrivenProxyManager { Self::initialize_proxy_state(state).await; } ProxyEvent::AppStopping => { - log::info!(target: "app", "清理代理状态"); + logging!(info, Type::Network, "清理代理状态"); Self::update_state_timestamp(state, |s| { s.sys_enabled = false; s.pac_enabled = false; @@ -224,7 +225,7 @@ impl EventDrivenProxyManager { } async fn initialize_proxy_state(state: &Arc>) { - log::info!(target: "app", "初始化代理状态"); + logging!(info, Type::Network, "初始化代理状态"); let config = Self::get_proxy_config().await; let auto_proxy = Self::get_auto_proxy_with_timeout().await; @@ -239,11 +240,17 @@ impl EventDrivenProxyManager { }) .await; - log::info!(target: "app", "代理状态初始化完成: sys={}, pac={}", config.sys_enabled, config.pac_enabled); + logging!( + info, + Type::Network, + "代理状态初始化完成: sys={}, pac={}", + config.sys_enabled, + config.pac_enabled + ); } async fn update_proxy_config(state: &Arc>) { - log::debug!(target: "app", "更新代理配置"); + logging!(debug, Type::Network, "更新代理配置"); let config = Self::get_proxy_config().await; @@ -260,7 +267,7 @@ impl EventDrivenProxyManager { async fn check_and_restore_proxy(state: &Arc>) { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过系统代理守卫检查"); + logging!(debug, Type::Network, "应用正在退出,跳过系统代理守卫检查"); return; } let (sys_enabled, pac_enabled) 
= { @@ -272,7 +279,7 @@ impl EventDrivenProxyManager { return; } - log::debug!(target: "app", "检查代理状态"); + logging!(debug, Type::Network, "检查代理状态"); if pac_enabled { Self::check_and_restore_pac_proxy(state).await; @@ -283,7 +290,7 @@ impl EventDrivenProxyManager { async fn check_and_restore_pac_proxy(state: &Arc>) { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过PAC代理恢复检查"); + logging!(debug, Type::Network, "应用正在退出,跳过PAC代理恢复检查"); return; } @@ -296,9 +303,9 @@ impl EventDrivenProxyManager { .await; if !current.enable || current.url != expected.url { - log::info!(target: "app", "PAC代理设置异常,正在恢复..."); + logging!(info, Type::Network, "PAC代理设置异常,正在恢复..."); if let Err(e) = Self::restore_pac_proxy(&expected.url).await { - log::error!(target: "app", "恢复PAC代理失败: {}", e); + logging!(error, Type::Network, "恢复PAC代理失败: {}", e); } sleep(Duration::from_millis(500)).await; @@ -314,7 +321,7 @@ impl EventDrivenProxyManager { async fn check_and_restore_sys_proxy(state: &Arc>) { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过系统代理恢复检查"); + logging!(debug, Type::Network, "应用正在退出,跳过系统代理恢复检查"); return; } @@ -327,9 +334,9 @@ impl EventDrivenProxyManager { .await; if !current.enable || current.host != expected.host || current.port != expected.port { - log::info!(target: "app", "系统代理设置异常,正在恢复..."); + logging!(info, Type::Network, "系统代理设置异常,正在恢复..."); if let Err(e) = Self::restore_sys_proxy(&expected).await { - log::error!(target: "app", "恢复系统代理失败: {}", e); + logging!(error, Type::Network, "恢复系统代理失败: {}", e); } sleep(Duration::from_millis(500)).await; @@ -457,7 +464,7 @@ impl EventDrivenProxyManager { #[cfg(target_os = "windows")] async fn restore_pac_proxy(expected_url: &str) -> Result<(), anyhow::Error> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过PAC代理恢复"); + logging!(debug, Type::Network, "应用正在退出,跳过PAC代理恢复"); return Ok(()); } Self::execute_sysproxy_command(&["pac", 
expected_url]).await @@ -481,7 +488,7 @@ impl EventDrivenProxyManager { #[cfg(target_os = "windows")] async fn restore_sys_proxy(expected: &Sysproxy) -> Result<(), anyhow::Error> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过系统代理恢复"); + logging!(debug, Type::Network, "应用正在退出,跳过系统代理恢复"); return Ok(()); } let address = format!("{}:{}", expected.host, expected.port); @@ -502,8 +509,9 @@ impl EventDrivenProxyManager { #[cfg(target_os = "windows")] async fn execute_sysproxy_command(args: &[&str]) -> Result<(), anyhow::Error> { if handle::Handle::global().is_exiting() { - log::debug!( - target: "app", + logging!( + debug, + Type::Network, "应用正在退出,取消调用 sysproxy.exe,参数: {:?}", args ); @@ -518,14 +526,14 @@ impl EventDrivenProxyManager { let binary_path = match dirs::service_path() { Ok(path) => path, Err(e) => { - log::error!(target: "app", "获取服务路径失败: {e}"); + logging!(error, Type::Network, "获取服务路径失败: {e}"); return Err(e); } }; let sysproxy_exe = binary_path.with_file_name("sysproxy.exe"); if !sysproxy_exe.exists() { - log::error!(target: "app", "sysproxy.exe 不存在"); + logging!(error, Type::Network, "sysproxy.exe 不存在"); } anyhow::ensure!(sysproxy_exe.exists(), "sysproxy.exe does not exist"); diff --git a/src-tauri/src/core/sysopt.rs b/src-tauri/src/core/sysopt.rs index 77e561f1..d73d9a91 100644 --- a/src-tauri/src/core/sysopt.rs +++ b/src-tauri/src/core/sysopt.rs @@ -99,7 +99,7 @@ impl Sysopt { let proxy_manager = EventDrivenProxyManager::global(); proxy_manager.notify_app_started(); - log::info!(target: "app", "已启用事件驱动代理守卫"); + logging!(info, Type::Core, "已启用事件驱动代理守卫"); Ok(()) } @@ -224,14 +224,22 @@ impl Sysopt { let mut sysproxy: Sysproxy = match Sysproxy::get_system_proxy() { Ok(sp) => sp, Err(e) => { - log::warn!(target: "app", "重置代理时获取系统代理配置失败: {e}, 使用默认配置"); + logging!( + warn, + Type::Core, + "Warning: 重置代理时获取系统代理配置失败: {e}, 使用默认配置" + ); Sysproxy::default() } }; let mut autoproxy = match Autoproxy::get_auto_proxy() { Ok(ap) => 
ap, Err(e) => { - log::warn!(target: "app", "重置代理时获取自动代理配置失败: {e}, 使用默认配置"); + logging!( + warn, + Type::Core, + "Warning: 重置代理时获取自动代理配置失败: {e}, 使用默认配置" + ); Autoproxy::default() } }; @@ -265,14 +273,14 @@ impl Sysopt { { if is_enable { if let Err(e) = startup_shortcut::create_shortcut().await { - log::error!(target: "app", "创建启动快捷方式失败: {e}"); + logging!(error, Type::Setup, "创建启动快捷方式失败: {e}"); // 如果快捷方式创建失败,回退到原来的方法 self.try_original_autostart_method(is_enable); } else { return Ok(()); } } else if let Err(e) = startup_shortcut::remove_shortcut().await { - log::error!(target: "app", "删除启动快捷方式失败: {e}"); + logging!(error, Type::Setup, "删除启动快捷方式失败: {e}"); self.try_original_autostart_method(is_enable); } else { return Ok(()); @@ -307,11 +315,11 @@ impl Sysopt { { match startup_shortcut::is_shortcut_enabled() { Ok(enabled) => { - log::info!(target: "app", "快捷方式自启动状态: {enabled}"); + logging!(info, Type::System, "快捷方式自启动状态: {enabled}"); return Ok(enabled); } Err(e) => { - log::error!(target: "app", "检查快捷方式失败,尝试原来的方法: {e}"); + logging!(error, Type::System, "检查快捷方式失败,尝试原来的方法: {e}"); } } } @@ -322,11 +330,11 @@ impl Sysopt { match autostart_manager.is_enabled() { Ok(status) => { - log::info!(target: "app", "Auto launch status: {status}"); + logging!(info, Type::System, "Auto launch status: {status}"); Ok(status) } Err(e) => { - log::error!(target: "app", "Failed to get auto launch status: {e}"); + logging!(error, Type::System, "Failed to get auto launch status: {e}"); Err(anyhow::anyhow!("Failed to get auto launch status: {}", e)) } } diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index fe657d11..8261c98a 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -62,8 +62,12 @@ fn should_handle_tray_click() -> bool { *last_click = now; true } else { - log::debug!(target: "app", "托盘点击被防抖机制忽略,距离上次点击 {:?}ms", - now.duration_since(*last_click).as_millis()); + logging!( + debug, + Type::Tray, + "托盘点击被防抖机制忽略,距离上次点击 {}ms", + 
now.duration_since(*last_click).as_millis() + ); false } } @@ -207,7 +211,7 @@ singleton_lazy!(Tray, TRAY, Tray::default); impl Tray { pub async fn init(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘初始化"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘初始化"); return Ok(()); } @@ -215,11 +219,15 @@ impl Tray { match self.create_tray_from_handle(app_handle).await { Ok(_) => { - log::info!(target: "app", "System tray created successfully"); + logging!(info, Type::Tray, "System tray created successfully"); } Err(e) => { // Don't return error, let application continue running without tray - log::warn!(target: "app", "System tray creation failed: {}, Application will continue running without tray icon", e); + logging!( + warn, + Type::Tray, + "System tray creation failed: {e}, Application will continue running without tray icon", + ); } } // TODO: 初始化时,暂时使用此方法更新系统托盘菜单,有效避免代理节点菜单空白 @@ -230,7 +238,7 @@ impl Tray { /// 更新托盘点击行为 pub async fn update_click_behavior(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘点击行为更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘点击行为更新"); return Ok(()); } @@ -250,7 +258,7 @@ impl Tray { /// 更新托盘菜单 pub async fn update_menu(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘菜单更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘菜单更新"); return Ok(()); } // 调整最小更新间隔,确保状态及时刷新 @@ -332,11 +340,15 @@ impl Tray { ) .await?, )); - log::debug!(target: "app", "托盘菜单更新成功"); + logging!(debug, Type::Tray, "托盘菜单更新成功"); Ok(()) } None => { - log::warn!(target: "app", "更新托盘菜单失败: 托盘不存在"); + logging!( + warn, + Type::Tray, + "Failed to update tray menu: tray not found" + ); Ok(()) } } @@ -346,7 +358,7 @@ impl Tray { #[cfg(target_os = "macos")] pub async fn update_icon(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘图标更新"); + logging!(debug, 
Type::Tray, "应用正在退出,跳过托盘图标更新"); return Ok(()); } @@ -355,7 +367,11 @@ impl Tray { let tray = match app_handle.tray_by_id("main") { Some(tray) => tray, None => { - log::warn!(target: "app", "更新托盘图标失败: 托盘不存在"); + logging!( + warn, + Type::Tray, + "Failed to update tray icon: tray not found" + ); return Ok(()); } }; @@ -385,7 +401,7 @@ impl Tray { #[cfg(not(target_os = "macos"))] pub async fn update_icon(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘图标更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘图标更新"); return Ok(()); } @@ -394,7 +410,11 @@ impl Tray { let tray = match app_handle.tray_by_id("main") { Some(tray) => tray, None => { - log::warn!(target: "app", "更新托盘图标失败: 托盘不存在"); + logging!( + warn, + Type::Tray, + "Failed to update tray icon: tray not found" + ); return Ok(()); } }; @@ -417,7 +437,7 @@ impl Tray { /// 更新托盘显示状态的函数 pub async fn update_tray_display(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘显示状态更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘显示状态更新"); return Ok(()); } @@ -435,7 +455,7 @@ impl Tray { /// 更新托盘提示 pub async fn update_tooltip(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘提示更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘提示更新"); return Ok(()); } @@ -491,7 +511,11 @@ impl Tray { if let Some(tray) = app_handle.tray_by_id("main") { let _ = tray.set_tooltip(Some(&tooltip)); } else { - log::warn!(target: "app", "更新托盘提示失败: 托盘不存在"); + logging!( + warn, + Type::Tray, + "Failed to update tray tooltip: tray not found" + ); } Ok(()) @@ -499,7 +523,7 @@ impl Tray { pub async fn update_part(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘局部更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘局部更新"); return Ok(()); } // self.update_menu().await?; @@ -512,11 +536,11 @@ impl Tray { pub async fn 
create_tray_from_handle(&self, app_handle: &AppHandle) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘创建"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘创建"); return Ok(()); } - log::info!(target: "app", "正在从AppHandle创建系统托盘"); + logging!(info, Type::Tray, "正在从AppHandle创建系统托盘"); // 获取图标 let icon_bytes = TrayState::get_common_tray_icon().await.1; @@ -562,7 +586,7 @@ impl Tray { AsyncHandler::spawn(|| async move { let tray_event = { Config::verge().await.latest_ref().tray_event.clone() }; let tray_event: String = tray_event.unwrap_or_else(|| "main_window".into()); - log::debug!(target: "app", "tray event: {tray_event:?}"); + logging!(debug, Type::Tray, "tray event: {tray_event:?}"); if let TrayIconEvent::Click { button: MouseButton::Left, @@ -597,14 +621,13 @@ impl Tray { }); }); tray.on_menu_event(on_menu_event); - log::info!(target: "app", "系统托盘创建成功"); Ok(()) } // 托盘统一的状态更新函数 pub async fn update_all_states(&self) -> Result<()> { if handle::Handle::global().is_exiting() { - log::debug!(target: "app", "应用正在退出,跳过托盘状态更新"); + logging!(debug, Type::Tray, "应用正在退出,跳过托盘状态更新"); return Ok(()); } @@ -731,7 +754,9 @@ fn create_subcreate_proxy_menu_item( is_selected, None::<&str>, ) - .map_err(|e| log::warn!(target: "app", "创建代理菜单项失败: {}", e)) + .map_err(|e| { + logging!(warn, Type::Tray, "Failed to create proxy menu item: {}", e) + }) .ok() }) .collect(); @@ -773,7 +798,12 @@ fn create_subcreate_proxy_menu_item( let insertion_index = submenus.len(); submenus.push((group_name.into(), insertion_index, submenu)); } else { - log::warn!(target: "app", "创建代理组子菜单失败: {}", group_name); + logging!( + warn, + Type::Tray, + "Failed to create proxy group submenu: {}", + group_name + ); } } } @@ -1166,7 +1196,7 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { feat::change_clash_mode(mode.into()).await; } MenuIds::DASHBOARD => { - log::info!(target: "app", "托盘菜单点击: 打开窗口"); + logging!(info, Type::Tray, "托盘菜单点击: 打开窗口"); if 
!should_handle_tray_click() { return; @@ -1183,7 +1213,11 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { } MenuIds::CLOSE_ALL_CONNECTIONS => { if let Err(err) = handle::Handle::mihomo().await.close_all_connections().await { - log::error!(target: "app", "Failed to close all connections from tray: {err}"); + logging!( + error, + Type::Tray, + "Failed to close all connections from tray: {err}" + ); } } MenuIds::COPY_ENV => feat::copy_clash_env().await, @@ -1236,12 +1270,25 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { .await { Ok(_) => { - log::info!(target: "app", "切换代理成功: {} -> {}", group_name, proxy_name); + logging!( + info, + Type::Tray, + "切换代理成功: {} -> {}", + group_name, + proxy_name + ); let _ = handle::Handle::app_handle() .emit("verge://refresh-proxy-config", ()); } Err(e) => { - log::error!(target: "app", "切换代理失败: {} -> {}, 错误: {:?}", group_name, proxy_name, e); + logging!( + error, + Type::Tray, + "切换代理失败: {} -> {}, 错误: {:?}", + group_name, + proxy_name, + e + ); // Fallback to IPC update if (handle::Handle::mihomo() @@ -1250,7 +1297,13 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { .await) .is_ok() { - log::info!(target: "app", "代理切换回退成功: {} -> {}", group_name, proxy_name); + logging!( + info, + Type::Tray, + "代理切换回退成功: {} -> {}", + group_name, + proxy_name + ); let app_handle = handle::Handle::app_handle(); let _ = app_handle.emit("verge://force-refresh-proxies", ()); @@ -1264,7 +1317,7 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) { // Ensure tray state update is awaited and properly handled if let Err(e) = Tray::global().update_all_states().await { - log::warn!(target: "app", "更新托盘状态失败: {e}"); + logging!(warn, Type::Tray, "Failed to update tray state: {e}"); } }); } diff --git a/src-tauri/src/enhance/merge.rs b/src-tauri/src/enhance/merge.rs index 0210851d..3d0e4bf9 100644 --- a/src-tauri/src/enhance/merge.rs +++ b/src-tauri/src/enhance/merge.rs @@ -1,3 +1,5 @@ +use crate::{logging, utils::logging::Type}; + use 
super::use_lowercase; use serde_yaml_ng::{self, Mapping, Value}; @@ -19,7 +21,11 @@ pub fn use_merge(merge: Mapping, config: Mapping) -> Mapping { deep_merge(&mut config, &Value::from(merge)); config.as_mapping().cloned().unwrap_or_else(|| { - log::error!("Failed to convert merged config to mapping, using empty mapping"); + logging!( + error, + Type::Core, + "Failed to convert merged config to mapping, using empty mapping" + ); Mapping::new() }) } diff --git a/src-tauri/src/enhance/mod.rs b/src-tauri/src/enhance/mod.rs index 05f2191b..1deb49e5 100644 --- a/src-tauri/src/enhance/mod.rs +++ b/src-tauri/src/enhance/mod.rs @@ -8,6 +8,7 @@ mod tun; use self::{chain::*, field::*, merge::*, script::*, seq::*, tun::*}; use crate::utils::dirs; use crate::{config::Config, utils::tmpl}; +use crate::{logging, utils::logging::Type}; use serde_yaml_ng::Mapping; use smartstring::alias::String; use std::collections::{HashMap, HashSet}; @@ -422,14 +423,14 @@ fn apply_builtin_scripts( .filter(|(s, _)| s.is_support(clash_core.as_ref())) .map(|(_, c)| c) .for_each(|item| { - log::debug!(target: "app", "run builtin script {}", item.uid); + logging!(debug, Type::Core, "run builtin script {}", item.uid); if let ChainType::Script(script) = item.data { match use_script(script, config.to_owned(), "".into()) { Ok((res_config, _)) => { config = res_config; } Err(err) => { - log::error!(target: "app", "builtin script error `{err}`"); + logging!(error, Type::Core, "builtin script error `{err}`"); } } } @@ -451,17 +452,17 @@ async fn apply_dns_settings(mut config: Mapping, enable_dns_settings: bool) -> M && hosts_value.is_mapping() { config.insert("hosts".into(), hosts_value.clone()); - log::info!(target: "app", "apply hosts configuration"); + logging!(info, Type::Core, "apply hosts configuration"); } if let Some(dns_value) = dns_config.get("dns") { if let Some(dns_mapping) = dns_value.as_mapping() { config.insert("dns".into(), dns_mapping.clone().into()); - log::info!(target: "app", "apply 
dns_config.yaml (dns section)"); + logging!(info, Type::Core, "apply dns_config.yaml (dns section)"); } } else { config.insert("dns".into(), dns_config.into()); - log::info!(target: "app", "apply dns_config.yaml"); + logging!(info, Type::Core, "apply dns_config.yaml"); } } } diff --git a/src-tauri/src/enhance/tun.rs b/src-tauri/src/enhance/tun.rs index 92c8f018..a09051fa 100644 --- a/src-tauri/src/enhance/tun.rs +++ b/src-tauri/src/enhance/tun.rs @@ -2,6 +2,8 @@ use serde_yaml_ng::{Mapping, Value}; #[cfg(target_os = "macos")] use crate::process::AsyncHandler; +#[cfg(target_os = "linux")] +use crate::{logging, utils::logging::Type}; macro_rules! revise { ($map: expr, $key: expr, $val: expr) => { @@ -42,9 +44,10 @@ pub fn use_tun(mut config: Mapping, enable: bool) -> Mapping { if should_override { revise!(tun_val, "stack", "mixed"); - log::warn!( - target: "app", - "gVisor TUN stack detected on Linux; falling back to 'mixed' for compatibility" + logging!( + warn, + Type::Network, + "Warning: gVisor TUN stack detected on Linux; falling back to 'mixed' for compatibility" ); } } diff --git a/src-tauri/src/feat/clash.rs b/src-tauri/src/feat/clash.rs index b4143a26..cf031d78 100644 --- a/src-tauri/src/feat/clash.rs +++ b/src-tauri/src/feat/clash.rs @@ -1,7 +1,7 @@ use crate::{ config::Config, core::{CoreManager, handle, tray}, - logging_error, + logging, logging_error, process::AsyncHandler, utils::{self, logging::Type, resolve}, }; @@ -17,7 +17,7 @@ pub async fn restart_clash_core() { } Err(err) => { handle::Handle::notice_message("set_config::error", format!("{err}")); - log::error!(target:"app", "{err}"); + logging!(error, Type::Core, "{err}"); } } } @@ -30,7 +30,7 @@ pub async fn restart_app() { "restart_app::error", format!("Failed to cleanup resources: {err}"), ); - log::error!(target:"app", "Restart failed during cleanup: {err}"); + logging!(error, Type::Core, "Restart failed during cleanup: {err}"); return; } @@ -50,7 +50,7 @@ fn after_change_clash_mode() { } } 
Err(err) => { - log::error!(target: "app", "Failed to get connections: {err}"); + logging!(error, Type::Core, "Failed to get connections: {err}"); } } }); @@ -64,7 +64,7 @@ pub async fn change_clash_mode(mode: String) { let json_value = serde_json::json!({ "mode": mode }); - log::debug!(target: "app", "change clash mode to {mode}"); + logging!(debug, Type::Core, "change clash mode to {mode}"); match handle::Handle::mihomo() .await .patch_base_config(&json_value) @@ -91,7 +91,7 @@ pub async fn change_clash_mode(mode: String) { after_change_clash_mode(); } } - Err(err) => log::error!(target: "app", "{err}"), + Err(err) => logging!(error, Type::Core, "{err}"), } } @@ -123,7 +123,7 @@ pub async fn test_delay(url: String) -> anyhow::Result { match response { Ok(response) => { - log::trace!(target: "app", "test_delay response: {response:#?}"); + logging!(trace, Type::Network, "test_delay response: {response:#?}"); if response.status().is_success() { Ok(start.elapsed().as_millis() as u32) } else { @@ -131,7 +131,7 @@ pub async fn test_delay(url: String) -> anyhow::Result { } } Err(err) => { - log::trace!(target: "app", "test_delay error: {err:#?}"); + logging!(trace, Type::Network, "test_delay error: {err:#?}"); Err(err) } } diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 4ebd4d06..273022a5 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -18,7 +18,7 @@ pub async fn toggle_proxy_profile(profile_index: String) { } } Err(err) => { - log::error!(target: "app", "{err}"); + logging!(error, Type::Tray, "{err}"); } } } @@ -33,10 +33,18 @@ async fn should_update_profile( let is_remote = item.itype.as_ref().is_some_and(|s| s == "remote"); if !is_remote { - log::info!(target: "app", "[订阅更新] {uid} 不是远程订阅,跳过更新"); + logging!( + info, + Type::Config, + "[订阅更新] {uid} 不是远程订阅,跳过更新" + ); Ok(None) } else if item.url.is_none() { - log::warn!(target: "app", "[订阅更新] {uid} 缺少URL,无法更新"); + logging!( + warn, + Type::Config, + 
"Warning: [订阅更新] {uid} 缺少URL,无法更新" + ); bail!("failed to get the profile item url"); } else if !ignore_auto_update && !item @@ -45,13 +53,22 @@ async fn should_update_profile( .and_then(|o| o.allow_auto_update) .unwrap_or(true) { - log::info!(target: "app", "[订阅更新] {} 禁止自动更新,跳过更新", uid); + logging!( + info, + Type::Config, + "[订阅更新] {} 禁止自动更新,跳过更新", + uid + ); Ok(None) } else { - log::info!(target: "app", + logging!( + info, + Type::Config, "[订阅更新] {} 是远程订阅,URL: {}", uid, - item.url.clone().ok_or_else(|| anyhow::anyhow!("Profile URL is None"))? + item.url + .clone() + .ok_or_else(|| anyhow::anyhow!("Profile URL is None"))? ); Ok(Some(( item.url @@ -68,20 +85,28 @@ async fn perform_profile_update( opt: Option<&PrfOption>, option: Option<&PrfOption>, ) -> Result { - log::info!(target: "app", "[订阅更新] 开始下载新的订阅内容"); + logging!(info, Type::Config, "[订阅更新] 开始下载新的订阅内容"); let merged_opt = PrfOption::merge(opt, option); match PrfItem::from_url(url, None, None, merged_opt.as_ref()).await { Ok(mut item) => { - log::info!(target: "app", "[订阅更新] 更新订阅配置成功"); + logging!(info, Type::Config, "[订阅更新] 更新订阅配置成功"); let profiles = Config::profiles().await; profiles_draft_update_item_safe(uid, &mut item).await?; let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); - log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); + logging!( + info, + Type::Config, + "[订阅更新] 是否为当前使用的订阅: {is_current}" + ); Ok(is_current) } Err(err) => { - log::warn!(target: "app", "[订阅更新] 正常更新失败: {err},尝试使用Clash代理更新"); + logging!( + warn, + Type::Config, + "Warning: [订阅更新] 正常更新失败: {err},尝试使用Clash代理更新" + ); handle::Handle::notice_message("update_retry_with_clash", uid.clone()); let original_with_proxy = merged_opt.as_ref().and_then(|o| o.with_proxy); @@ -93,7 +118,7 @@ async fn perform_profile_update( match PrfItem::from_url(url, None, None, Some(&fallback_opt)).await { Ok(mut item) => { - log::info!(target: "app", "[订阅更新] 使用Clash代理更新成功"); + logging!(info, Type::Config, "[订阅更新] 
使用Clash代理更新成功"); if let Some(option) = item.option.as_mut() { option.with_proxy = original_with_proxy; @@ -107,11 +132,19 @@ async fn perform_profile_update( handle::Handle::notice_message("update_with_clash_proxy", profile_name); let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); - log::info!(target: "app", "[订阅更新] 是否为当前使用的订阅: {is_current}"); + logging!( + info, + Type::Config, + "[订阅更新] 是否为当前使用的订阅: {is_current}" + ); Ok(is_current) } Err(retry_err) => { - log::error!(target: "app", "[订阅更新] 使用Clash代理更新仍然失败: {retry_err}"); + logging!( + error, + Type::Config, + "[订阅更新] 使用Clash代理更新仍然失败: {retry_err}" + ); handle::Handle::notice_message( "update_failed_even_with_clash", format!("{retry_err}"), @@ -149,7 +182,7 @@ pub async fn update_profile( Err(err) => { logging!(error, Type::Config, "[订阅更新] 更新失败: {}", err); handle::Handle::notice_message("update_failed", format!("{err}")); - log::error!(target: "app", "{err}"); + logging!(error, Type::Config, "{err}"); } } } diff --git a/src-tauri/src/feat/proxy.rs b/src-tauri/src/feat/proxy.rs index 67d94efb..731cec20 100644 --- a/src-tauri/src/feat/proxy.rs +++ b/src-tauri/src/feat/proxy.rs @@ -1,6 +1,8 @@ use crate::{ config::{Config, IVerge}, core::handle, + logging, + utils::logging::Type, }; use std::env; use tauri_plugin_clipboard_manager::ClipboardExt; @@ -16,7 +18,11 @@ pub async fn toggle_system_proxy() { && auto_close_connection && let Err(err) = handle::Handle::mihomo().await.close_all_connections().await { - log::error!(target: "app", "Failed to close all connections: {err}"); + logging!( + error, + Type::ProxyMode, + "Failed to close all connections: {err}" + ); } let patch_result = super::patch_verge( @@ -30,7 +36,7 @@ pub async fn toggle_system_proxy() { match patch_result { Ok(_) => handle::Handle::refresh_verge(), - Err(err) => log::error!(target: "app", "{err}"), + Err(err) => logging!(error, Type::ProxyMode, "{err}"), } } @@ -49,7 +55,7 @@ pub async fn toggle_tun_mode(not_save_file: 
Option) { .await { Ok(_) => handle::Handle::refresh_verge(), - Err(err) => log::error!(target: "app", "{err}"), + Err(err) => logging!(error, Type::ProxyMode, "{err}"), } } @@ -104,12 +110,16 @@ pub async fn copy_clash_env() { } "fish" => format!("set -x http_proxy {http_proxy}; set -x https_proxy {http_proxy}"), _ => { - log::error!(target: "app", "copy_clash_env: Invalid env type! {env_type}"); + logging!( + error, + Type::ProxyMode, + "copy_clash_env: Invalid env type! {env_type}" + ); return; } }; if cliboard.write_text(export_text).is_err() { - log::error!(target: "app", "Failed to write to clipboard"); + logging!(error, Type::ProxyMode, "Failed to write to clipboard"); } } diff --git a/src-tauri/src/feat/window.rs b/src-tauri/src/feat/window.rs index 246179e5..a23a41f2 100644 --- a/src-tauri/src/feat/window.rs +++ b/src-tauri/src/feat/window.rs @@ -14,7 +14,7 @@ pub async fn open_or_close_dashboard() { async fn open_or_close_dashboard_internal() { let _ = lightweight::exit_lightweight_mode().await; let result = WindowManager::toggle_main_window().await; - log::info!(target: "app", "Window toggle result: {result:?}"); + logging!(info, Type::Window, "Window toggle result: {result:?}"); } pub async fn quit() { @@ -71,16 +71,20 @@ pub async fn clean_async() -> bool { .await { Ok(Ok(_)) => { - log::info!(target: "app", "TUN模式已禁用"); + logging!(info, Type::Window, "TUN模式已禁用"); true } Ok(Err(e)) => { - log::warn!(target: "app", "禁用TUN模式失败: {e}"); + logging!(warn, Type::Window, "Warning: 禁用TUN模式失败: {e}"); // 超时不阻塞退出 true } Err(_) => { - log::warn!(target: "app", "禁用TUN模式超时(可能系统正在关机),继续退出流程"); + logging!( + warn, + Type::Window, + "Warning: 禁用TUN模式超时(可能系统正在关机),继续退出流程" + ); true } } @@ -101,7 +105,7 @@ pub async fn clean_async() -> bool { .unwrap_or(false); if !sys_proxy_enabled { - log::info!(target: "app", "系统代理未启用,跳过重置"); + logging!(info, Type::Window, "系统代理未启用,跳过重置"); return true; } @@ -110,19 +114,23 @@ pub async fn clean_async() -> bool { if is_shutting_down { // 
sysproxy-rs 操作注册表(避免.exe的dll错误) - log::info!(target: "app", "检测到正在关机,syspro-rs操作注册表关闭系统代理"); + logging!( + info, + Type::Window, + "检测到正在关机,syspro-rs操作注册表关闭系统代理" + ); match Sysproxy::get_system_proxy() { Ok(mut sysproxy) => { sysproxy.enable = false; if let Err(e) = sysproxy.set_system_proxy() { - log::warn!(target: "app", "关机时关闭系统代理失败: {e}"); + logging!(warn, Type::Window, "Warning: 关机时关闭系统代理失败: {e}"); } else { - log::info!(target: "app", "系统代理已关闭(通过注册表)"); + logging!(info, Type::Window, "系统代理已关闭(通过注册表)"); } } Err(e) => { - log::warn!(target: "app", "关机时获取代理设置失败: {e}"); + logging!(warn, Type::Window, "Warning: 关机时获取代理设置失败: {e}"); } } @@ -136,7 +144,7 @@ pub async fn clean_async() -> bool { } // 正常退出:使用 sysproxy.exe 重置代理 - log::info!(target: "app", "sysproxy.exe重置系统代理"); + logging!(info, Type::Window, "sysproxy.exe重置系统代理"); match timeout( Duration::from_secs(2), @@ -145,15 +153,19 @@ pub async fn clean_async() -> bool { .await { Ok(Ok(_)) => { - log::info!(target: "app", "系统代理已重置"); + logging!(info, Type::Window, "系统代理已重置"); true } Ok(Err(e)) => { - log::warn!(target: "app", "重置系统代理失败: {e}"); + logging!(warn, Type::Window, "Warning: 重置系统代理失败: {e}"); true } Err(_) => { - log::warn!(target: "app", "重置系统代理超时,继续退出流程"); + logging!( + warn, + Type::Window, + "Warning: 重置系统代理超时,继续退出流程" + ); true } } @@ -169,11 +181,11 @@ pub async fn clean_async() -> bool { .unwrap_or(false); if !sys_proxy_enabled { - log::info!(target: "app", "系统代理未启用,跳过重置"); + logging!(info, Type::Window, "系统代理未启用,跳过重置"); return true; } - log::info!(target: "app", "开始重置系统代理..."); + logging!(info, Type::Window, "开始重置系统代理..."); match timeout( Duration::from_millis(1500), @@ -182,15 +194,15 @@ pub async fn clean_async() -> bool { .await { Ok(Ok(_)) => { - log::info!(target: "app", "系统代理已重置"); + logging!(info, Type::Window, "系统代理已重置"); true } Ok(Err(e)) => { - log::warn!(target: "app", "重置系统代理失败: {e}"); + logging!(warn, Type::Window, "Warning: 重置系统代理失败: {e}"); true } Err(_) => { - log::warn!(target: "app", 
"重置系统代理超时,继续退出"); + logging!(warn, Type::Window, "Warning: 重置系统代理超时,继续退出"); true } } @@ -206,11 +218,15 @@ pub async fn clean_async() -> bool { match timeout(stop_timeout, CoreManager::global().stop_core()).await { Ok(_) => { - log::info!(target: "app", "core已停止"); + logging!(info, Type::Window, "core已停止"); true } Err(_) => { - log::warn!(target: "app", "停止core超时(可能系统正在关机),继续退出"); + logging!( + warn, + Type::Window, + "Warning: 停止core超时(可能系统正在关机),继续退出" + ); true } } @@ -226,11 +242,11 @@ pub async fn clean_async() -> bool { .await { Ok(_) => { - log::info!(target: "app", "DNS设置已恢复"); + logging!(info, Type::Window, "DNS设置已恢复"); true } Err(_) => { - log::warn!(target: "app", "恢复DNS设置超时"); + logging!(warn, Type::Window, "Warning: 恢复DNS设置超时"); false } } diff --git a/src-tauri/src/module/lightweight.rs b/src-tauri/src/module/lightweight.rs index 2252cb65..d5c3900e 100644 --- a/src-tauri/src/module/lightweight.rs +++ b/src-tauri/src/module/lightweight.rs @@ -168,7 +168,11 @@ fn setup_window_close_listener() { let handler_id = window.listen("tauri://close-requested", move |_event| { std::mem::drop(AsyncHandler::spawn(|| async { if let Err(e) = setup_light_weight_timer().await { - log::warn!("Failed to setup light weight timer: {e}"); + logging!( + warn, + Type::Lightweight, + "Warning: Failed to setup light weight timer: {e}" + ); } })); logging!(info, Type::Lightweight, "监听到关闭请求,开始轻量模式计时"); diff --git a/src-tauri/src/process/guard.rs b/src-tauri/src/process/guard.rs index e4a48909..a776094f 100644 --- a/src-tauri/src/process/guard.rs +++ b/src-tauri/src/process/guard.rs @@ -1,13 +1,20 @@ use anyhow::Result; use tauri_plugin_shell::process::CommandChild; +use crate::{logging, utils::logging::Type}; + #[derive(Debug)] pub struct CommandChildGuard(Option); impl Drop for CommandChildGuard { fn drop(&mut self) { if let Err(err) = self.kill() { - log::error!(target: "app", "Failed to kill child process: {}", err); + logging!( + error, + Type::Service, + "Failed to kill child 
process: {}", + err + ); } } } diff --git a/src-tauri/src/utils/autostart.rs b/src-tauri/src/utils/autostart.rs index 06d360ed..b4dc4965 100644 --- a/src-tauri/src/utils/autostart.rs +++ b/src-tauri/src/utils/autostart.rs @@ -1,7 +1,7 @@ #[cfg(target_os = "windows")] +use crate::{logging, utils::logging::Type}; +#[cfg(target_os = "windows")] use anyhow::{Result, anyhow}; -#[cfg(target_os = "windows")] -use log::info; #[cfg(target_os = "windows")] use std::{os::windows::process::CommandExt, path::Path, path::PathBuf}; @@ -49,15 +49,15 @@ pub async fn create_shortcut() -> Result<()> { .remove_if_exists() .await .inspect(|_| { - info!(target: "app", "成功移除旧启动快捷方式"); + logging!(info, Type::Setup, "成功移除旧启动快捷方式"); }) .inspect_err(|err| { - log::error!(target: "app", "移除旧启动快捷方式失败: {err}"); + logging!(error, Type::Setup, "移除旧启动快捷方式失败: {err}"); }); // 如果新快捷方式已存在,直接返回成功 if new_shortcut_path.exists() { - info!(target: "app", "启动快捷方式已存在"); + logging!(info, Type::Setup, "启动快捷方式已存在"); return Ok(()); } @@ -83,7 +83,7 @@ pub async fn create_shortcut() -> Result<()> { return Err(anyhow!("创建快捷方式失败: {}", error_msg)); } - info!(target: "app", "成功创建启动快捷方式"); + logging!(info, Type::Setup, "成功创建启动快捷方式"); Ok(()) } @@ -102,22 +102,22 @@ pub async fn remove_shortcut() -> Result<()> { .remove_if_exists() .await .inspect(|_| { - info!(target: "app", "成功删除旧启动快捷方式"); + logging!(info, Type::Setup, "成功删除旧启动快捷方式"); removed_any = true; }) .inspect_err(|err| { - log::error!(target: "app", "删除旧启动快捷方式失败: {err}"); + logging!(error, Type::Setup, "删除旧启动快捷方式失败: {err}"); }); let _ = new_shortcut_path .remove_if_exists() .await .inspect(|_| { - info!(target: "app", "成功删除启动快捷方式"); + logging!(info, Type::Setup, "成功删除启动快捷方式"); removed_any = true; }) .inspect_err(|err| { - log::error!(target: "app", "删除启动快捷方式失败: {err}"); + logging!(error, Type::Setup, "删除启动快捷方式失败: {err}"); }); Ok(()) diff --git a/src-tauri/src/utils/dirs.rs b/src-tauri/src/utils/dirs.rs index cfae29fe..b16d3d8e 100644 --- 
a/src-tauri/src/utils/dirs.rs +++ b/src-tauri/src/utils/dirs.rs @@ -62,7 +62,11 @@ pub fn app_home_dir() -> Result { match app_handle.path().data_dir() { Ok(dir) => Ok(dir.join(APP_ID)), Err(e) => { - log::error!(target: "app", "Failed to get the app home directory: {e}"); + logging!( + error, + Type::File, + "Failed to get the app home directory: {e}" + ); Err(anyhow::anyhow!("Failed to get the app homedirectory")) } } @@ -76,7 +80,11 @@ pub fn app_resources_dir() -> Result { match app_handle.path().resource_dir() { Ok(dir) => Ok(dir.join("resources")), Err(e) => { - log::error!(target: "app", "Failed to get the resource directory: {e}"); + logging!( + error, + Type::File, + "Failed to get the resource directory: {e}" + ); Err(anyhow::anyhow!("Failed to get the resource directory")) } } @@ -229,7 +237,11 @@ pub fn ensure_mihomo_safe_dir() -> Option { if home_config.exists() || fs::create_dir_all(&home_config).is_ok() { Some(home_config) } else { - log::error!(target: "app", "Failed to create safe directory: {home_config:?}"); + logging!( + error, + Type::File, + "Failed to create safe directory: {home_config:?}" + ); None } }) diff --git a/src-tauri/src/utils/logging.rs b/src-tauri/src/utils/logging.rs index e623b099..a73b18eb 100644 --- a/src-tauri/src/utils/logging.rs +++ b/src-tauri/src/utils/logging.rs @@ -29,7 +29,6 @@ pub enum Type { Lightweight, Network, ProxyMode, - // Cache, Validate, ClashVergeRev, } @@ -53,7 +52,6 @@ impl fmt::Display for Type { Type::Lightweight => write!(f, "[Lightweight]"), Type::Network => write!(f, "[Network]"), Type::ProxyMode => write!(f, "[ProxMode]"), - // Type::Cache => write!(f, "[Cache]"), Type::Validate => write!(f, "[Validate]"), Type::ClashVergeRev => write!(f, "[ClashVergeRev]"), } diff --git a/src-tauri/src/utils/resolve/dns.rs b/src-tauri/src/utils/resolve/dns.rs index 4b710bdc..e225f0c8 100644 --- a/src-tauri/src/utils/resolve/dns.rs +++ b/src-tauri/src/utils/resolve/dns.rs @@ -1,20 +1,27 @@ #[cfg(target_os = 
"macos")] +use crate::{logging, utils::logging::Type}; pub async fn set_public_dns(dns_server: String) { - use crate::{core::handle, utils::dirs}; + use crate::utils::logging::Type; + use crate::{core::handle, logging, utils::dirs}; use tauri_plugin_shell::ShellExt; let app_handle = handle::Handle::app_handle(); - log::info!(target: "app", "try to set system dns"); + logging!(info, Type::Config, "try to set system dns"); let resource_dir = match dirs::app_resources_dir() { Ok(dir) => dir, Err(e) => { - log::error!(target: "app", "Failed to get resource directory: {}", e); + logging!( + error, + Type::Config, + "Failed to get resource directory: {}", + e + ); return; } }; let script = resource_dir.join("set_dns.sh"); if !script.exists() { - log::error!(target: "app", "set_dns.sh not found"); + logging!(error, Type::Config, "set_dns.sh not found"); return; } let script = script.to_string_lossy().into_owned(); @@ -28,14 +35,14 @@ pub async fn set_public_dns(dns_server: String) { { Ok(status) => { if status.success() { - log::info!(target: "app", "set system dns successfully"); + logging!(info, Type::Config, "set system dns successfully"); } else { let code = status.code().unwrap_or(-1); - log::error!(target: "app", "set system dns failed: {code}"); + logging!(error, Type::Config, "set system dns failed: {code}"); } } Err(err) => { - log::error!(target: "app", "set system dns failed: {err}"); + logging!(error, Type::Config, "set system dns failed: {err}"); } } } @@ -45,17 +52,22 @@ pub async fn restore_public_dns() { use crate::{core::handle, utils::dirs}; use tauri_plugin_shell::ShellExt; let app_handle = handle::Handle::app_handle(); - log::info!(target: "app", "try to unset system dns"); + logging!(info, Type::Config, "try to unset system dns"); let resource_dir = match dirs::app_resources_dir() { Ok(dir) => dir, Err(e) => { - log::error!(target: "app", "Failed to get resource directory: {}", e); + logging!( + error, + Type::Config, + "Failed to get resource 
directory: {}", + e + ); return; } }; let script = resource_dir.join("unset_dns.sh"); if !script.exists() { - log::error!(target: "app", "unset_dns.sh not found"); + logging!(error, Type::Config, "unset_dns.sh not found"); return; } let script = script.to_string_lossy().into_owned(); @@ -69,14 +81,14 @@ pub async fn restore_public_dns() { { Ok(status) => { if status.success() { - log::info!(target: "app", "unset system dns successfully"); + logging!(info, Type::Config, "unset system dns successfully"); } else { let code = status.code().unwrap_or(-1); - log::error!(target: "app", "unset system dns failed: {code}"); + logging!(error, Type::Config, "unset system dns failed: {code}"); } } Err(err) => { - log::error!(target: "app", "unset system dns failed: {err}"); + logging!(error, Type::Config, "unset system dns failed: {err}"); } } } diff --git a/src-tauri/src/utils/resolve/scheme.rs b/src-tauri/src/utils/resolve/scheme.rs index 487969a3..04659ab3 100644 --- a/src-tauri/src/utils/resolve/scheme.rs +++ b/src-tauri/src/utils/resolve/scheme.rs @@ -11,7 +11,7 @@ use crate::{ }; pub(super) async fn resolve_scheme(param: &str) -> Result<()> { - log::info!(target:"app", "received deep link: {param}"); + logging!(info, Type::Config, "received deep link: {param}"); let param_str = if param.starts_with("[") && param.len() > 4 { param @@ -50,8 +50,8 @@ pub(super) async fn resolve_scheme(param: &str) -> Result<()> { match url_param { Some(ref url) => { - log::info!(target:"app", "decoded subscription url: {url}"); - match PrfItem::from_url(url, name, None, None).await { + logging!(info, Type::Config, "decoded subscription url: {url}"); + match PrfItem::from_url(url.as_ref(), name, None, None).await { Ok(mut item) => { let uid = match item.uid.clone() { Some(uid) => uid, diff --git a/src-tauri/src/utils/server.rs b/src-tauri/src/utils/server.rs index 12985c2a..c5062a8c 100644 --- a/src-tauri/src/utils/server.rs +++ b/src-tauri/src/utils/server.rs @@ -51,7 +51,11 @@ pub async fn 
check_singleton() -> Result<()> { .send() .await?; } - log::error!("failed to setup singleton listen server"); + logging!( + error, + Type::Window, + "failed to setup singleton listen server" + ); bail!("app exists"); } Ok(()) @@ -129,7 +133,7 @@ pub fn embed_server() { } pub fn shutdown_embedded_server() { - log::info!("shutting down embedded server"); + logging!(info, Type::Window, "shutting down embedded server"); if let Some(sender) = SHUTDOWN_SENDER.get() && let Some(sender) = sender.lock().take() { diff --git a/src-tauri/src/utils/window_manager.rs b/src-tauri/src/utils/window_manager.rs index 463f8c39..2d5b7a1b 100644 --- a/src-tauri/src/utils/window_manager.rs +++ b/src-tauri/src/utils/window_manager.rs @@ -58,7 +58,11 @@ fn get_window_operation_debounce() -> &'static Mutex { fn should_handle_window_operation() -> bool { if WINDOW_OPERATION_IN_PROGRESS.load(Ordering::Acquire) { - log::warn!(target: "app", "[防抖] 窗口操作已在进行中,跳过重复调用"); + logging!( + warn, + Type::Window, + "Warning: [防抖] 窗口操作已在进行中,跳过重复调用" + ); return false; } @@ -67,17 +71,27 @@ fn should_handle_window_operation() -> bool { let now = Instant::now(); let elapsed = now.duration_since(*last_operation); - log::debug!(target: "app", "[防抖] 检查窗口操作间隔: {}ms (需要>={}ms)", - elapsed.as_millis(), WINDOW_OPERATION_DEBOUNCE_MS); + logging!( + debug, + Type::Window, + "[防抖] 检查窗口操作间隔: {}ms (需要>={}ms)", + elapsed.as_millis(), + WINDOW_OPERATION_DEBOUNCE_MS + ); if elapsed >= Duration::from_millis(WINDOW_OPERATION_DEBOUNCE_MS) { *last_operation = now; WINDOW_OPERATION_IN_PROGRESS.store(true, Ordering::Release); - log::info!(target: "app", "[防抖] 窗口操作被允许执行"); + logging!(info, Type::Window, "[防抖] 窗口操作被允许执行"); true } else { - log::warn!(target: "app", "[防抖] 窗口操作被防抖机制忽略,距离上次操作 {}ms < {}ms", - elapsed.as_millis(), WINDOW_OPERATION_DEBOUNCE_MS); + logging!( + warn, + Type::Window, + "Warning: [防抖] 窗口操作被防抖机制忽略,距离上次操作 {}ms < {}ms", + elapsed.as_millis(), + WINDOW_OPERATION_DEBOUNCE_MS + ); false } } From 
d3d32006c38fee97123f935fceaa8c15860a155a Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sat, 1 Nov 2025 21:22:41 +0800 Subject: [PATCH 63/70] feat: add logging check to pre-commit and CI workflow --- .github/workflows/lint-clippy.yml | 7 +++++++ .husky/pre-commit | 5 +++++ src-tauri/src/lib.rs | 1 - src-tauri/src/main.rs | 1 - 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint-clippy.yml b/.github/workflows/lint-clippy.yml index 30ed828b..d7f3416b 100644 --- a/.github/workflows/lint-clippy.yml +++ b/.github/workflows/lint-clippy.yml @@ -73,3 +73,10 @@ jobs: - name: Run Clippy working-directory: ./src-tauri run: cargo clippy-all + + - name: Run Logging Check + working-directory: ./src-tauri + shell: bash + run: | + cargo install --git https://github.com/clash-verge-rev/clash-verge-logging-check.git + clash-verge-logging-check diff --git a/.husky/pre-commit b/.husky/pre-commit index 77736fde..ee8dc1ce 100755 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -27,6 +27,11 @@ if [ -n "$RUST_FILES" ]; then ( cd src-tauri cargo clippy-all + if ! command -v clash-verge-logging-check >/dev/null 2>&1; then + echo "[pre-commit] Installing clash-verge-logging-check..." 
+ cargo install --git https://github.com/clash-verge-rev/clash-verge-logging-check.git + fi + clash-verge-logging-check ) fi diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index caa7261d..84cb533a 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -30,7 +30,6 @@ use tauri_plugin_deep_link::DeepLinkExt; use utils::logging::Type; pub static APP_HANDLE: OnceCell = OnceCell::new(); - /// Application initialization helper functions mod app_init { use anyhow::Result; diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs index f69742fc..4f3d6015 100755 --- a/src-tauri/src/main.rs +++ b/src-tauri/src/main.rs @@ -10,6 +10,5 @@ fn main() { std::env::set_var("CLASH_VERGE_DISABLE_TRAY", "1"); } } - app_lib::run(); } From c0f9920531d6d1c847fd154f06fc4130d9a60180 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sat, 1 Nov 2025 22:10:12 +0800 Subject: [PATCH 64/70] refactor: remove orphaned process cleanup functionality It might breaks mihomo starting. Due to potentiall process name processing, permissions verifing, permissions and safty FORCE KILL, find process faillure. 
--- src-tauri/src/core/manager/mod.rs | 2 - src-tauri/src/core/manager/process.rs | 244 -------------------------- 2 files changed, 246 deletions(-) delete mode 100644 src-tauri/src/core/manager/process.rs diff --git a/src-tauri/src/core/manager/mod.rs b/src-tauri/src/core/manager/mod.rs index 4ef122ae..05220f9e 100644 --- a/src-tauri/src/core/manager/mod.rs +++ b/src-tauri/src/core/manager/mod.rs @@ -1,6 +1,5 @@ mod config; mod lifecycle; -mod process; mod state; use anyhow::Result; @@ -74,7 +73,6 @@ impl CoreManager { } pub async fn init(&self) -> Result<()> { - self.cleanup_orphaned_processes().await?; self.start_core().await?; Ok(()) } diff --git a/src-tauri/src/core/manager/process.rs b/src-tauri/src/core/manager/process.rs deleted file mode 100644 index ccb32961..00000000 --- a/src-tauri/src/core/manager/process.rs +++ /dev/null @@ -1,244 +0,0 @@ -use super::CoreManager; -#[cfg(windows)] -use crate::process::AsyncHandler; -use crate::{ - constants::{process, timing}, - logging, - utils::logging::Type, -}; -use anyhow::Result; -#[cfg(windows)] -use anyhow::anyhow; - -impl CoreManager { - pub async fn cleanup_orphaned_processes(&self) -> Result<()> { - logging!(info, Type::Core, "Cleaning orphaned mihomo processes"); - - let current_pid = self - .state - .lock() - .child_sidecar - .as_ref() - .and_then(|c| c.pid()); - let target_processes = process::process_names(); - - let process_futures = target_processes.iter().map(|&name| { - let process_name = process::with_extension(name); - self.find_processes_by_name(process_name, name) - }); - - let process_results = futures::future::join_all(process_futures).await; - - let pids_to_kill: Vec<_> = process_results - .into_iter() - .filter_map(Result::ok) - .flat_map(|(pids, name)| { - pids.into_iter() - .filter(move |&pid| Some(pid) != current_pid) - .map(move |pid| (pid, name.clone())) - }) - .collect(); - - if pids_to_kill.is_empty() { - return Ok(()); - } - - let kill_futures = pids_to_kill - .iter() - .map(|(pid, 
name)| self.kill_process_verified(*pid, name.clone())); - - let killed_count = futures::future::join_all(kill_futures) - .await - .into_iter() - .filter(|&success| success) - .count(); - - if killed_count > 0 { - logging!( - info, - Type::Core, - "Cleaned {} orphaned processes", - killed_count - ); - } - - Ok(()) - } - - async fn find_processes_by_name( - &self, - process_name: String, - _target: &str, - ) -> Result<(Vec, String)> { - #[cfg(windows)] - { - use std::mem; - use winapi::um::{ - handleapi::CloseHandle, - tlhelp32::{ - CreateToolhelp32Snapshot, PROCESSENTRY32W, Process32FirstW, Process32NextW, - TH32CS_SNAPPROCESS, - }, - }; - - let process_name_clone = process_name.clone(); - let pids = AsyncHandler::spawn_blocking(move || -> Result> { - let mut pids = Vec::new(); - - unsafe { - let snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); - if snapshot == winapi::um::handleapi::INVALID_HANDLE_VALUE { - return Err(anyhow!("Failed to create process snapshot")); - } - - let mut pe32: PROCESSENTRY32W = mem::zeroed(); - pe32.dwSize = mem::size_of::() as u32; - - if Process32FirstW(snapshot, &mut pe32) != 0 { - loop { - let end_pos = pe32 - .szExeFile - .iter() - .position(|&x| x == 0) - .unwrap_or(pe32.szExeFile.len()); - - let exe_file = String::from_utf16_lossy(&pe32.szExeFile[..end_pos]); - if exe_file.eq_ignore_ascii_case(&process_name_clone) { - pids.push(pe32.th32ProcessID); - } - - if Process32NextW(snapshot, &mut pe32) == 0 { - break; - } - } - } - - CloseHandle(snapshot); - } - - Ok(pids) - }) - .await??; - - Ok((pids, process_name)) - } - - #[cfg(not(windows))] - { - let cmd = if cfg!(target_os = "macos") { - "pgrep" - } else { - "pidof" - }; - let output = tokio::process::Command::new(cmd) - .arg(&process_name) - .output() - .await?; - - if !output.status.success() { - return Ok((Vec::new(), process_name)); - } - - let stdout = String::from_utf8_lossy(&output.stdout); - let pids: Vec = stdout - .split_whitespace() - .filter_map(|s| 
s.parse().ok()) - .collect(); - - Ok((pids, process_name)) - } - } - - async fn kill_process_verified(&self, pid: u32, process_name: String) -> bool { - #[cfg(windows)] - let success = { - use winapi::um::{ - handleapi::CloseHandle, - processthreadsapi::{OpenProcess, TerminateProcess}, - winnt::{HANDLE, PROCESS_TERMINATE}, - }; - - AsyncHandler::spawn_blocking(move || unsafe { - let handle: HANDLE = OpenProcess(PROCESS_TERMINATE, 0, pid); - if handle.is_null() { - return false; - } - let result = TerminateProcess(handle, 1) != 0; - CloseHandle(handle); - result - }) - .await - .unwrap_or(false) - }; - - #[cfg(not(windows))] - let success = tokio::process::Command::new("kill") - .args(["-9", &pid.to_string()]) - .output() - .await - .map(|output| output.status.success()) - .unwrap_or(false); - - if !success { - return false; - } - - tokio::time::sleep(timing::PROCESS_VERIFY_DELAY).await; - - if self.is_process_running(pid).await.unwrap_or(false) { - logging!( - warn, - Type::Core, - "Process {} (PID: {}) still running after termination", - process_name, - pid - ); - false - } else { - logging!( - info, - Type::Core, - "Terminated process {} (PID: {})", - process_name, - pid - ); - true - } - } - - async fn is_process_running(&self, pid: u32) -> Result { - #[cfg(windows)] - { - use winapi::{ - shared::minwindef::DWORD, - um::{ - handleapi::CloseHandle, - processthreadsapi::{GetExitCodeProcess, OpenProcess}, - winnt::{HANDLE, PROCESS_QUERY_INFORMATION}, - }, - }; - - AsyncHandler::spawn_blocking(move || unsafe { - let handle: HANDLE = OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid); - if handle.is_null() { - return Ok(false); - } - let mut exit_code: DWORD = 0; - let result = GetExitCodeProcess(handle, &mut exit_code); - CloseHandle(handle); - Ok(result != 0 && exit_code == 259) - }) - .await? 
- } - - #[cfg(not(windows))] - { - let output = tokio::process::Command::new("ps") - .args(["-p", &pid.to_string()]) - .output() - .await?; - - Ok(output.status.success() && !output.stdout.is_empty()) - } - } -} From 4a7859bdae3adf4b395630e3518f916a0a54b09d Mon Sep 17 00:00:00 2001 From: Tunglies Date: Sat, 1 Nov 2025 22:50:19 +0800 Subject: [PATCH 65/70] refactor: replace hardcoded DNS config filename with constant reference (#5280) * refactor: replace hardcoded DNS config filename with constant reference * refactor: remove redundant import of constants in IClashTemp template method * refactor: add conditional compilation for DEFAULT_REDIR based on OS * refactor: simplify default TPROXY port handling and remove unused trace_err macro * refactor: simplify default TPROXY port fallback logic --- src-tauri/src/cmd/clash.rs | 14 +++++++------- src-tauri/src/config/clash.rs | 7 +++---- src-tauri/src/constants.rs | 33 ++------------------------------- src-tauri/src/core/backup.rs | 5 +++-- src-tauri/src/enhance/mod.rs | 3 ++- src-tauri/src/lib.rs | 6 +++--- src-tauri/src/utils/dirs.rs | 1 - src-tauri/src/utils/init.rs | 3 ++- src-tauri/src/utils/logging.rs | 9 --------- 9 files changed, 22 insertions(+), 59 deletions(-) diff --git a/src-tauri/src/cmd/clash.rs b/src-tauri/src/cmd/clash.rs index b6dbc1fa..1bcadea9 100644 --- a/src-tauri/src/cmd/clash.rs +++ b/src-tauri/src/cmd/clash.rs @@ -1,7 +1,9 @@ use super::CmdResult; +use crate::utils::dirs; use crate::{ cmd::StringifyErr, config::Config, + constants, core::{CoreManager, handle, validate::CoreConfigValidator}, }; use crate::{config::*, feat, logging, utils::logging::Type}; @@ -126,7 +128,7 @@ pub async fn save_dns_config(dns_config: Mapping) -> CmdResult { // 获取DNS配置文件路径 let dns_path = dirs::app_home_dir() .stringify_err()? 
- .join("dns_config.yaml"); + .join(constants::files::DNS_CONFIG); // 保存DNS配置到文件 let yaml_str = serde_yaml_ng::to_string(&dns_config).stringify_err()?; @@ -149,7 +151,7 @@ pub async fn apply_dns_config(apply: bool) -> CmdResult { // 读取DNS配置文件 let dns_path = dirs::app_home_dir() .stringify_err()? - .join("dns_config.yaml"); + .join(constants::files::DNS_CONFIG); if !dns_path.exists() { logging!(warn, Type::Config, "DNS config file not found"); @@ -227,7 +229,7 @@ pub fn check_dns_config_exists() -> CmdResult { let dns_path = dirs::app_home_dir() .stringify_err()? - .join("dns_config.yaml"); + .join(constants::files::DNS_CONFIG); Ok(dns_path.exists()) } @@ -240,7 +242,7 @@ pub async fn get_dns_config_content() -> CmdResult { let dns_path = dirs::app_home_dir() .stringify_err()? - .join("dns_config.yaml"); + .join(constants::files::DNS_CONFIG); if !fs::try_exists(&dns_path).await.stringify_err()? { return Err("DNS config file not found".into()); @@ -253,10 +255,8 @@ pub async fn get_dns_config_content() -> CmdResult { /// 验证DNS配置文件 #[tauri::command] pub async fn validate_dns_config() -> CmdResult<(bool, String)> { - use crate::utils::dirs; - let app_dir = dirs::app_home_dir().stringify_err()?; - let dns_path = app_dir.join("dns_config.yaml"); + let dns_path = app_dir.join(constants::files::DNS_CONFIG); let dns_path_str = dns_path.to_str().unwrap_or_default(); if !dns_path.exists() { diff --git a/src-tauri/src/config/clash.rs b/src-tauri/src/config/clash.rs index 9ca6276a..8b1c072a 100644 --- a/src-tauri/src/config/clash.rs +++ b/src-tauri/src/config/clash.rs @@ -1,4 +1,5 @@ use crate::config::Config; +use crate::constants::{network, tun as tun_const}; use crate::utils::dirs::{ipc_path, path_to_str}; use crate::utils::{dirs, help}; use crate::{logging, utils::logging::Type}; @@ -48,8 +49,6 @@ impl IClashTemp { } pub fn template() -> Self { - use crate::constants::{network, tun as tun_const}; - let mut map = Mapping::new(); let mut tun_config = Mapping::new(); let mut 
cors_map = Mapping::new(); @@ -215,9 +214,9 @@ impl IClashTemp { Value::Number(val_num) => val_num.as_u64().map(|u| u as u16), _ => None, }) - .unwrap_or(7896); + .unwrap_or(network::ports::DEFAULT_TPROXY); if port == 0 { - port = 7896; + port = network::ports::DEFAULT_TPROXY; } port } diff --git a/src-tauri/src/constants.rs b/src-tauri/src/constants.rs index e3ea41fd..3195ea40 100644 --- a/src-tauri/src/constants.rs +++ b/src-tauri/src/constants.rs @@ -5,15 +5,13 @@ pub mod network { pub const DEFAULT_EXTERNAL_CONTROLLER: &str = "127.0.0.1:9097"; pub mod ports { - #[allow(dead_code)] + #[cfg(not(target_os = "windows"))] pub const DEFAULT_REDIR: u16 = 7895; - #[allow(dead_code)] + #[cfg(target_os = "linux")] pub const DEFAULT_TPROXY: u16 = 7896; pub const DEFAULT_MIXED: u16 = 7897; pub const DEFAULT_SOCKS: u16 = 7898; pub const DEFAULT_HTTP: u16 = 7899; - #[allow(dead_code)] - pub const DEFAULT_EXTERNAL_CONTROLLER: u16 = 9097; #[cfg(not(feature = "verge-dev"))] pub const SINGLETON_SERVER: u16 = 33331; @@ -39,11 +37,8 @@ pub mod timing { pub const CONFIG_UPDATE_DEBOUNCE: Duration = Duration::from_millis(500); pub const CONFIG_RELOAD_DELAY: Duration = Duration::from_millis(300); - pub const PROCESS_VERIFY_DELAY: Duration = Duration::from_millis(100); - #[allow(dead_code)] pub const EVENT_EMIT_DELAY: Duration = Duration::from_millis(20); pub const STARTUP_ERROR_DELAY: Duration = Duration::from_secs(2); - #[allow(dead_code)] pub const ERROR_BATCH_DELAY: Duration = Duration::from_millis(300); #[cfg(target_os = "windows")] @@ -53,40 +48,16 @@ pub mod timing { } pub mod retry { - #[allow(dead_code)] pub const EVENT_EMIT_THRESHOLD: u64 = 10; - #[allow(dead_code)] - pub const SWR_ERROR_RETRY: usize = 2; } pub mod files { pub const RUNTIME_CONFIG: &str = "clash-verge.yaml"; pub const CHECK_CONFIG: &str = "clash-verge-check.yaml"; - #[allow(dead_code)] pub const DNS_CONFIG: &str = "dns_config.yaml"; - #[allow(dead_code)] pub const WINDOW_STATE: &str = "window_state.json"; } 
-pub mod process { - pub const VERGE_MIHOMO: &str = "verge-mihomo"; - pub const VERGE_MIHOMO_ALPHA: &str = "verge-mihomo-alpha"; - - pub fn process_names() -> [&'static str; 2] { - [VERGE_MIHOMO, VERGE_MIHOMO_ALPHA] - } - - #[cfg(windows)] - pub fn with_extension(name: &str) -> String { - format!("{}.exe", name) - } - - #[cfg(not(windows))] - pub fn with_extension(name: &str) -> String { - name.to_string() - } -} - pub mod error_patterns { pub const CONNECTION_ERRORS: &[&str] = &[ "Failed to create connection", diff --git a/src-tauri/src/core/backup.rs b/src-tauri/src/core/backup.rs index 167fbdd1..cf90bdd1 100644 --- a/src-tauri/src/core/backup.rs +++ b/src-tauri/src/core/backup.rs @@ -1,3 +1,4 @@ +use crate::constants::files::DNS_CONFIG; use crate::{ config::Config, logging, @@ -306,9 +307,9 @@ pub async fn create_backup() -> Result<(String, PathBuf), Error> { zip.start_file(dirs::VERGE_CONFIG, options)?; zip.write_all(serde_yaml_ng::to_string(&verge_config)?.as_bytes())?; - let dns_config_path = dirs::app_home_dir()?.join(dirs::DNS_CONFIG); + let dns_config_path = dirs::app_home_dir()?.join(DNS_CONFIG); if dns_config_path.exists() { - zip.start_file(dirs::DNS_CONFIG, options)?; + zip.start_file(DNS_CONFIG, options)?; zip.write_all(fs::read(&dns_config_path).await?.as_slice())?; } diff --git a/src-tauri/src/enhance/mod.rs b/src-tauri/src/enhance/mod.rs index 1deb49e5..e9183170 100644 --- a/src-tauri/src/enhance/mod.rs +++ b/src-tauri/src/enhance/mod.rs @@ -6,6 +6,7 @@ pub mod seq; mod tun; use self::{chain::*, field::*, merge::*, script::*, seq::*, tun::*}; +use crate::constants; use crate::utils::dirs; use crate::{config::Config, utils::tmpl}; use crate::{logging, utils::logging::Type}; @@ -442,7 +443,7 @@ fn apply_builtin_scripts( async fn apply_dns_settings(mut config: Mapping, enable_dns_settings: bool) -> Mapping { if enable_dns_settings && let Ok(app_dir) = dirs::app_home_dir() { - let dns_path = app_dir.join("dns_config.yaml"); + let dns_path = 
app_dir.join(constants::files::DNS_CONFIG); if dns_path.exists() && let Ok(dns_yaml) = fs::read_to_string(&dns_path).await diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 84cb533a..c6db41c0 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -10,6 +10,7 @@ mod feat; mod module; mod process; pub mod utils; +use crate::constants::files; #[cfg(target_os = "macos")] use crate::module::lightweight; #[cfg(target_os = "linux")] @@ -21,6 +22,7 @@ use crate::{ process::AsyncHandler, utils::{resolve, server}, }; +use anyhow::Result; use config::Config; use once_cell::sync::OnceCell; use tauri::{AppHandle, Manager}; @@ -32,8 +34,6 @@ use utils::logging::Type; pub static APP_HANDLE: OnceCell = OnceCell::new(); /// Application initialization helper functions mod app_init { - use anyhow::Result; - use super::*; /// Initialize singleton monitoring for other instances @@ -126,7 +126,7 @@ mod app_init { pub fn setup_window_state(app: &tauri::App) -> Result<(), Box> { logging!(info, Type::Setup, "初始化窗口状态管理..."); let window_state_plugin = tauri_plugin_window_state::Builder::new() - .with_filename("window_state.json") + .with_filename(files::WINDOW_STATE) .with_state_flags(tauri_plugin_window_state::StateFlags::default()) .build(); app.handle().plugin(window_state_plugin)?; diff --git a/src-tauri/src/utils/dirs.rs b/src-tauri/src/utils/dirs.rs index b16d3d8e..18ea6fac 100644 --- a/src-tauri/src/utils/dirs.rs +++ b/src-tauri/src/utils/dirs.rs @@ -24,7 +24,6 @@ pub static PORTABLE_FLAG: OnceCell = OnceCell::new(); pub static CLASH_CONFIG: &str = "config.yaml"; pub static VERGE_CONFIG: &str = "verge.yaml"; pub static PROFILE_YAML: &str = "profiles.yaml"; -pub static DNS_CONFIG: &str = "dns_config.yaml"; /// init portable flag pub fn init_portable_flag() -> Result<()> { diff --git a/src-tauri/src/utils/init.rs b/src-tauri/src/utils/init.rs index 72f19b08..61d4288c 100644 --- a/src-tauri/src/utils/init.rs +++ b/src-tauri/src/utils/init.rs @@ -3,6 +3,7 @@ use 
crate::utils::logging::NoModuleFilter; use crate::{ config::*, + constants, core::handle, logging, process::AsyncHandler, @@ -304,7 +305,7 @@ async fn init_dns_config() -> Result<()> { // 检查DNS配置文件是否存在 let app_dir = dirs::app_home_dir()?; - let dns_path = app_dir.join("dns_config.yaml"); + let dns_path = app_dir.join(constants::files::DNS_CONFIG); if !dns_path.exists() { logging!(info, Type::Setup, "Creating default DNS config file"); diff --git a/src-tauri/src/utils/logging.rs b/src-tauri/src/utils/logging.rs index a73b18eb..9f73c634 100644 --- a/src-tauri/src/utils/logging.rs +++ b/src-tauri/src/utils/logging.rs @@ -80,15 +80,6 @@ macro_rules! log_err { }; } -#[macro_export] -macro_rules! trace_err { - ($result: expr, $err_str: expr) => { - if let Err(err) = $result { - log::trace!(target: "app", "{}, err {}", $err_str, err); - } - } -} - /// wrap the anyhow error /// transform the error to String #[macro_export] From 85244a8f868da815c2e9454d9559205cbb253489 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 2 Nov 2025 18:27:58 +0800 Subject: [PATCH 66/70] chore(deps): update dependency @eslint-react/eslint-plugin to ^2.3.1 (#5281) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- package.json | 2 +- pnpm-lock.yaml | 150 ++++++++++++++++++++++++------------------------- 2 files changed, 76 insertions(+), 76 deletions(-) diff --git a/package.json b/package.json index f4cae993..31b65797 100644 --- a/package.json +++ b/package.json @@ -79,7 +79,7 @@ }, "devDependencies": { "@actions/github": "^6.0.1", - "@eslint-react/eslint-plugin": "^2.2.4", + "@eslint-react/eslint-plugin": "^2.3.1", "@eslint/js": "^9.39.0", "@tauri-apps/cli": "2.9.2", "@types/js-yaml": "^4.0.9", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5c0c6be2..da35abee 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -139,8 +139,8 @@ importers: specifier: ^6.0.1 version: 6.0.1 '@eslint-react/eslint-plugin': - 
specifier: ^2.2.4 - version: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + specifier: ^2.3.1 + version: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@eslint/js': specifier: ^9.39.0 version: 9.39.0 @@ -1010,31 +1010,31 @@ packages: resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} - '@eslint-react/ast@2.2.4': - resolution: {integrity: sha512-kdG9yMJ2QpEbVPfgvlWqTUAF2L7dZYBAaF8/LPrjDIKB1pCbygxXUoRBPVthxTsE2XTqiLbhPqcWGnI8Q3UNTQ==} + '@eslint-react/ast@2.3.1': + resolution: {integrity: sha512-jB/P72HVbZcC7DtUvjna8tjPSageAS6L9x5muMsBRQxEXkfv2J6CPX47sSpaPu1mMJn1Zzpn9m5z4aTPbfV6Ug==} engines: {node: '>=20.19.0'} - '@eslint-react/core@2.2.4': - resolution: {integrity: sha512-uEfUX2GoIymsBbWccJGpuyz8KCtxyNBxJb2FMyqE37nLtNVPbNsFTHRr6uX1WwkBxw+bUOYDbVDy9zFVbmAJXA==} + '@eslint-react/core@2.3.1': + resolution: {integrity: sha512-R0gXjIqHqqYSeHxNMbXblnlwzmZ2gD32aVPmrJB+SrLP0rItzo/WgVSvstjOK5+N5KExdM87hopFcqnlZS3ONg==} engines: {node: '>=20.19.0'} - '@eslint-react/eff@2.2.4': - resolution: {integrity: sha512-I26FQr5IEjJDXlcuyL1h/shmUdyyAXZrG+Op/E0Lc6cpGvXg5hn1ptcdKJ23o8BAxq2UY2gwyltGxE2t4ixoJQ==} + '@eslint-react/eff@2.3.1': + resolution: {integrity: sha512-k58lxHmhzatRZXVFzWdLZwfRwPgI5Thhf7BNVJ9F+NI2G1fFypclSVFRPqjGmI5jQ8bqB+5UVt9Rh49rZGZPzw==} engines: {node: '>=20.19.0'} - '@eslint-react/eslint-plugin@2.2.4': - resolution: {integrity: sha512-hGWCliK90mqF7Wd3TCBT/BtOgdWsyJ/Y+Zoor7Oz3fI7mu7MvqpBkGerIRCkbQxPthKj4/g2S1CEltvQ2jM4Sg==} + '@eslint-react/eslint-plugin@2.3.1': + resolution: {integrity: sha512-ThWx+AWI3Tl/6g+L1Cq/kTQrrZ4NXWMxRN92iBswYMW7bPaolh/8WBdiLAVZldqnlm+l6LZriia89jyr0CeTHA==} engines: {node: '>=20.19.0'} peerDependencies: eslint: ^9.38.0 typescript: ^5.9.3 - '@eslint-react/shared@2.2.4': - resolution: {integrity: sha512-jDL17njTyVj/cmveNThHtLLJpHqLRd/z76q+38Zcq+kiA3DfZ8mXyy+EYV4lLwD9dvg1FOMomHBTgV/woqWsRQ==} + 
'@eslint-react/shared@2.3.1': + resolution: {integrity: sha512-UiTbPi1i7UPdsIT2Z7mKZ3zzrgAm1GLeexkKe4QwvZJ1LLeEJmgMwHUw852+VzlDeV8stcQmZ9zWqFX2L0CmGg==} engines: {node: '>=20.19.0'} - '@eslint-react/var@2.2.4': - resolution: {integrity: sha512-MBh64lfHI6Cr2qjaYlJx7x3FcYqgGK9SSB5/7weRsxv63ZfGiJY+aRi0ahSGsE2JhM0/OhWu0T6T1z4nnEbQxA==} + '@eslint-react/var@2.3.1': + resolution: {integrity: sha512-1rC9dbuKKMq77pPoODGT91VTA3ReivIAfdFJePEjscPSRAUhCy7QPA/yK8MPe9nTsG89IDV+hilCGKiLZW8vNQ==} engines: {node: '>=20.19.0'} '@eslint/config-array@0.21.1': @@ -2603,15 +2603,15 @@ packages: eslint-config-prettier: optional: true - eslint-plugin-react-dom@2.2.4: - resolution: {integrity: sha512-mrr56eZsNF0m6NrZxV3wojQnxADLqYGB0A5FHYRuMEX8jmkOy0Jb7v6B4IdzLt0kI1HhAhriOogxOkFlCch/4w==} + eslint-plugin-react-dom@2.3.1: + resolution: {integrity: sha512-Zuvb8iDYRbi8s7mYzvjHKD+i+loHjF6TKJiLGYM/t9F42OWU7V7b4sjIM7pXueukl0o8BSJXDVrQ+9sHOOmxBA==} engines: {node: '>=20.19.0'} peerDependencies: eslint: ^9.38.0 typescript: ^5.9.3 - eslint-plugin-react-hooks-extra@2.2.4: - resolution: {integrity: sha512-tS2xQyEx5QEbd71xkB0xo41/Vbnfo78eZRQYCMYkRmGzCb/Kkax6KXYs3wQH4HGWXBJ6KQloSPL+ISAhhqmtiQ==} + eslint-plugin-react-hooks-extra@2.3.1: + resolution: {integrity: sha512-2t4xQYhUEgPNq1SDQJEXuH3doT+h5spVmerX4rPnBFx0zG2sYfaJV1Gz6z40pI1L3CtBrZag5nFJ44AF/BEg0w==} engines: {node: '>=20.0.0'} peerDependencies: eslint: ^9.38.0 @@ -2623,8 +2623,8 @@ packages: peerDependencies: eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0 - eslint-plugin-react-naming-convention@2.2.4: - resolution: {integrity: sha512-/nUL8YeLI2g6pCK/HwNwH/KjlJ4x1kHErpOdMNVuOWEKxjaMSBFfb/gEaG6wrgNoCjQ9grzvtc4B3mkK9+fjDw==} + eslint-plugin-react-naming-convention@2.3.1: + resolution: {integrity: sha512-Ghh1o++3XDk3zNKF7DXy3kIN1kJYFiH7wvl4aJF5m9LytQGFrJKTA5kygAaWgR7iL8o4mjk5Ty6Be3OKskpHwA==} engines: {node: '>=20.19.0'} peerDependencies: eslint: ^9.38.0 @@ -2635,15 +2635,15 @@ packages: peerDependencies: eslint: 
'>=8.40' - eslint-plugin-react-web-api@2.2.4: - resolution: {integrity: sha512-ZdGQkDBFp9wjnoqj3xPhIzfDv07wFMYdd+uwg94xl3Zya5G+d+oUduPvSu4Du+ei6hyeWngua0njUQ8BmppyjQ==} + eslint-plugin-react-web-api@2.3.1: + resolution: {integrity: sha512-rb7AYR9SCJkCDkFdqnD6JHNLKF1o29o6tZLSaPdzA1Ssxh7/VKgJ8GpTrgl3Rv+Gnyn+w+3w4XE14d7T1Db9nA==} engines: {node: '>=20.19.0'} peerDependencies: eslint: ^9.38.0 typescript: ^5.9.3 - eslint-plugin-react-x@2.2.4: - resolution: {integrity: sha512-mdoxE1SPt653/udAGQvwPob7ZgaPjIk47G0MWNwKtzLyuI0oD5X+6uq1QAn99TOM1q+sqjsCvvL7aMwGDuZ6aw==} + eslint-plugin-react-x@2.3.1: + resolution: {integrity: sha512-7zfi297NfkoEtqaz2W953gdK4J9nJD5okVhJVxgrcrP+9FVertkGqpbWtMZLpQuWJ216FncY8P6t1U+af8KNOA==} engines: {node: '>=20.19.0'} peerDependencies: eslint: ^9.38.0 @@ -5206,9 +5206,9 @@ snapshots: '@eslint-community/regexpp@4.12.1': {} - '@eslint-react/ast@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/ast@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/eff': 2.2.4 + '@eslint-react/eff': 2.3.1 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/typescript-estree': 8.46.2(typescript@5.9.3) '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) @@ -5218,12 +5218,12 @@ snapshots: - supports-color - typescript - '@eslint-react/core@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/core@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) 
'@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) @@ -5234,30 +5234,30 @@ snapshots: - supports-color - typescript - '@eslint-react/eff@2.2.4': {} + '@eslint-react/eff@2.3.1': {} - '@eslint-react/eslint-plugin@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/eslint-plugin@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.0(jiti@2.6.1) - eslint-plugin-react-dom: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-hooks-extra: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-naming-convention: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-web-api: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - eslint-plugin-react-x: 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-dom: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-hooks-extra: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-naming-convention: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-web-api: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + eslint-plugin-react-x: 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: - supports-color - '@eslint-react/shared@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': + 
'@eslint-react/shared@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/eff': 2.2.4 + '@eslint-react/eff': 2.3.1 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) ts-pattern: 5.9.0 zod: 4.1.12 @@ -5266,10 +5266,10 @@ snapshots: - supports-color - typescript - '@eslint-react/var@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': + '@eslint-react/var@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3)': dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) @@ -6910,13 +6910,13 @@ snapshots: optionalDependencies: eslint-config-prettier: 10.1.8(eslint@9.39.0(jiti@2.6.1)) - eslint-plugin-react-dom@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-dom@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) @@ -6928,13 +6928,13 @@ snapshots: 
transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks-extra@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-hooks-extra@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 @@ -6957,13 +6957,13 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-naming-convention@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-naming-convention@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) 
'@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 @@ -6979,13 +6979,13 @@ snapshots: dependencies: eslint: 9.39.0(jiti@2.6.1) - eslint-plugin-react-web-api@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-web-api@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + '@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/types': 8.46.2 '@typescript-eslint/utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) @@ -6996,13 +6996,13 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-x@2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): + eslint-plugin-react-x@2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3): dependencies: - '@eslint-react/ast': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/core': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/eff': 2.2.4 - '@eslint-react/shared': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) - '@eslint-react/var': 2.2.4(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/ast': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/core': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/eff': 2.3.1 + 
'@eslint-react/shared': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) + '@eslint-react/var': 2.3.1(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/scope-manager': 8.46.2 '@typescript-eslint/type-utils': 8.46.2(eslint@9.39.0(jiti@2.6.1))(typescript@5.9.3) '@typescript-eslint/types': 8.46.2 From fb5bf72fb97d5494e19d6b4d69ff0e5802459fbb Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sun, 2 Nov 2025 20:07:47 +0800 Subject: [PATCH 67/70] feat: add function to retrieve profile name by UID and improve error logging for profile updates --- src-tauri/src/config/profiles.rs | 16 ++++++++++++++-- src-tauri/src/core/tray/mod.rs | 6 ++---- src-tauri/src/feat/profile.rs | 9 +++++++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src-tauri/src/config/profiles.rs b/src-tauri/src/config/profiles.rs index 98886afc..f25e5990 100644 --- a/src-tauri/src/config/profiles.rs +++ b/src-tauri/src/config/profiles.rs @@ -435,8 +435,8 @@ impl IProfiles { } /// 判断profile是否是current指向的 - pub fn is_current_profile_index(&self, index: String) -> bool { - self.current == Some(index) + pub fn is_current_profile_index(&self, index: &String) -> bool { + self.current.as_ref() == Some(index) } /// 获取所有的profiles(uid,名称) @@ -455,6 +455,18 @@ impl IProfiles { }) } + /// 通过 uid 获取名称 + pub fn get_name_by_uid(&self, uid: &String) -> Option { + if let Some(items) = &self.items { + for item in items { + if item.uid.as_ref() == Some(uid) { + return item.name.clone(); + } + } + } + None + } + /// 以 app 中的 profile 列表为准,删除不再需要的文件 pub async fn cleanup_orphaned_files(&self) -> Result { let profiles_dir = dirs::app_profiles_dir()?; diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 8261c98a..474b0202 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -673,17 +673,15 @@ async fn create_profile_menu_item( .iter() .map(|(profile_uid, profile_name)| { let app_handle = 
app_handle.clone(); - let profile_uid = profile_uid.clone(); - let profile_name = profile_name.clone(); async move { let is_current_profile = Config::profiles() .await .latest_ref() - .is_current_profile_index(profile_uid.clone()); + .is_current_profile_index(profile_uid); CheckMenuItem::with_id( &app_handle, format!("profiles_{profile_uid}"), - t(&profile_name).await, + t(profile_name).await, true, is_current_profile, None::<&str>, diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 273022a5..2bd8512c 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -140,14 +140,19 @@ async fn perform_profile_update( Ok(is_current) } Err(retry_err) => { + let failed_profile_name = Config::profiles() + .await + .latest_ref() + .get_name_by_uid(uid) + .unwrap_or_default(); logging!( error, Type::Config, - "[订阅更新] 使用Clash代理更新仍然失败: {retry_err}" + "[订阅更新] 使用Clash代理更新仍然失败: {failed_profile_name} - {retry_err}" ); handle::Handle::notice_message( "update_failed_even_with_clash", - format!("{retry_err}"), + format!("{failed_profile_name} - {retry_err}"), ); Err(retry_err) } From 36d1a3878fb23c8ebee128ba0765eb32b0dd93f1 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sun, 2 Nov 2025 20:13:09 +0800 Subject: [PATCH 68/70] refactor: remove retry notice for Clash proxy updates from notification handlers and localization files --- src-tauri/src/feat/profile.rs | 1 - src/locales/de.json | 1 - src/locales/en.json | 1 - src/locales/es.json | 1 - src/locales/jp.json | 1 - src/locales/ko.json | 1 - src/locales/tr.json | 1 - src/locales/zh.json | 1 - src/locales/zhtw.json | 1 - src/pages/_layout/notificationHandlers.ts | 2 -- 10 files changed, 11 deletions(-) diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 2bd8512c..5cee61d2 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -107,7 +107,6 @@ async fn perform_profile_update( 
Type::Config, "Warning: [订阅更新] 正常更新失败: {err},尝试使用Clash代理更新" ); - handle::Handle::notice_message("update_retry_with_clash", uid.clone()); let original_with_proxy = merged_opt.as_ref().and_then(|o| o.with_proxy); let original_self_proxy = merged_opt.as_ref().and_then(|o| o.self_proxy); diff --git a/src/locales/de.json b/src/locales/de.json index a13b7181..2c989cfc 100644 --- a/src/locales/de.json +++ b/src/locales/de.json @@ -524,7 +524,6 @@ "Unknown": "Unbekannt", "Auto update disabled": "Automatische Aktualisierung deaktiviert", "Update subscription successfully": "Abonnement erfolgreich aktualisiert", - "Update failed, retrying with Clash proxy...": "Abonnement-Aktualisierung fehlgeschlagen. Versuche es mit dem Clash-Proxy erneut...", "Update with Clash proxy successfully": "Aktualisierung mit Clash-Proxy erfolgreich", "Update failed even with Clash proxy": "Aktualisierung auch mit Clash-Proxy fehlgeschlagen", "Profile creation failed, retrying with Clash proxy...": "Erstellung des Abonnements fehlgeschlagen. 
Versuche es mit dem Clash-Proxy erneut...", diff --git a/src/locales/en.json b/src/locales/en.json index e1519a2d..2716d316 100644 --- a/src/locales/en.json +++ b/src/locales/en.json @@ -628,7 +628,6 @@ "Unknown": "Unknown", "Auto update disabled": "Auto update disabled", "Update subscription successfully": "Update subscription successfully", - "Update failed, retrying with Clash proxy...": "Update failed, retrying with Clash proxy...", "Update with Clash proxy successfully": "Update with Clash proxy successfully", "Update failed even with Clash proxy": "Update failed even with Clash proxy", "Profile creation failed, retrying with Clash proxy...": "Profile creation failed, retrying with Clash proxy...", diff --git a/src/locales/es.json b/src/locales/es.json index e13a81c2..ede76225 100644 --- a/src/locales/es.json +++ b/src/locales/es.json @@ -524,7 +524,6 @@ "Unknown": "Desconocido", "Auto update disabled": "La actualización automática está deshabilitada", "Update subscription successfully": "Suscripción actualizada con éxito", - "Update failed, retrying with Clash proxy...": "Error al actualizar la suscripción. Intentando con el proxy de Clash...", "Update with Clash proxy successfully": "Actualización con el proxy de Clash exitosa", "Update failed even with Clash proxy": "Error al actualizar incluso con el proxy de Clash", "Profile creation failed, retrying with Clash proxy...": "Error al crear la suscripción. 
Intentando con el proxy de Clash...", diff --git a/src/locales/jp.json b/src/locales/jp.json index ae3f363e..618e4fbd 100644 --- a/src/locales/jp.json +++ b/src/locales/jp.json @@ -527,7 +527,6 @@ "Unknown": "不明", "Auto update disabled": "自動更新が無効になっています。", "Update subscription successfully": "サブスクリプションの更新に成功しました。", - "Update failed, retrying with Clash proxy...": "サブスクリプションの更新に失敗しました。Clashプロキシを使用して再試行します...", "Update with Clash proxy successfully": "Clashプロキシを使用して更新に成功しました。", "Update failed even with Clash proxy": "Clashプロキシを使用しても更新に失敗しました。", "Profile creation failed, retrying with Clash proxy...": "プロファイルの作成に失敗しました。Clashプロキシを使用して再試行します...", diff --git a/src/locales/ko.json b/src/locales/ko.json index 07173504..b5b29571 100644 --- a/src/locales/ko.json +++ b/src/locales/ko.json @@ -254,7 +254,6 @@ "Merge File Mapping Error": "병합 파일 매핑 오류", "Merge File Key Error": "병합 파일 키 오류", "Merge File Error": "병합 파일 오류", - "Update failed, retrying with Clash proxy...": "업데이트 실패, Clash 프록시로 재시도 중...", "Update with Clash proxy successfully": "Clash 프록시로 업데이트 성공", "Update failed even with Clash proxy": "Clash 프록시로도 업데이트 실패", "Failed": "실패", diff --git a/src/locales/tr.json b/src/locales/tr.json index 79ad6333..0b406488 100644 --- a/src/locales/tr.json +++ b/src/locales/tr.json @@ -574,7 +574,6 @@ "Unknown": "Bilinmiyor", "Auto update disabled": "Otomatik güncelleme devre dışı", "Update subscription successfully": "Abonelik başarıyla güncellendi", - "Update failed, retrying with Clash proxy...": "Güncelleme başarısız oldu, Clash vekil ile yeniden deneniyor...", "Update with Clash proxy successfully": "Clash vekil ile güncelleme başarılı", "Update failed even with Clash proxy": "Clash vekil ile bile güncelleme başarısız oldu", "Profile creation failed, retrying with Clash proxy...": "Profil oluşturma başarısız oldu, Clash vekil ile yeniden deneniyor...", diff --git a/src/locales/zh.json b/src/locales/zh.json index 1e320fd8..f8b161d7 100644 --- a/src/locales/zh.json +++ 
b/src/locales/zh.json @@ -628,7 +628,6 @@ "Unknown": "未知", "Auto update disabled": "自动更新已禁用", "Update subscription successfully": "订阅更新成功", - "Update failed, retrying with Clash proxy...": "订阅更新失败,尝试使用 Clash 代理更新", "Update with Clash proxy successfully": "使用 Clash 代理更新成功", "Update failed even with Clash proxy": "使用 Clash 代理更新也失败", "Profile creation failed, retrying with Clash proxy...": "订阅创建失败,尝试使用 Clash 代理创建", diff --git a/src/locales/zhtw.json b/src/locales/zhtw.json index fb4bc66e..efcbba3c 100644 --- a/src/locales/zhtw.json +++ b/src/locales/zhtw.json @@ -626,7 +626,6 @@ "Unknown": "未知", "Auto update disabled": "自動更新已停用", "Update subscription successfully": "訂閱更新成功", - "Update failed, retrying with Clash proxy...": "訂閱更新失敗,嘗試使用 Clash 代理更新", "Update with Clash proxy successfully": "使用 Clash 代理更新成功", "Update failed even with Clash proxy": "使用 Clash 代理更新也失敗", "Profile creation failed, retrying with Clash proxy...": "訂閱建立失敗,嘗試使用 Clash 代理建立", diff --git a/src/pages/_layout/notificationHandlers.ts b/src/pages/_layout/notificationHandlers.ts index 0f69ce30..5696b5ea 100644 --- a/src/pages/_layout/notificationHandlers.ts +++ b/src/pages/_layout/notificationHandlers.ts @@ -24,8 +24,6 @@ export const handleNoticeMessage = ( "success", `${t("Update with Clash proxy successfully")} ${msg}`, ), - update_retry_with_clash: () => - showNotice("info", t("Update failed, retrying with Clash proxy...")), update_failed_even_with_clash: () => showNotice( "error", From d4cb16f4ffc56844ecbddfed3df416caa5aa9676 Mon Sep 17 00:00:00 2001 From: oomeow Date: Sun, 2 Nov 2025 22:33:50 +0800 Subject: [PATCH 69/70] perf: select proxy (#5284) * perf: improve select proxy for group * chore: update --- src-tauri/src/cmd/profile.rs | 11 ++++++----- src-tauri/src/core/tray/mod.rs | 1 - src/hooks/use-profiles.ts | 4 +++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 9ab8e21a..949ddce1 100644 --- 
a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -570,14 +570,16 @@ pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> Cm pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { // 保存修改前检查是否有更新 update_interval let profiles = Config::profiles().await; - let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) { + let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) + && let Some(new_option) = profile.option.as_ref() + { let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval); - let new_interval = profile.option.as_ref().and_then(|o| o.update_interval); + let new_interval = new_option.update_interval; let old_allow_auto_update = old_profile .option .as_ref() .and_then(|o| o.allow_auto_update); - let new_allow_auto_update = profile.option.as_ref().and_then(|o| o.allow_auto_update); + let new_allow_auto_update = new_option.allow_auto_update; (old_interval != new_interval) || (old_allow_auto_update != new_allow_auto_update) } else { false @@ -589,14 +591,13 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { // 如果更新间隔或允许自动更新变更,异步刷新定时器 if should_refresh_timer { - let index_clone = index.clone(); crate::process::AsyncHandler::spawn(move || async move { logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器..."); if let Err(e) = crate::core::Timer::global().refresh().await { logging!(error, Type::Timer, "刷新定时器失败: {}", e); } else { // 刷新成功后发送自定义事件,不触发配置重载 - crate::core::handle::Handle::notify_timer_updated(index_clone); + crate::core::handle::Handle::notify_timer_updated(index); } }); } diff --git a/src-tauri/src/core/tray/mod.rs b/src-tauri/src/core/tray/mod.rs index 474b0202..ef300bbe 100644 --- a/src-tauri/src/core/tray/mod.rs +++ b/src-tauri/src/core/tray/mod.rs @@ -308,7 +308,6 @@ impl Tray { let tun_mode = verge.enable_tun_mode.as_ref().unwrap_or(&false); let tun_mode_available = 
cmd::system::is_admin().unwrap_or_default() || service::is_service_available().await.is_ok(); - println!("tun_mode_available: {}", tun_mode_available); let mode = { Config::clash() .await diff --git a/src/hooks/use-profiles.ts b/src/hooks/use-profiles.ts index fdb73462..c517c835 100644 --- a/src/hooks/use-profiles.ts +++ b/src/hooks/use-profiles.ts @@ -62,7 +62,9 @@ export const useProfiles = () => { const patchCurrent = async (value: Partial) => { if (profiles?.current) { await patchProfile(profiles.current, value); - mutateProfiles(); + if (!value.selected) { + mutateProfiles(); + } } }; From 34cac0fa3aed8ef653cf62c19f42eaa5ae663135 Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Sun, 2 Nov 2025 22:41:22 +0800 Subject: [PATCH 70/70] feat: improve subscription update method and enhance error handling for profile updates --- UPDATELOG.md | 1 + src-tauri/src/core/sysopt.rs | 7 ++ src-tauri/src/core/timer.rs | 18 ++- src-tauri/src/feat/profile.rs | 129 ++++++++++++---------- src-tauri/src/utils/network.rs | 7 ++ src/pages/_layout/notificationHandlers.ts | 2 + 6 files changed, 105 insertions(+), 59 deletions(-) diff --git a/UPDATELOG.md b/UPDATELOG.md index 8b7081cf..2fa6bc73 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -85,6 +85,7 @@ - 添加热键绑定错误的提示信息 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题 - Tun 模式不可用时,禁用系统托盘的 Tun 模式菜单 +- 改进订阅更新方式,仍失败需打开订阅设置 `允许危险证书`
diff --git a/src-tauri/src/core/sysopt.rs b/src-tauri/src/core/sysopt.rs index d73d9a91..c24d8be9 100644 --- a/src-tauri/src/core/sysopt.rs +++ b/src-tauri/src/core/sysopt.rs @@ -15,6 +15,7 @@ use sysproxy::{Autoproxy, Sysproxy}; use tauri_plugin_autostart::ManagerExt; pub struct Sysopt { + initialed: AtomicBool, update_sysproxy: AtomicBool, reset_sysproxy: AtomicBool, } @@ -84,6 +85,7 @@ async fn execute_sysproxy_command(args: Vec) -> Result<()> impl Default for Sysopt { fn default() -> Self { Sysopt { + initialed: AtomicBool::new(false), update_sysproxy: AtomicBool::new(false), reset_sysproxy: AtomicBool::new(false), } @@ -94,6 +96,10 @@ impl Default for Sysopt { singleton_lazy!(Sysopt, SYSOPT, Sysopt::default); impl Sysopt { + pub fn is_initialed(&self) -> bool { + self.initialed.load(Ordering::SeqCst) + } + pub fn init_guard_sysproxy(&self) -> Result<()> { // 使用事件驱动代理管理器 let proxy_manager = EventDrivenProxyManager::global(); @@ -105,6 +111,7 @@ impl Sysopt { /// init the sysproxy pub async fn update_sysproxy(&self) -> Result<()> { + self.initialed.store(true, Ordering::SeqCst); if self .update_sysproxy .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) diff --git a/src-tauri/src/core/timer.rs b/src-tauri/src/core/timer.rs index 31ad95cb..e8506df1 100644 --- a/src-tauri/src/core/timer.rs +++ b/src-tauri/src/core/timer.rs @@ -1,4 +1,7 @@ -use crate::{config::Config, feat, logging, logging_error, singleton, utils::logging::Type}; +use crate::{ + config::Config, core::sysopt::Sysopt, feat, logging, logging_error, singleton, + utils::logging::Type, +}; use anyhow::{Context, Result}; use delay_timer::prelude::{DelayTimer, DelayTimerBuilder, TaskBuilder}; use parking_lot::RwLock; @@ -10,7 +13,9 @@ use std::{ Arc, atomic::{AtomicBool, AtomicU64, Ordering}, }, + time::Duration, }; +use tokio::time::{sleep, timeout}; type TaskID = u64; @@ -390,6 +395,7 @@ impl Timer { .spawn_async_routine(move || { let uid = uid.clone(); Box::pin(async move { + 
Self::wait_until_sysopt(Duration::from_millis(1000)).await; Self::async_task(&uid).await; }) as Pin + Send>> }) @@ -519,6 +525,16 @@ impl Timer { // Emit completed event Self::emit_update_event(uid, false); } + + async fn wait_until_sysopt(max_wait: Duration) { + let _ = timeout(max_wait, async { + while !Sysopt::global().is_initialed() { + logging!(warn, Type::Timer, "Waiting for Sysopt to be initialized..."); + sleep(Duration::from_millis(30)).await; + } + }) + .await; + } } #[derive(Debug)] diff --git a/src-tauri/src/feat/profile.rs b/src-tauri/src/feat/profile.rs index 5cee61d2..f3832e38 100644 --- a/src-tauri/src/feat/profile.rs +++ b/src-tauri/src/feat/profile.rs @@ -86,20 +86,25 @@ async fn perform_profile_update( option: Option<&PrfOption>, ) -> Result { logging!(info, Type::Config, "[订阅更新] 开始下载新的订阅内容"); - let merged_opt = PrfOption::merge(opt, option); + let mut merged_opt = PrfOption::merge(opt, option); + let is_current = { + let profiles = Config::profiles().await; + profiles.latest_ref().is_current_profile_index(uid) + }; + let profile_name = { + let profiles = Config::profiles().await; + profiles + .latest_ref() + .get_name_by_uid(uid) + .unwrap_or_default() + }; + let mut last_err; match PrfItem::from_url(url, None, None, merged_opt.as_ref()).await { Ok(mut item) => { logging!(info, Type::Config, "[订阅更新] 更新订阅配置成功"); - let profiles = Config::profiles().await; profiles_draft_update_item_safe(uid, &mut item).await?; - let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); - logging!( - info, - Type::Config, - "[订阅更新] 是否为当前使用的订阅: {is_current}" - ); - Ok(is_current) + return Ok(is_current); } Err(err) => { logging!( @@ -107,57 +112,65 @@ async fn perform_profile_update( Type::Config, "Warning: [订阅更新] 正常更新失败: {err},尝试使用Clash代理更新" ); - - let original_with_proxy = merged_opt.as_ref().and_then(|o| o.with_proxy); - let original_self_proxy = merged_opt.as_ref().and_then(|o| o.self_proxy); - - let mut fallback_opt = 
merged_opt.unwrap_or_default(); - fallback_opt.with_proxy = Some(false); - fallback_opt.self_proxy = Some(true); - - match PrfItem::from_url(url, None, None, Some(&fallback_opt)).await { - Ok(mut item) => { - logging!(info, Type::Config, "[订阅更新] 使用Clash代理更新成功"); - - if let Some(option) = item.option.as_mut() { - option.with_proxy = original_with_proxy; - option.self_proxy = original_self_proxy; - } - - let profiles = Config::profiles().await; - profiles_draft_update_item_safe(uid, &mut item).await?; - - let profile_name = item.name.clone().unwrap_or_else(|| uid.clone()); - handle::Handle::notice_message("update_with_clash_proxy", profile_name); - - let is_current = Some(uid.clone()) == profiles.latest_ref().get_current(); - logging!( - info, - Type::Config, - "[订阅更新] 是否为当前使用的订阅: {is_current}" - ); - Ok(is_current) - } - Err(retry_err) => { - let failed_profile_name = Config::profiles() - .await - .latest_ref() - .get_name_by_uid(uid) - .unwrap_or_default(); - logging!( - error, - Type::Config, - "[订阅更新] 使用Clash代理更新仍然失败: {failed_profile_name} - {retry_err}" - ); - handle::Handle::notice_message( - "update_failed_even_with_clash", - format!("{failed_profile_name} - {retry_err}"), - ); - Err(retry_err) - } - } + last_err = err; } } + + merged_opt.get_or_insert_with(PrfOption::default).self_proxy = Some(true); + merged_opt.get_or_insert_with(PrfOption::default).with_proxy = Some(false); + + match PrfItem::from_url(url, None, None, merged_opt.as_ref()).await { + Ok(mut item) => { + logging!( + info, + Type::Config, + "[订阅更新] 使用 Clash代理 更新订阅配置成功" + ); + profiles_draft_update_item_safe(uid, &mut item).await?; + handle::Handle::notice_message("update_with_clash_proxy", profile_name); + drop(last_err); + return Ok(is_current); + } + Err(err) => { + logging!( + warn, + Type::Config, + "Warning: [订阅更新] 正常更新失败: {err},尝试使用Clash代理更新" + ); + last_err = err; + } + } + + merged_opt.get_or_insert_with(PrfOption::default).self_proxy = Some(false); + 
merged_opt.get_or_insert_with(PrfOption::default).with_proxy = Some(true); + + match PrfItem::from_url(url, None, None, merged_opt.as_ref()).await { + Ok(mut item) => { + logging!( + info, + Type::Config, + "[订阅更新] 使用 系统代理 更新订阅配置成功" + ); + profiles_draft_update_item_safe(uid, &mut item).await?; + handle::Handle::notice_message("update_with_clash_proxy", profile_name); + drop(last_err); + return Ok(is_current); + } + Err(err) => { + logging!( + warn, + Type::Config, + "Warning: [订阅更新] 正常更新失败: {err},尝试使用系统代理更新" + ); + last_err = err; + } + } + + handle::Handle::notice_message( + "update_failed_even_with_clash", + format!("{profile_name} - {last_err}"), + ); + Ok(is_current) } pub async fn update_profile( diff --git a/src-tauri/src/utils/network.rs b/src-tauri/src/utils/network.rs index cc765e65..65ef63a2 100644 --- a/src-tauri/src/utils/network.rs +++ b/src-tauri/src/utils/network.rs @@ -1,6 +1,7 @@ use crate::config::Config; use anyhow::Result; use base64::{Engine as _, engine::general_purpose}; +use isahc::config::DnsCache; use isahc::prelude::*; use isahc::{HttpClient, config::SslOption}; use isahc::{ @@ -143,6 +144,12 @@ impl NetworkManager { builder = builder.redirect_policy(RedirectPolicy::Follow); + // 禁用缓存,不关心连接复用 + builder = builder.connection_cache_size(0); + + // 禁用 DNS 缓存,避免因 DNS 变化导致的问题 + builder = builder.dns_cache(DnsCache::Disable); + Ok(builder.build()?) } } diff --git a/src/pages/_layout/notificationHandlers.ts b/src/pages/_layout/notificationHandlers.ts index 5696b5ea..afa674c8 100644 --- a/src/pages/_layout/notificationHandlers.ts +++ b/src/pages/_layout/notificationHandlers.ts @@ -19,6 +19,8 @@ export const handleNoticeMessage = ( showNotice("error", msg); }, "set_config::error": () => showNotice("error", msg), + // 后端暂时没有启用相关通知, 批量更新可能造成扰人提醒 + // update_success: () => showNotice("success", t("Update subscription successfully")), update_with_clash_proxy: () => showNotice( "success",