diff --git a/src-tauri/.sqlx/query-f4212b4be29035d4db36547ecdc9606e7622f4a6d1fb02dcadddac5e982c2063.json b/src-tauri/.sqlx/query-3944855d02e0c87f4453c27bbe14f9e59f4237686350b037952c830def4eb3ce.json similarity index 52% rename from src-tauri/.sqlx/query-f4212b4be29035d4db36547ecdc9606e7622f4a6d1fb02dcadddac5e982c2063.json rename to src-tauri/.sqlx/query-3944855d02e0c87f4453c27bbe14f9e59f4237686350b037952c830def4eb3ce.json index 5691fef3..bd6f3e5f 100644 --- a/src-tauri/.sqlx/query-f4212b4be29035d4db36547ecdc9606e7622f4a6d1fb02dcadddac5e982c2063.json +++ b/src-tauri/.sqlx/query-3944855d02e0c87f4453c27bbe14f9e59f4237686350b037952c830def4eb3ce.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT c.id, c.tunnel_id, c.start \"start!\", c.end \"end!\", COALESCE((SELECT ls.upload FROM tunnel_stats ls WHERE ls.tunnel_id = c.tunnel_id AND ls.collected_at >= c.start AND ls.collected_at <= c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"upload: _\", COALESCE((SELECT ls.download FROM tunnel_stats ls WHERE ls.tunnel_id = c.tunnel_id AND ls.collected_at >= c.start AND ls.collected_at <= c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"download: _\" FROM tunnel_connection c WHERE tunnel_id = $1 ORDER BY start DESC", + "query": "SELECT c.id, c.tunnel_id, c.start, c.end, COALESCE((SELECT ls.upload FROM tunnel_stats ls WHERE ls.tunnel_id = c.tunnel_id AND ls.collected_at BETWEEN c.start AND c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"upload: _\", COALESCE((SELECT ls.download FROM tunnel_stats ls WHERE ls.tunnel_id = c.tunnel_id AND ls.collected_at BETWEEN c.start AND c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"download: _\" FROM tunnel_connection c WHERE tunnel_id = $1 ORDER BY start DESC", "describe": { "columns": [ { @@ -14,12 +14,12 @@ "type_info": "Integer" }, { - "name": "start!", + "name": "start", "ordinal": 2, "type_info": "Datetime" }, { - "name": "end!", + "name": "end", "ordinal": 3, "type_info": "Datetime" }, @@ -46,5 +46,5 @@ false ] }, - "hash": "f4212b4be29035d4db36547ecdc9606e7622f4a6d1fb02dcadddac5e982c2063" + "hash": "3944855d02e0c87f4453c27bbe14f9e59f4237686350b037952c830def4eb3ce" } diff --git a/src-tauri/.sqlx/query-5953a81f34f906e34aabec089dfe0cebf2afc3ad798638db9ea0aabcd506192b.json b/src-tauri/.sqlx/query-5953a81f34f906e34aabec089dfe0cebf2afc3ad798638db9ea0aabcd506192b.json deleted file mode 100644 index ab312bb2..00000000 --- a/src-tauri/.sqlx/query-5953a81f34f906e34aabec089dfe0cebf2afc3ad798638db9ea0aabcd506192b.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT id, instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id,route_all_traffic, mfa_enabled, keepalive_interval FROM location;", - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Integer" - }, - { - "name": "instance_id", - "ordinal": 1, - "type_info": "Integer" - }, - { - "name": "name", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "address", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "pubkey", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "endpoint", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "allowed_ips", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "dns", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "network_id", - "ordinal": 8, - "type_info": "Integer" - }, - { - "name": "route_all_traffic", - "ordinal": 9, - "type_info": "Bool" - }, - { - "name": "mfa_enabled", - "ordinal": 10, - "type_info": "Bool" - }, - { - "name": 
"keepalive_interval", - "ordinal": 11, - "type_info": "Integer" - } - ], - "parameters": { - "Right": 0 - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false - ] - }, - "hash": "5953a81f34f906e34aabec089dfe0cebf2afc3ad798638db9ea0aabcd506192b" -} diff --git a/src-tauri/.sqlx/query-35cdd5aa0fa68c9700b71bae80b6fe97b3ef20aa110ef0502af0762c2e4ef493.json b/src-tauri/.sqlx/query-b32acd023c6b0b6236265da6b3812db5a064a272285e4e591a886607a89b1017.json similarity index 52% rename from src-tauri/.sqlx/query-35cdd5aa0fa68c9700b71bae80b6fe97b3ef20aa110ef0502af0762c2e4ef493.json rename to src-tauri/.sqlx/query-b32acd023c6b0b6236265da6b3812db5a064a272285e4e591a886607a89b1017.json index b5a67f76..2eac37b5 100644 --- a/src-tauri/.sqlx/query-35cdd5aa0fa68c9700b71bae80b6fe97b3ef20aa110ef0502af0762c2e4ef493.json +++ b/src-tauri/.sqlx/query-b32acd023c6b0b6236265da6b3812db5a064a272285e4e591a886607a89b1017.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT c.id, c.location_id, c.start \"start!\", c.end \"end!\", COALESCE((SELECT ls.upload FROM location_stats ls WHERE ls.location_id = c.location_id AND ls.collected_at >= c.start AND ls.collected_at <= c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"upload: _\", COALESCE((SELECT ls.download FROM location_stats ls WHERE ls.location_id = c.location_id AND ls.collected_at >= c.start AND ls.collected_at <= c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"download: _\" FROM connection c WHERE location_id = $1 ORDER BY start DESC", + "query": "SELECT c.id, c.location_id, c.start, c.end, COALESCE((SELECT ls.upload FROM location_stats ls WHERE ls.location_id = c.location_id AND ls.collected_at BETWEEN c.start AND c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"upload: _\", COALESCE((SELECT ls.download FROM location_stats ls WHERE ls.location_id = c.location_id AND ls.collected_at BETWEEN c.start AND c.end ORDER BY ls.collected_at DESC LIMIT 1 ), 0) \"download: _\" FROM connection c WHERE location_id = $1 ORDER BY start DESC", "describe": { "columns": [ { @@ -14,12 +14,12 @@ "type_info": "Integer" }, { - "name": "start!", + "name": "start", "ordinal": 2, "type_info": "Datetime" }, { - "name": "end!", + "name": "end", "ordinal": 3, "type_info": "Datetime" }, @@ -46,5 +46,5 @@ false ] }, - "hash": "35cdd5aa0fa68c9700b71bae80b6fe97b3ef20aa110ef0502af0762c2e4ef493" + "hash": "b32acd023c6b0b6236265da6b3812db5a064a272285e4e591a886607a89b1017" } diff --git a/src-tauri/.sqlx/query-09eaf7503256da688c9eaeebb2b187d1b7d5692ce7e0ca467465e78f873e0901.json b/src-tauri/.sqlx/query-d84dc04e42e2ef85f990b3f01c4db1ac59ec5e5940a7fa7ff1f6d2181a1f4763.json similarity index 91% rename from src-tauri/.sqlx/query-09eaf7503256da688c9eaeebb2b187d1b7d5692ce7e0ca467465e78f873e0901.json rename to src-tauri/.sqlx/query-d84dc04e42e2ef85f990b3f01c4db1ac59ec5e5940a7fa7ff1f6d2181a1f4763.json index e7bdf562..3898614a 100644 --- a/src-tauri/.sqlx/query-09eaf7503256da688c9eaeebb2b187d1b7d5692ce7e0ca467465e78f873e0901.json +++ b/src-tauri/.sqlx/query-d84dc04e42e2ef85f990b3f01c4db1ac59ec5e5940a7fa7ff1f6d2181a1f4763.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token, disable_all_traffic, enterprise_enabled FROM instance WHERE token IS NOT NULL;", + "query": "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token, disable_all_traffic, enterprise_enabled FROM instance\n WHERE token IS NOT NULL;", "describe": { "columns": [ { @@ -64,5 +64,5 @@ 
false ] }, - "hash": "09eaf7503256da688c9eaeebb2b187d1b7d5692ce7e0ca467465e78f873e0901" + "hash": "d84dc04e42e2ef85f990b3f01c4db1ac59ec5e5940a7fa7ff1f6d2181a1f4763" } diff --git a/src-tauri/.sqlx/query-e83851eee8a8ccbf3729ba0de91a025feabb43bdba8ec78a1adb79e8870dbac9.json b/src-tauri/.sqlx/query-e83851eee8a8ccbf3729ba0de91a025feabb43bdba8ec78a1adb79e8870dbac9.json deleted file mode 100644 index c41516a1..00000000 --- a/src-tauri/.sqlx/query-e83851eee8a8ccbf3729ba0de91a025feabb43bdba8ec78a1adb79e8870dbac9.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id, route_all_traffic, mfa_enabled, keepalive_interval FROM location WHERE name = $1", - "describe": { - "columns": [ - { - "name": "id: _", - "ordinal": 0, - "type_info": "Integer" - }, - { - "name": "instance_id", - "ordinal": 1, - "type_info": "Integer" - }, - { - "name": "name", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "address", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "pubkey", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "endpoint", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "allowed_ips", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "dns", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "network_id", - "ordinal": 8, - "type_info": "Integer" - }, - { - "name": "route_all_traffic", - "ordinal": 9, - "type_info": "Bool" - }, - { - "name": "mfa_enabled", - "ordinal": 10, - "type_info": "Bool" - }, - { - "name": "keepalive_interval", - "ordinal": 11, - "type_info": "Integer" - } - ], - "parameters": { - "Right": 1 - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false - ] - }, - "hash": "e83851eee8a8ccbf3729ba0de91a025feabb43bdba8ec78a1adb79e8870dbac9" -} diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index f2669c8f..2197542e 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -169,7 +169,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "435a87a52755b8f27fcf321ac4f04b2802e337c8c4872923137471ec39c37532" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -237,7 +237,7 @@ dependencies = [ "log", "parking", "polling 2.8.0", - "rustix 0.37.27", + "rustix 0.37.28", "slab", "socket2 0.4.10", "waker-fn", @@ -256,7 +256,7 @@ dependencies = [ "futures-lite 2.5.0", "parking", "polling 3.7.4", - "rustix 0.38.42", + "rustix 0.38.43", "slab", "tracing", "windows-sys 0.59.0", @@ -277,7 +277,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -295,7 +295,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.42", + "rustix 0.38.43", "windows-sys 0.48.0", ] @@ -312,9 +312,9 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-lite 2.5.0", - "rustix 0.38.42", + "rustix 0.38.43", "tracing", ] @@ -341,7 +341,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.42", + "rustix 0.38.43", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -377,9 +377,9 @@ checksum = 
"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.84" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", @@ -826,9 +826,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" dependencies = [ "clap_builder", "clap_derive", @@ -836,9 +836,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" dependencies = [ "anstream", "anstyle", @@ -848,9 +848,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1346,7 +1346,7 @@ dependencies = [ [[package]] name = "defguard_wireguard_rs" version = "0.7.0" -source = "git+https://github.com/DefGuard/wireguard-rs.git?rev=v0.7.0#943f9ff2970d13fe139212b954232f474642b213" +source = "git+https://github.com/DefGuard/wireguard-rs.git?rev=v0.7.0#74dafcb416713c4c092633221545ad611be60397" dependencies = [ "base64 0.22.1", "libc", @@ -1709,9 +1709,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -1724,7 +1724,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -3219,9 +3219,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "libc", @@ -3237,9 +3237,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -4069,12 +4069,12 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_macros 0.11.2", - "phf_shared 0.11.2", + "phf_macros 0.11.3", + "phf_shared 0.11.3", ] [[package]] @@ -4119,11 +4119,11 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", "rand 0.8.5", ] @@ -4143,12 +4143,12 @@ dependencies = [ [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ - "phf_generator 0.11.2", - "phf_shared 0.11.2", + "phf_generator 0.11.3", + "phf_shared 0.11.3", "proc-macro2", "quote", "syn 2.0.95", @@ -4160,7 +4160,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c00cf8b9eafe68dde5e9eaa2cef8ee84a9336a47d566ec55ca16589633b65af7" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] @@ -4169,32 +4169,32 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher", + "siphasher 1.0.1", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", @@ -4203,9 +4203,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -4303,7 +4303,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.42", + "rustix 0.38.43", "tracing", "windows-sys 0.59.0", ] @@ -4917,9 +4917,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.27" +version = "0.37.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +checksum = 
"519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" dependencies = [ "bitflags 1.3.2", "errno", @@ -4931,14 +4931,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys 0.4.15", "windows-sys 0.59.0", ] @@ -5053,9 +5053,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -5112,9 +5112,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "indexmap 2.7.0", "itoa 1.0.14", @@ -5291,6 +5291,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "slab" version = "0.4.9" @@ -5400,7 +5406,7 @@ dependencies = [ "crc", "crossbeam-queue", "either", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-core", "futures-intrusive", "futures-io", @@ -6000,7 +6006,7 @@ dependencies = [ [[package]] name = "tauri-plugin-log" version = "0.0.0" -source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#203db03fbd6e5a9cddbc83ce2acd983b88fbd7e3" +source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#f00ad730af8373d8329347d7561ec5486f09ce87" dependencies = [ "byte-unit", "fern", @@ -6015,7 +6021,7 @@ dependencies = [ [[package]] name = "tauri-plugin-single-instance" version = "0.0.0" -source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#203db03fbd6e5a9cddbc83ce2acd983b88fbd7e3" +source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#f00ad730af8373d8329347d7561ec5486f09ce87" dependencies = [ "log", "serde", @@ -6029,7 +6035,7 @@ dependencies = [ [[package]] name = "tauri-plugin-window-state" version = "0.1.1" -source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#203db03fbd6e5a9cddbc83ce2acd983b88fbd7e3" +source = "git+https://github.com/tauri-apps/plugins-workspace?branch=v1#f00ad730af8373d8329347d7561ec5486f09ce87" dependencies = [ "bincode", "bitflags 2.6.0", @@ -6099,7 +6105,7 @@ dependencies = [ "kuchikiki", "log", "memchr", - "phf 0.11.2", + "phf 0.11.3", "proc-macro2", "quote", "semver", @@ -6143,7 +6149,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.2.15", "once_cell", - "rustix 0.38.42", + "rustix 0.38.43", "windows-sys 0.59.0", ] @@ -6969,7 +6975,7 @@ checksum = "056535ced7a150d45159d3a8dc30f91a2e2d588ca0b23f70e56033622b8016f6" dependencies = [ "cc", "downcast-rs", - "rustix 0.38.42", + "rustix 0.38.43", "scoped-tls", 
"smallvec", "wayland-sys", @@ -6982,7 +6988,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b66249d3fc69f76fd74c82cc319300faa554e9d865dab1f7cd66cc20db10b280" dependencies = [ "bitflags 2.6.0", - "rustix 0.38.42", + "rustix 0.38.43", "wayland-backend", "wayland-scanner", ] @@ -7446,13 +7452,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows-tokens" version = "0.39.0" @@ -7461,11 +7483,11 @@ checksum = "f838de2fe15fe6bac988e74b798f26499a8b21a9d97edec321e79b28d1d7f597" [[package]] name = "windows-version" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6998aa457c9ba8ff2fb9f13e9d2a930dabcea28f1d0ab94d687d8b3654844515" +checksum = "c12476c23a74725c539b24eae8bfc0dac4029c39cdb561d9f23616accd4ae26d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] @@ -7486,6 +7508,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.37.0" @@ -7516,6 +7544,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.37.0" @@ -7546,12 +7580,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.37.0" @@ -7582,6 +7628,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.37.0" @@ -7612,6 +7664,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -7630,6 +7688,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.37.0" @@ -7660,6 +7724,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.5.40" @@ -7814,7 +7884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" dependencies = [ "gethostname", - "rustix 0.38.42", + "rustix 0.38.43", "x11rb-protocol", ] @@ -7843,8 +7913,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" dependencies = [ "libc", - "linux-raw-sys 0.4.14", - "rustix 0.38.42", + "linux-raw-sys 0.4.15", + "rustix 0.38.43", ] [[package]] @@ -7939,7 +8009,7 @@ dependencies = [ "async-trait", "blocking", "enumflags2", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-core", "futures-sink", "futures-util", diff --git a/src-tauri/src/commands.rs b/src-tauri/src/commands.rs index 1bdca788..98fc3212 100644 --- a/src-tauri/src/commands.rs +++ b/src-tauri/src/commands.rs @@ -95,12 +95,12 @@ pub async fn start_global_logwatcher(handle: AppHandle) -> Result<(), Error> { Ok(()) } -#[tauri::command(async)] -pub async fn stop_global_logwatcher(handle: AppHandle) -> Result<(), Error> { +#[tauri::command] +pub fn stop_global_logwatcher(handle: AppHandle) -> Result<(), Error> { stop_global_log_watcher_task(&handle) } -#[tauri::command] +#[tauri::command(async)] pub async fn disconnect( location_id: Id, connection_type: ConnectionType, @@ -131,16 +131,23 @@ pub async fn disconnect( match err { Error::CoreNotEnterprise => { debug!( - "Tried to fetch instance config from core after disconnecting from {name}(ID: {location_id}), but the core is not enterprise, so we can't fetch the config." + "Tried to fetch instance config from core after disconnecting from \ + {name}(ID: {location_id}), but the core is not enterprise, so we \ + can't fetch the config." 
); } Error::NoToken => { debug!( - "Tried to fetch instance config from core after disconnecting from {name}(ID: {location_id}), but this location's instance has no polling token, so we can't fetch the config." + "Tried to fetch instance config from core after disconnecting from \ + {name}(ID: {location_id}), but this location's instance has no \ + polling token, so we can't fetch the config." ); } _ => { - warn!("Error while trying to fetch instance config after disconnecting from {name}(ID: {location_id}): {err}"); + warn!( + "Error while trying to fetch instance config after disconnecting \ + from {name}(ID: {location_id}): {err}" + ); } } }; @@ -148,7 +155,10 @@ pub async fn disconnect( info!("Disconnected from {connection_type} {name}(ID: {location_id})"); Ok(()) } else { - warn!("Couldn't disconnect from {connection_type} {name}(ID: {location_id}), as no active connection was found."); + warn!( + "Couldn't disconnect from {connection_type} {name}(ID: {location_id}), as no active \ + connection was found." + ); Err(Error::NotFound) } } @@ -432,7 +442,7 @@ pub async fn update_instance( } /// Returns true if configuration in instance_info differs from current configuration -pub async fn locations_changed( +pub(crate) async fn locations_changed( transaction: &mut Transaction<'_, Sqlite>, instance: &Instance, device_config: &DeviceConfigResponse, @@ -457,7 +467,7 @@ pub async fn locations_changed( Ok(db_locations != core_locations) } -pub async fn do_update_instance( +pub(crate) async fn do_update_instance( transaction: &mut Transaction<'_, Sqlite>, instance: &mut Instance, response: DeviceConfigResponse, @@ -939,39 +949,66 @@ pub async fn delete_tunnel(tunnel_id: Id, handle: AppHandle) -> Result<(), Error error!("The tunnel to delete with ID {tunnel_id} could not be found, cannot delete."); return Err(Error::NotFound); }; - debug!("The tunnel to delete with ID {tunnel_id} has been identified as {tunnel}, proceeding with deletion."); + debug!( + "The tunnel to delete with ID {tunnel_id} has been identified as {tunnel}, proceeding \ + with deletion." + ); if let Some(connection) = app_state .remove_connection(tunnel_id, ConnectionType::Tunnel) .await { - debug!("Found active connection for tunnel {tunnel} which is being deleted, closing the connection."); + debug!( + "Found active connection for tunnel {tunnel} which is being deleted, closing the \ + connection." + ); if let Some(pre_down) = &tunnel.pre_down { - debug!("Executing defined PreDown command before removing the interface {} for the tunnel {tunnel}: {pre_down}", connection.interface_name); + debug!( + "Executing defined PreDown command before removing the interface {} for the \ + tunnel {tunnel}: {pre_down}", + connection.interface_name + ); let _ = execute_command(pre_down); - info!("Executed defined PreDown command before removing the interface {} for the tunnel {tunnel}: {pre_down}", connection.interface_name); + info!( + "Executed defined PreDown command before removing the interface {} for the \ + tunnel {tunnel}: {pre_down}", + connection.interface_name + ); } let request = RemoveInterfaceRequest { interface_name: connection.interface_name.clone(), endpoint: tunnel.endpoint.clone(), }; - client - .remove_interface(request) - .await - .map_err(|status| { - error!("An error occurred while removing interface {} for tunnel {tunnel}, status: {status}", - connection.interface_name); - Error::InternalError( - format!( - "An error occurred while removing interface {} for tunnel {tunnel}, error message: {}. 
Check logs for more details.", connection.interface_name, status.message() - ) - ) - })?; - info!("Network interface {} has been removed and the connection to tunnel {tunnel} has been closed.", connection.interface_name); + client.remove_interface(request).await.map_err(|status| { + error!( + "An error occurred while removing interface {} for tunnel {tunnel}, status: \ + {status}", + connection.interface_name + ); + Error::InternalError(format!( + "An error occurred while removing interface {} for tunnel {tunnel}, error \ + message: {}. Check logs for more details.", + connection.interface_name, + status.message() + )) + })?; + info!( + "Network interface {} has been removed and the connection to tunnel {tunnel} has been \ + closed.", + connection.interface_name + ); if let Some(post_down) = &tunnel.post_down { - debug!("Executing defined PostDown command after removing the interface {} for the tunnel {tunnel}: {post_down}", connection.interface_name); + debug!( + "Executing defined PostDown command after removing the interface {} for the \ + tunnel {tunnel}: {post_down}", + connection.interface_name + ); let _ = execute_command(post_down); - info!("Executed defined PostDown command after removing the interface {} for the tunnel {tunnel}: {post_down}", connection.interface_name); + info!( + "Executed defined PostDown command after removing the interface {} for the \ + tunnel {tunnel}: {post_down}", + connection.interface_name + ); } } tunnel.delete(pool).await?; diff --git a/src-tauri/src/database/models/connection.rs b/src-tauri/src/database/models/connection.rs index cb9bdf33..f816c226 100644 --- a/src-tauri/src/database/models/connection.rs +++ b/src-tauri/src/database/models/connection.rs @@ -88,27 +88,24 @@ impl ConnectionInfo { where E: SqliteExecutor<'e>, { - // Because we store interface information for given timestamp select last upload and download - // before connection ended + // Because we store interface information for given timestamp, + // select last upload and download before connection ended. 
// FIXME: Optimize query let connections = query_as!( ConnectionInfo, - "SELECT c.id, c.location_id, \ - c.start \"start!\", c.end \"end!\", \ + "SELECT c.id, c.location_id, c.start, c.end, \ COALESCE((\ SELECT ls.upload \ FROM location_stats ls \ WHERE ls.location_id = c.location_id \ - AND ls.collected_at >= c.start \ - AND ls.collected_at <= c.end \ + AND ls.collected_at BETWEEN c.start AND c.end \ ORDER BY ls.collected_at DESC LIMIT 1 \ ), 0) \"upload: _\", \ COALESCE((\ SELECT ls.download \ FROM location_stats ls \ WHERE ls.location_id = c.location_id \ - AND ls.collected_at >= c.start \ - AND ls.collected_at <= c.end \ + AND ls.collected_at BETWEEN c.start AND c.end \ ORDER BY ls.collected_at DESC LIMIT 1 \ ), 0) \"download: _\" \ FROM connection c WHERE location_id = $1 \ diff --git a/src-tauri/src/database/models/instance.rs b/src-tauri/src/database/models/instance.rs index 6dcdfbe8..45935459 100644 --- a/src-tauri/src/database/models/instance.rs +++ b/src-tauri/src/database/models/instance.rs @@ -47,7 +47,8 @@ impl Instance { E: SqliteExecutor<'e>, { query!( - "UPDATE instance SET name = $1, uuid = $2, url = $3, proxy_url = $4, username = $5, disable_all_traffic = $6, enterprise_enabled = $7, token = $8 WHERE id = $9;", + "UPDATE instance SET name = $1, uuid = $2, url = $3, proxy_url = $4, username = $5, \ + disable_all_traffic = $6, enterprise_enabled = $7, token = $8 WHERE id = $9;", self.name, self.uuid, self.url, @@ -69,7 +70,8 @@ impl Instance { { let instances = query_as!( Self, - "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token \"token?\", disable_all_traffic, enterprise_enabled FROM instance;" + "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token \"token?\", \ + disable_all_traffic, enterprise_enabled FROM instance;" ) .fetch_all(executor) .await?; @@ -82,7 +84,8 @@ impl Instance { { let instance = query_as!( Self, - "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token \"token?\", disable_all_traffic, enterprise_enabled FROM instance WHERE id = $1;", + "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token \"token?\", \ + disable_all_traffic, enterprise_enabled FROM instance WHERE id = $1;", id ) .fetch_optional(executor) @@ -115,7 +118,9 @@ impl Instance { { let instances = query_as!( Self, - "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token, disable_all_traffic, enterprise_enabled FROM instance WHERE token IS NOT NULL;" + "SELECT id \"id: _\", name, uuid, url, proxy_url, username, token, \ + disable_all_traffic, enterprise_enabled FROM instance + WHERE token IS NOT NULL;" ) .fetch_all(executor) .await?; @@ -144,7 +149,9 @@ impl Instance { let url = self.url.clone(); let proxy_url = self.proxy_url.clone(); let result = query!( - "INSERT INTO instance (name, uuid, url, proxy_url, username, token, disable_all_traffic, enterprise_enabled) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id;", + "INSERT INTO instance (name, uuid, url, proxy_url, username, token, \ + disable_all_traffic, enterprise_enabled) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id;", self.name, self.uuid, url, diff --git a/src-tauri/src/database/models/location.rs b/src-tauri/src/database/models/location.rs index c06f3891..c866d848 100644 --- a/src-tauri/src/database/models/location.rs +++ b/src-tauri/src/database/models/location.rs @@ -36,28 +36,15 @@ impl fmt::Display for Location { } impl Location { - pub async fn all<'e, E>(executor: E) -> Result, SqlxError> - where - E: SqliteExecutor<'e>, - { - query_as!( - Self, - 
"SELECT id, instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id,\ - route_all_traffic, mfa_enabled, keepalive_interval \ - FROM location;" - ) - .fetch_all(executor) - .await - } - - pub async fn save<'e, E>(&mut self, executor: E) -> Result<(), SqlxError> + pub(crate) async fn save<'e, E>(&mut self, executor: E) -> Result<(), SqlxError> where E: SqliteExecutor<'e>, { // Update the existing record when there is an ID query!( - "UPDATE location SET instance_id = $1, name = $2, address = $3, pubkey = $4, endpoint = $5, allowed_ips = $6, dns = $7, \ - network_id = $8, route_all_traffic = $9, mfa_enabled = $10, keepalive_interval = $11 WHERE id = $12", + "UPDATE location SET instance_id = $1, name = $2, address = $3, pubkey = $4, \ + endpoint = $5, allowed_ips = $6, dns = $7, network_id = $8, route_all_traffic = $9, \ + mfa_enabled = $10, keepalive_interval = $11 WHERE id = $12", self.instance_id, self.name, self.address, @@ -77,21 +64,25 @@ impl Location { Ok(()) } - pub async fn find_by_id<'e, E>(executor: E, location_id: Id) -> Result, SqlxError> + pub(crate) async fn find_by_id<'e, E>( + executor: E, + location_id: Id, + ) -> Result, SqlxError> where E: SqliteExecutor<'e>, { query_as!( Self, - "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id, \ - route_all_traffic, mfa_enabled, keepalive_interval FROM location WHERE id = $1", + "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, \ + network_id, route_all_traffic, mfa_enabled, keepalive_interval \ + FROM location WHERE id = $1", location_id ) .fetch_optional(executor) .await } - pub async fn find_by_instance_id<'e, E>( + pub(crate) async fn find_by_instance_id<'e, E>( executor: E, instance_id: Id, ) -> Result, SqlxError> @@ -100,43 +91,34 @@ impl Location { { query_as!( Self, - "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id, \ - route_all_traffic, mfa_enabled, keepalive_interval FROM location WHERE instance_id = $1", + "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, \ + network_id, route_all_traffic, mfa_enabled, keepalive_interval \ + FROM location WHERE instance_id = $1", instance_id ) .fetch_all(executor) .await } - pub async fn find_by_name<'e, E>(executor: E, name: &str) -> Result - where - E: SqliteExecutor<'e>, - { - query_as!( - Self, - "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id, \ - route_all_traffic, mfa_enabled, keepalive_interval FROM location WHERE name = $1", - name - ) - .fetch_one(executor) - .await - } - - pub async fn find_by_public_key<'e, E>(executor: E, pubkey: &str) -> Result + pub(crate) async fn find_by_public_key<'e, E>( + executor: E, + pubkey: &str, + ) -> Result where E: SqliteExecutor<'e>, { query_as!( Self, - "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, network_id, \ - route_all_traffic, mfa_enabled, keepalive_interval FROM location WHERE pubkey = $1;", + "SELECT id \"id: _\", instance_id, name, address, pubkey, endpoint, allowed_ips, dns, \ + network_id, route_all_traffic, mfa_enabled, keepalive_interval \ + FROM location WHERE pubkey = $1;", pubkey ) .fetch_one(executor) .await } - pub async fn delete<'e, E>(&self, executor: E) -> Result<(), SqlxError> + pub(crate) async fn delete<'e, E>(&self, executor: E) -> Result<(), SqlxError> where E: SqliteExecutor<'e>, { @@ -147,7 +129,7 @@ impl Location { } /// Disables all traffic 
for locations related to the given instance - pub async fn disable_all_traffic_for_all<'e, E>( + pub(crate) async fn disable_all_traffic_for_all<'e, E>( executor: E, instance_id: Id, ) -> Result<(), Error> @@ -165,14 +147,14 @@ impl Location { } impl Location { - pub async fn save<'e, E>(self, executor: E) -> Result, SqlxError> + pub(crate) async fn save<'e, E>(self, executor: E) -> Result, SqlxError> where E: SqliteExecutor<'e>, { // Insert a new record when there is no ID let id = query_scalar!( - "INSERT INTO location (instance_id, name, address, pubkey, endpoint, allowed_ips, dns, \ - network_id, route_all_traffic, mfa_enabled, keepalive_interval) \ + "INSERT INTO location (instance_id, name, address, pubkey, endpoint, allowed_ips, \ + dns, network_id, route_all_traffic, mfa_enabled, keepalive_interval) \ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) \ RETURNING id \"id!\"", self.instance_id, diff --git a/src-tauri/src/database/models/tunnel.rs b/src-tauri/src/database/models/tunnel.rs index 9401bf84..e7428057 100644 --- a/src-tauri/src/database/models/tunnel.rs +++ b/src-tauri/src/database/models/tunnel.rs @@ -59,7 +59,8 @@ impl Tunnel { query!( "UPDATE tunnel SET name = $1, pubkey = $2, prvkey = $3, address = $4, \ server_pubkey = $5, preshared_key = $6, allowed_ips = $7, endpoint = $8, dns = $9, \ - persistent_keep_alive = $10, route_all_traffic = $11, pre_up = $12, post_up = $13, pre_down = $14, post_down = $15 \ + persistent_keep_alive = $10, route_all_traffic = $11, pre_up = $12, post_up = $13, \ + pre_down = $14, post_down = $15 \ WHERE id = $16;", self.name, self.pubkey, @@ -101,8 +102,9 @@ impl Tunnel { { query_as!( Self, - "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, allowed_ips, endpoint, dns, \ - persistent_keep_alive, route_all_traffic, pre_up, post_up, pre_down, post_down FROM tunnel WHERE id = $1;", + "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, \ + allowed_ips, endpoint, dns, persistent_keep_alive, route_all_traffic, pre_up, \ + post_up, pre_down, post_down FROM tunnel WHERE id = $1;", tunnel_id ) .fetch_optional(executor) @@ -115,8 +117,10 @@ impl Tunnel { { let tunnels = query_as!( Self, - "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, allowed_ips, endpoint, dns, \ - persistent_keep_alive, route_all_traffic, pre_up, post_up, pre_down, post_down FROM tunnel;" + "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, \ + allowed_ips, endpoint, dns, persistent_keep_alive, route_all_traffic, pre_up, \ + post_up, pre_down, post_down \ + FROM tunnel;" ) .fetch_all(executor) .await?; @@ -131,9 +135,10 @@ impl Tunnel { E: SqliteExecutor<'e>, { query_as!( - Self, - "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, allowed_ips, endpoint, dns, persistent_keep_alive, \ - route_all_traffic, pre_up, post_up, pre_down, post_down \ + Self, + "SELECT id \"id: _\", name, pubkey, prvkey, address, server_pubkey, preshared_key, \ + allowed_ips, endpoint, dns, persistent_keep_alive, route_all_traffic, pre_up, \ + post_up, pre_down, post_down \ FROM tunnel WHERE server_pubkey = $1;", pubkey ) @@ -508,27 +513,24 @@ impl TunnelConnectionInfo { where E: SqliteExecutor<'e>, { - // Because we store interface information for given timestamp select last upload and download - // before connection ended + // Because we store interface information for given timestamp, + // select last upload and download before connection 
ended. // FIXME: Optimize query let connections = query_as!( TunnelConnectionInfo, - "SELECT c.id, c.tunnel_id, \ - c.start \"start!\", c.end \"end!\", \ + "SELECT c.id, c.tunnel_id, c.start, c.end, \ COALESCE((\ SELECT ls.upload \ FROM tunnel_stats ls \ WHERE ls.tunnel_id = c.tunnel_id \ - AND ls.collected_at >= c.start \ - AND ls.collected_at <= c.end \ + AND ls.collected_at BETWEEN c.start AND c.end \ ORDER BY ls.collected_at DESC LIMIT 1 \ ), 0) \"upload: _\", \ COALESCE((\ SELECT ls.download \ FROM tunnel_stats ls \ WHERE ls.tunnel_id = c.tunnel_id \ - AND ls.collected_at >= c.start \ - AND ls.collected_at <= c.end \ + AND ls.collected_at BETWEEN c.start AND c.end \ ORDER BY ls.collected_at DESC LIMIT 1 \ ), 0) \"download: _\" \ FROM tunnel_connection c WHERE tunnel_id = $1 \ diff --git a/src-tauri/src/periodic/connection.rs b/src-tauri/src/periodic/connection.rs index e406cd6d..035b2909 100644 --- a/src-tauri/src/periodic/connection.rs +++ b/src-tauri/src/periodic/connection.rs @@ -106,9 +106,15 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro let connections = app_state.active_connections.lock().await; let connection_count = connections.len(); if connection_count == 0 { - debug!("Connections verification skipped, no active connections found, task will wait for next {CHECK_INTERVAL:?}"); + debug!( + "Connections verification skipped, no active connections found, task \ + will wait for next {CHECK_INTERVAL:?}" + ); } else { - debug!("Verifying state of {connection_count} active connections. Inactive connections will be disconnected and reconnected if possible."); + debug!( + "Verifying state of {connection_count} active connections. Inactive \ + connections will be disconnected and reconnected if possible." + ); } let peer_alive_period = TimeDelta::seconds(i64::from( app_state.app_config.lock().unwrap().peer_alive_period, @@ -128,11 +134,21 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro // Check if there was any traffic since the connection was established. // If not, consider the location dead and disconnect it later without reconnecting. if latest_stat.collected_at < con.start { - debug!("There wasn't any activity for Location {} since its connection at {}; considering it being dead and possibly broken. \ - It will be disconnected without a further automatic reconnect.", con.location_id, con.start); + debug!( + "There wasn't any activity for Location {} since its \ + connection at {}; considering it being dead and possibly \ + broken. 
It will be disconnected without a further automatic \ reconnect.", con.location_id, con.start ); locations_to_disconnect.push((con.location_id, false)); } else { - debug!("There wasn't any activity for Location {} for the last {}s; considering it being dead.", con.location_id, peer_alive_period.num_seconds()); + debug!( + "There wasn't any activity for Location {} for the last \ + {}s; considering it being dead.", + con.location_id, + peer_alive_period.num_seconds() + ); locations_to_disconnect.push((con.location_id, true)); } } @@ -143,12 +159,20 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro con.connection_type, con.interface_name, con.location_id ); if Utc::now() - con.start.and_utc() > peer_alive_period { - debug!("There wasn't any activity for Location {} since its connection at {}; considering it being dead.", con.location_id, con.start); + debug!( + "There wasn't any activity for Location {} since its \ + connection at {}; considering it being dead.", + con.location_id, con.start + ); locations_to_disconnect.push((con.location_id, false)); } } Err(err) => { - warn!("Verification for location {}({}) skipped due to db error. Error: {err}", con.interface_name, con.location_id); + warn!( + "Verification for location {}({}) skipped due to database error: \ + {err}", + con.interface_name, con.location_id + ); } } } @@ -163,11 +187,21 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro // Check if there was any traffic since the connection was established. // If not, consider the location dead and disconnect it later without reconnecting. if latest_stat.collected_at - con.start < TimeDelta::zero() { - debug!("There wasn't any activity for Tunnel {} since its connection at {}; considering it being dead and possibly broken. \ - It will be disconnected without a further automatic reconnect.", con.location_id, con.start); + debug!( + "There wasn't any activity for Tunnel {} since its \ + connection at {}; considering it being dead and possibly \ + broken. It will be disconnected without a further \ + automatic reconnect.", + con.location_id, con.start + ); tunnels_to_disconnect.push((con.location_id, false)); } else { - debug!("There wasn't any activity for Tunnel {} for the last {}s; considering it being dead.", con.location_id, peer_alive_period.num_seconds()); + debug!( + "There wasn't any activity for Tunnel {} for the last \ + {}s; considering it being dead.", + con.location_id, + peer_alive_period.num_seconds() + ); tunnels_to_disconnect.push((con.location_id, true)); } } @@ -178,15 +212,19 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro con.interface_name, con.location_id ); if Utc::now() - con.start.and_utc() > peer_alive_period { - debug!("There wasn't any activity for Location {} since its connection at {}; considering it being dead.", con.location_id, con.start); + debug!( + "There wasn't any activity for Location {} since its \ + connection at {}; considering it being dead.", + con.location_id, con.start + ); tunnels_to_disconnect.push((con.location_id, false)); } } Err(err) => { warn!( - "Verification for tunnel {}({}) skipped due to db error. Error: {err}", - con.interface_name, - con.location_id + "Verification for tunnel {}({}) skipped due to db error. 
\ Error: {err}", + con.interface_name, con.location_id ); } } @@ -201,7 +239,11 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro match Location::find_by_id(pool, location_id).await { Ok(Some(location)) => { if !allow_reconnect { - warn!("Automatic reconnect for location {}({}) is not possible due to lack of activity. Interface will be disconnected.", location.name, location.id); + warn!( + "Automatic reconnect for location {}({}) is not possible due to lack \ + of activity. Interface will be disconnected.", + location.name, location.id + ); disconnect_dead_connection( location_id, &location.name, @@ -213,7 +255,11 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro } else if // only try to reconnect when location is not protected behind MFA location.mfa_enabled { - warn!("Automatic reconnect for location {}({}) is not possible due to enabled MFA. Interface will be disconnected.", location.name, location.id); + warn!( + "Automatic reconnect for location {}({}) is not possible due to \ + enabled MFA. Interface will be disconnected.", + location.name, location.id + ); disconnect_dead_connection( location_id, &location.name, @@ -235,10 +281,17 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro } Ok(None) => { // Unlikely due to ON DELETE CASCADE. - warn!("Attempt to reconnect the location ID {location_id} cannot be made, as location was not found in the database."); + warn!( + "Attempt to reconnect the location ID {location_id} cannot be made, as \ + location was not found in the database." + ); } Err(err) => { - warn!("Could not retrieve location ID {location_id} because of a database error. Automatic reconnection cannot be done, interface will be disconnected. Error: {err}"); + warn!( + "Could not retrieve location ID {location_id} because of a database \ + error. Automatic reconnection cannot be done, interface will be \ + disconnected. Error: {err}" + ); disconnect_dead_connection( location_id, "DEAD LOCATION", @@ -265,7 +318,12 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro ) .await; } else { - debug!("Automatic reconnect for location {}({}) is not possible due to lack of activity since the connection start. Interface will be disconnected.", tunnel.name, tunnel.id); + debug!( + "Automatic reconnect for location {}({}) is not possible due to lack \ + of activity since the connection start. Interface will be \ + disconnected.", + tunnel.name, tunnel.id + ); disconnect_dead_connection( tunnel_id, "DEAD TUNNEL", @@ -278,10 +336,16 @@ pub async fn verify_active_connections(app_handle: AppHandle) -> Result<(), Erro } Ok(None) => { // Unlikely due to ON DELETE CASCADE. - warn!("Attempt to reconnect the tunnel ID {tunnel_id} cannot be made, as the tunnel was not found in the database."); + warn!( + "Attempt to reconnect the tunnel ID {tunnel_id} cannot be made, as the \ + tunnel was not found in the database." + ); } Err(err) => { - warn!("Attempt to reconnect the tunnel ID {tunnel_id} cannot be made, because of a database error. Error: {err}, connection will be dropped instead."); + warn!( + "Attempt to reconnect the tunnel ID {tunnel_id} cannot be made, because \ + of a database error: {err}, connection will be dropped instead." 
+ ); disconnect_dead_connection( tunnel_id, "DEAD TUNNEL", diff --git a/src-tauri/src/service/mod.rs b/src-tauri/src/service/mod.rs index e3ccbf1d..b1121f64 100644 --- a/src-tauri/src/service/mod.rs +++ b/src-tauri/src/service/mod.rs @@ -54,14 +54,14 @@ pub enum DaemonError { #[derive(Debug, Default)] pub struct DaemonService { - stats_period: u64, + stats_period: Duration, } impl DaemonService { #[must_use] pub fn new(config: &Config) -> Self { Self { - stats_period: config.stats_period, + stats_period: Duration::from_secs(config.stats_period), } } } @@ -192,16 +192,14 @@ impl DesktopDaemonService for DaemonService { #[cfg(not(windows))] { debug!("Cleaning up interface {ifname} routing"); - wgapi - .remove_endpoint_routing(&request.endpoint) - .map_err(|err| { - let msg = format!( - "Failed to remove routing for endpoint {}: {err}", - request.endpoint - ); - error!("{msg}"); - Status::new(Code::Internal, msg) - })?; + // Ignore error as this should not be considered fatal, + // e.g. endpoint might fail to resolve DNS name. + if let Err(err) = wgapi.remove_endpoint_routing(&request.endpoint) { + error!( + "Failed to remove routing for endpoint {}: {err}", + request.endpoint + ); + } } wgapi.remove_interface().map_err(|err| { @@ -229,7 +227,7 @@ impl DesktopDaemonService for DaemonService { // Setup WireGuard API. let wgapi = setup_wgapi(&ifname)?; - let mut interval = interval(Duration::from_secs(self.stats_period)); + let mut interval = interval(self.stats_period); let (tx, rx) = mpsc::channel(64); span.in_scope(|| { @@ -283,7 +281,10 @@ impl DesktopDaemonService for DaemonService { } debug!("Network activity statistics for interface {ifname} sent to the client"); } - debug!("The client has disconnected from the network usage statistics data stream for interface {ifname}, stopping the statistics data collection task."); + debug!( + "The client has disconnected from the network usage statistics data stream \ + for interface {ifname}, stopping the statistics data collection task." + ); }.instrument(span)); let output_stream = ReceiverStream::new(rx); @@ -426,7 +427,8 @@ mod tests { let mut base_peer = Peer::new(peer_key); let addr = IpAddrMask::from_str("10.20.30.2/32").unwrap(); base_peer.allowed_ips.push(addr); - base_peer.last_handshake = Some(SystemTime::UNIX_EPOCH); // workaround since ns are lost in conversion + // Workaround since nanoseconds are lost in conversion. 
+ base_peer.last_handshake = Some(SystemTime::UNIX_EPOCH); base_peer.protocol_version = Some(3); base_peer.endpoint = Some("127.0.0.1:8080".parse().unwrap()); base_peer.tx_bytes = 100; diff --git a/src-tauri/src/utils.rs b/src-tauri/src/utils.rs index 020687ba..dce403b5 100644 --- a/src-tauri/src/utils.rs +++ b/src-tauri/src/utils.rs @@ -46,12 +46,11 @@ use winapi::{ }, }; -pub const IS_MACOS: bool = cfg!(target_os = "macos"); -pub static DEFAULT_ROUTE_IPV4: &str = "0.0.0.0/0"; -pub static DEFAULT_ROUTE_IPV6: &str = "::/0"; +pub(crate) static DEFAULT_ROUTE_IPV4: &str = "0.0.0.0/0"; +pub(crate) static DEFAULT_ROUTE_IPV6: &str = "::/0"; /// Setup client interface -pub async fn setup_interface( +pub(crate) async fn setup_interface( location: &Location, interface_name: String, preshared_key: Option, @@ -60,14 +59,14 @@ pub async fn setup_interface( ) -> Result<(), Error> { debug!("Setting up interface for location: {location}"); - debug!("Looking for wireguard keys for location {location} instance"); + debug!("Looking for WireGuard keys for location {location} instance"); let Some(keys) = WireguardKeys::find_by_instance_id(pool, location.instance_id).await? else { error!("No keys found for instance: {}", location.instance_id); return Err(Error::InternalError( "No keys found for instance".to_string(), )); }; - debug!("Wireguard keys found for location {location} instance"); + debug!("WireGuard keys found for location {location} instance"); // prepare peer config debug!( @@ -94,11 +93,14 @@ pub async fn setup_interface( } debug!( - "Parsing location {location} allowed ips: {}", + "Parsing location {location} allowed IPs: {}", location.allowed_ips ); let allowed_ips = if location.route_all_traffic { - debug!("Using all traffic routing for location {location}: {DEFAULT_ROUTE_IPV4} {DEFAULT_ROUTE_IPV6}"); + debug!( + "Using all traffic routing for location {location}: {DEFAULT_ROUTE_IPV4} \ + {DEFAULT_ROUTE_IPV6}" + ); vec![DEFAULT_ROUTE_IPV4.into(), DEFAULT_ROUTE_IPV6.into()] } else { debug!( @@ -118,7 +120,10 @@ pub async fn setup_interface( } Err(err) => { // Handle the error from IpAddrMask::from_str, if needed - error!("Error parsing IP address {allowed_ip} while setting up interface for location {location}, error details: {err}"); + error!( + "Error parsing IP address {allowed_ip} while setting up interface for \ + location {location}, error details: {err}" + ); continue; } } @@ -132,7 +137,8 @@ pub async fn setup_interface( debug!("Looking for a free port for interface {interface_name}..."); let Some(port) = find_free_tcp_port() else { let msg = format!( - "Couldn't find free port during interface {interface_name} setup for location {location}" + "Couldn't find free port during interface {interface_name} setup for location \ + {location}" ); error!("{msg}"); return Err(Error::InternalError(msg)); @@ -160,18 +166,31 @@ pub async fn setup_interface( }; if let Err(error) = client.create_interface(request).await { if error.code() == Code::Unavailable { - error!("Failed to set up connection for location {location}; background service is unavailable. Make sure the service is running. Error: {error}, Interface configuration: {interface_config:?}"); + error!( + "Failed to set up connection for location {location}; background service is \ + unavailable. Make sure the service is running. Error: {error}, Interface \ + configuration: {interface_config:?}" + ); Err(Error::InternalError( "Background service is unavailable. 
Make sure the service is running.".into(), )) } else { - error!("Failed to send a request to the background service to create an interface for location {location} with the following configuration: {interface_config:?}. Error: {error}"); - Err(Error::InternalError( - format!("Failed to send a request to the background service to create an interface for location {location}. Error: {error}. Check logs for details.") - )) + error!( + "Failed to send a request to the background service to create an interface for \ + location {location} with the following configuration: {interface_config:?}. \ + Error: {error}" + ); + Err(Error::InternalError(format!( + "Failed to send a request to the background service to create an interface for \ + location {location}. Error: {error}. Check logs for details." + ))) } } else { - info!("The interface for location {location} has been created successfully, interface name: {}.", interface_config.name); + info!( + "The interface for location {location} has been created successfully, interface \ + name: {}.", + interface_config.name + ); Ok(()) } } @@ -208,7 +227,10 @@ pub(crate) async fn stats_handler( .await .unwrap_or("UNKNOWN".to_string()); - debug!("Saving network usage stats related to location {location_name} (interface {interface_name})."); + debug!( + "Saving network usage stats related to location {location_name} \ + (interface {interface_name})." + ); trace!("Stats: {location_stats:?}"); match location_stats.save(&pool).await { Ok(_) => { @@ -216,8 +238,9 @@ pub(crate) async fn stats_handler( } Err(err) => { error!( - "Failed to save network usage stats for location {location_name}: {err}" - ); + "Failed to save network usage stats for location \ + {location_name}: {err}" + ); } } } else { @@ -229,7 +252,10 @@ pub(crate) async fn stats_handler( .get_name(&pool) .await .unwrap_or("UNKNOWN".to_string()); - debug!("Saving network usage stats related to tunnel {tunnel_name} (interface {interface_name}): {tunnel_stats:?}"); + debug!( + "Saving network usage stats related to tunnel {tunnel_name} \ + (interface {interface_name}): {tunnel_stats:?}" + ); match tunnel_stats.save(&pool).await { Ok(_) => { debug!("Saved stats for tunnel {tunnel_name}"); @@ -319,12 +345,15 @@ pub async fn setup_interface_tunnel( tunnel.allowed_ips ); let allowed_ips = if tunnel.route_all_traffic { - debug!("Using all traffic routing for tunnel {tunnel}: {DEFAULT_ROUTE_IPV4} {DEFAULT_ROUTE_IPV6}"); + debug!( + "Using all traffic routing for tunnel {tunnel}: {DEFAULT_ROUTE_IPV4} \ + {DEFAULT_ROUTE_IPV6}" + ); vec![DEFAULT_ROUTE_IPV4.into(), DEFAULT_ROUTE_IPV6.into()] } else { let msg = match &tunnel.allowed_ips { Some(ips) => format!("Using predefined location traffic for tunnel {tunnel}: {ips}"), - None => "No allowed IPs found in tunnel {tunnel} configuration".to_string(), + None => "No allowed IP addresses found in tunnel {tunnel} configuration".to_string(), }; debug!("{msg}"); tunnel @@ -379,11 +408,17 @@ pub async fn setup_interface_tunnel( dns: tunnel.dns.clone(), }; if let Some(pre_up) = &tunnel.pre_up { - debug!("Executing defined PreUp command before setting up the interface {} for the tunnel {tunnel}: {pre_up}", interface_config.name); + debug!( + "Executing defined PreUp command before setting up the interface {} for the \ + tunnel {tunnel}: {pre_up}", + interface_config.name + ); let _ = execute_command(pre_up); info!( - "Executed defined PreUp command before setting up the interface {} for the tunnel {tunnel}: {pre_up}", interface_config.name - ); + "Executed defined PreUp 
command before setting up the interface {} for the \ + tunnel {tunnel}: {pre_up}", + interface_config.name + ); } if let Err(error) = client.create_interface(request).await { error!( @@ -391,18 +426,28 @@ pub async fn setup_interface_tunnel( interface_config.name ); Err(Error::InternalError(format!( - "Failed to create a network interface ({}) for tunnel {tunnel}, error message: {}. Check logs for more details.", - interface_config.name, error.message() - ))) + "Failed to create a network interface ({}) for tunnel {tunnel}, error message: {}. \ + Check logs for more details.", + interface_config.name, + error.message() + ))) } else { info!( "Network interface {} for tunnel {tunnel} created successfully.", interface_config.name ); if let Some(post_up) = &tunnel.post_up { - debug!("Executing defined PostUp command after setting up the interface {} for the tunnel {tunnel}: {post_up}", interface_config.name); + debug!( + "Executing defined PostUp command after setting up the interface {} for the \ + tunnel {tunnel}: {post_up}", + interface_config.name + ); let _ = execute_command(post_up); - info!("Executed defined PostUp command after setting up the interface {} for the tunnel {tunnel}: {post_up}", interface_config.name); + info!( + "Executed defined PostUp command after setting up the interface {} for the \ + tunnel {tunnel}: {post_up}", + interface_config.name + ); } debug!( "Created interface {} with config: {interface_config:?}", @@ -651,19 +696,34 @@ pub(crate) async fn disconnect_interface( match active_connection.connection_type { ConnectionType::Location => { let Some(location) = Location::find_by_id(&state.db, location_id).await? else { - error!("Error while disconnecting interface {interface_name}, location with ID {location_id} not found"); + error!( + "Error while disconnecting interface {interface_name}, location with ID \ + {location_id} not found" + ); return Err(Error::NotFound); }; let request = RemoveInterfaceRequest { interface_name, endpoint: location.endpoint.clone(), }; - debug!("Sending request to the background service to remove interface {} for location {}...", active_connection.interface_name, location.name); + debug!( + "Sending request to the background service to remove interface {} for location \ + {}...", + active_connection.interface_name, location.name + ); if let Err(error) = client.remove_interface(request).await { let msg = if error.code() == Code::Unavailable { - format!("Couldn't remove interface {}. Background service is unavailable. Please make sure the service is running. Error: {error}.", active_connection.interface_name) + format!( + "Couldn't remove interface {}. Background service is unavailable. \ + Please make sure the service is running. Error: {error}.", + active_connection.interface_name + ) } else { - format!("Failed to send a request to the background service to remove interface {}. Error: {error}.", active_connection.interface_name) + format!( + "Failed to send a request to the background service to remove interface \ + {}. Error: {error}.", + active_connection.interface_name + ) }; error!("{msg}"); return Err(Error::InternalError(msg)); @@ -683,13 +743,24 @@ pub(crate) async fn disconnect_interface( } ConnectionType::Tunnel => { let Some(tunnel) = Tunnel::find_by_id(&state.db, location_id).await? 
else { - error!("Error while disconnecting interface {interface_name}, tunnel with ID {location_id} not found"); + error!( + "Error while disconnecting interface {interface_name}, tunnel with ID \ + {location_id} not found" + ); return Err(Error::NotFound); }; if let Some(pre_down) = &tunnel.pre_down { - debug!("Executing defined PreDown command before setting up the interface {} for the tunnel {tunnel}: {pre_down}", active_connection.interface_name); + debug!( + "Executing defined PreDown command before setting up the interface {} for the \ + tunnel {tunnel}: {pre_down}", + active_connection.interface_name + ); let _ = execute_command(pre_down); - info!("Executed defined PreDown command before setting up the interface {} for the tunnel {tunnel}: {pre_down}", active_connection.interface_name); + info!( + "Executed defined PreDown command before setting up the interface {} for \ + the tunnel {tunnel}: {pre_down}", + active_connection.interface_name + ); } let request = RemoveInterfaceRequest { interface_name, @@ -697,8 +768,8 @@ pub(crate) async fn disconnect_interface( }; if let Err(error) = client.remove_interface(request).await { error!( - "Error while removing interface {}, error details: {:?}", - active_connection.interface_name, error + "Error while removing interface {}, error details: {error:?}", + active_connection.interface_name ); return Err(Error::InternalError(format!( "Failed to remove interface, error message: {}", @@ -706,9 +777,17 @@ pub(crate) async fn disconnect_interface( ))); } if let Some(post_down) = &tunnel.post_down { - debug!("Executing defined PostDown command after removing the interface {} for the tunnel {tunnel}: {post_down}", active_connection.interface_name); + debug!( + "Executing defined PostDown command after removing the interface {} for the \ + tunnel {tunnel}: {post_down}", + active_connection.interface_name + ); let _ = execute_command(post_down); - info!("Executed defined PostDown command after removing the interface {} for the tunnel {tunnel}: {post_down}", active_connection.interface_name); + info!( + "Executed defined PostDown command after removing the interface {} for the \ + tunnel {tunnel}: {post_down}", + active_connection.interface_name + ); } let connection: TunnelConnection = active_connection.into(); let connection = connection.save(&state.db).await?; @@ -751,8 +830,9 @@ pub async fn get_tunnel_or_location_name( name } else { debug!( - "Couldn't identify {connection_type}'s name for logging purposes, it will be referred to as UNKNOWN", - ); + "Couldn't identify {connection_type}'s name for logging purposes, \ + it will be referred to as UNKNOWN", + ); "UNKNOWN".to_string() } } @@ -834,8 +914,15 @@ pub async fn sync_connections(app_handle: &AppHandle) -> Result<(), Error> { let appstate = app_handle.state::(); let all_locations = Location::all(&appstate.db).await?; let service_control_manager = open_service_manager().map_err(|err| { - error!("Failed to open service control manager while trying to sync client's connections with the host state: {}", err); - Error::InternalError("Failed to open service control manager while trying to sync client's connections with the host state".to_string()) + error!( + "Failed to open service control manager while trying to sync client's connections \ + with the host state: {err}" + ); + Error::InternalError( + "Failed to open service control manager while trying to sync client's + connections with the host state" + .to_string(), + ) })?; debug!("Opened service control manager, starting to synchronize active 
connections for locations..."); @@ -861,9 +948,11 @@ pub async fn sync_connections(app_handle: &AppHandle) -> Result<(), Error> { } _ => { warn!( - "Failed to open service {service_name} for interface {interface_name} while synchronizing active connections. \ - This may cause the location {} state to display incorrectly in the client. Reconnect to it manually to fix it. Error: {err}", location.name - ); + "Failed to open service {service_name} for interface {interface_name} while \ + synchronizing active connections. This may cause the location {} state to \ + display incorrectly in the client. Reconnect to it manually to fix it. \ + Error: {err}", location.name + ); continue; } }, @@ -876,7 +965,8 @@ pub async fn sync_connections(app_handle: &AppHandle) -> Result<(), Error> { debug!("WireGuard tunnel {} is running, ", interface_name); } else { debug!( - "WireGuard tunnel {} is not running, status code: {status}. Refer to Windows documentation for more information about the code.", + "WireGuard tunnel {} is not running, status code: {status}. Refer to \ + Windows documentation for more information about the code.", interface_name ); continue; @@ -885,8 +975,9 @@ pub async fn sync_connections(app_handle: &AppHandle) -> Result<(), Error> { Err(err) => { close_service_handle(service, &service_name)?; warn!( - "Failed to query service status for interface {} while synchronizing active connections. \ - This may cause the location {} state to display incorrectly in the client. Reconnect to it manually to fix it. Error: {err}", + "Failed to query service status for interface {} while synchronizing active \ + connections. This may cause the location {} state to display incorrectly in \ + the client. Reconnect to it manually to fix it. Error: {err}", interface_name, location.name ); continue; @@ -934,29 +1025,28 @@ pub async fn sync_connections(app_handle: &AppHandle) -> Result<(), Error> { for tunnel in Tunnel::all(&appstate.db).await? { let interface_name = get_interface_name(&tunnel.name); let service_name = format!("WireGuardTunnel${}", interface_name); - let service = match open_service( - service_control_manager, - &service_name, - SERVICE_QUERY_STATUS, - ) { - Ok(service) => service, - Err(err) => match err { - ERROR_SERVICE_DOES_NOT_EXIST => { - debug!( - "WireGuard tunnel {} is not installed, nothing to synchronize", - interface_name - ); - continue; - } - _ => { - error!( + let service = + match open_service(service_control_manager, &service_name, SERVICE_QUERY_STATUS) { + Ok(service) => service, + Err(err) => match err { + ERROR_SERVICE_DOES_NOT_EXIST => { + debug!( + "WireGuard tunnel {} is not installed, nothing to synchronize", + interface_name + ); + continue; + } + _ => { + error!( "Failed to open service {service_name} for interface {interface_name}. \ - This may cause the tunnel {} state to display incorrectly in the client. Reconnect to it manually to fix it. Error: {err}", tunnel.name + This may cause the tunnel {} state to display incorrectly in the \ + client. Reconnect to it manually to fix it. Error: {err}", + tunnel.name ); - continue; - } - }, - }; + continue; + } + }, + }; match get_service_status(service) { Ok(status) => { // Only point where we don't jump to the next iteration of the loop and continue with the rest of the code below the match
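For reference, the Windows synchronization above hinges on the service naming convention `WireGuardTunnel$<interface>` and a three-way decision per tunnel: a missing service means there is nothing to synchronize, a stopped service is skipped, and only a running one proceeds past the match to the rest of the loop body. A minimal, std-only sketch of that rule; the `SyncAction` enum and `decide_sync_action` helper are illustrative stand-ins for the winapi-based `open_service`/`get_service_status` calls in the diff, not part of the codebase:

// Sketch only: the real loop queries the Windows service control manager through
// the winapi-based helpers shown in the diff (open_service, get_service_status);
// this stand-alone version captures just the naming rule and the skip/proceed logic.

/// On Windows, a WireGuard tunnel runs as a service named `WireGuardTunnel$<interface>`.
fn wireguard_service_name(interface_name: &str) -> String {
    format!("WireGuardTunnel${interface_name}")
}

/// Simplified outcome of inspecting one tunnel's service during synchronization.
#[derive(Debug, PartialEq)]
enum SyncAction {
    /// Service is not installed: nothing to synchronize, move on to the next tunnel.
    Skip,
    /// Service exists but is not running: leave the tunnel disconnected and move on.
    SkipNotRunning,
    /// Service is running: the only case that proceeds past the match in the diff.
    Proceed,
}

fn decide_sync_action(service_exists: bool, service_running: bool) -> SyncAction {
    match (service_exists, service_running) {
        (false, _) => SyncAction::Skip,
        (true, false) => SyncAction::SkipNotRunning,
        (true, true) => SyncAction::Proceed,
    }
}

fn main() {
    assert_eq!(wireguard_service_name("wg0"), "WireGuardTunnel$wg0");
    assert_eq!(decide_sync_action(false, false), SyncAction::Skip);
    assert_eq!(decide_sync_action(true, true), SyncAction::Proceed);
}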
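Similarly, the tunnel setup and teardown paths above wrap the interface calls with optional PreUp/PostUp/PreDown/PostDown commands, logging before and after and deliberately discarding the result (`let _ = execute_command(...)`) so a failing hook never aborts the connection flow. A rough, self-contained illustration of that fire-and-forget pattern; the `run_hook` helper is hypothetical and stands in for the project's `execute_command`:

use std::process::Command;

// Hypothetical stand-in for the project's execute_command helper: run the hook
// through the platform shell, log the outcome, and never propagate the error.
fn run_hook(stage: &str, command: &str) {
    // PreUp/PostUp/PreDown/PostDown hooks are user-provided shell snippets.
    #[cfg(windows)]
    let result = Command::new("cmd").args(["/C", command]).status();
    #[cfg(not(windows))]
    let result = Command::new("sh").args(["-c", command]).status();

    match result {
        Ok(status) if status.success() => println!("{stage} hook succeeded: {command}"),
        Ok(status) => eprintln!("{stage} hook exited with {status}: {command}"),
        // Mirrors the diff's behaviour: hook failures are logged, not fatal.
        Err(err) => eprintln!("{stage} hook could not be started ({err}): {command}"),
    }
}

fn main() {
    // Example usage around an (elided) interface setup call.
    let pre_up = Some("echo preparing interface".to_string());
    let post_up = Some("echo interface is up".to_string());

    if let Some(cmd) = &pre_up {
        run_hook("PreUp", cmd);
    }
    // ... create the WireGuard interface here ...
    if let Some(cmd) = &post_up {
        run_hook("PostUp", cmd);
    }
}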