diff --git a/Cargo.lock b/Cargo.lock index 0f41350d92a..5e8e429d45e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,7 +59,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy 0.7.35", @@ -527,7 +527,7 @@ name = "attest-data" version = "0.3.0" source = "git+https://github.com/oxidecomputer/dice-util?rev=3cc953c8d0ace2f20cbcf3920b0771d25301960a#3cc953c8d0ace2f20cbcf3920b0771d25301960a" dependencies = [ - "getrandom", + "getrandom 0.2.15", "hubpack", "salty", "serde", @@ -589,7 +589,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -605,10 +605,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ "futures-core", - "getrandom", + "getrandom 0.2.15", "instant", "pin-project-lite", - "rand", + "rand 0.8.5", "tokio", ] @@ -732,6 +732,26 @@ dependencies = [ "which", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.6.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.96", +] + [[package]] name = "bit-set" version = "0.5.3" @@ -886,7 +906,7 @@ dependencies = [ "omicron-workspace-hack", "pq-sys", "proptest", - "rand", + "rand 0.8.5", "secrecy", "serde", "serde_with", @@ -911,6 +931,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "schemars", + "semver 1.0.25", "serde", "sled-agent-types", "sled-hardware-types", @@ -928,6 +949,7 @@ dependencies = [ "regress 0.9.1", "reqwest", "schemars", + "semver 1.0.25", "serde", "serde_json", "sled-hardware-types", @@ -1115,9 +1137,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.30" +version = "1.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" +checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" dependencies = [ "jobserver", "libc", @@ -1156,7 +1178,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a2b34126159980f92da2a08bdec0694fd80fb5eb9e48aff25d20a0d8dfa710d" dependencies = [ - "smallvec 1.13.2", + "smallvec 1.14.0", "target-lexicon", ] @@ -1984,7 +2006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1996,7 +2018,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "typenum", ] @@ -2051,7 +2073,7 @@ dependencies = [ "curve25519-dalek-derive", "digest", "fiat-crypto", - "rand_core", + "rand_core 0.6.4", "rustc_version 0.4.1", "subtle", "zeroize", @@ -2353,7 +2375,7 @@ dependencies = [ "hex", "hickory-proto", 
"ipnet", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "url", ] @@ -2629,7 +2651,7 @@ dependencies = [ "progenitor 0.9.1", "progenitor-client 0.9.1", "quote", - "rand", + "rand 0.8.5", "regress 0.9.1", "reqwest", "rustfmt-wrapper", @@ -2840,7 +2862,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "signature", @@ -2869,7 +2891,7 @@ dependencies = [ "hkdf", "pem-rfc7468", "pkcs8", - "rand_core", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -2939,7 +2961,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxide-client", - "rand", + "rand 0.8.5", "reqwest", "russh", "russh-keys", @@ -3133,7 +3155,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3474,7 +3496,7 @@ dependencies = [ "gateway-messages", "omicron-workspace-hack", "progenitor 0.9.1", - "rand", + "rand 0.8.5", "reqwest", "schemars", "serde", @@ -3593,10 +3615,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "ghash" version = "0.5.1" @@ -3675,15 +3709,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core", + "rand_core 0.6.4", "subtle", ] [[package]] name = "guppy" -version = "0.17.16" +version = "0.17.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dadc932032945158611b2b45baae9f6fc0024a6f91dd57f61f5edb4ad507f3b" +checksum = "452b22f04f2a5cdae96b3a636a7c311da0cba665ca90f5033e042b8b590b6d7c" dependencies = [ "ahash", "camino", @@ -3701,7 +3735,7 @@ dependencies = [ "semver 1.0.25", "serde", "serde_json", - "smallvec 1.13.2", + "smallvec 1.14.0", "static_assertions", "target-spec", ] @@ -3921,7 +3955,7 @@ dependencies = [ "hickory-proto", "once_cell", "radix_trie", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tokio", "tracing", @@ -3943,7 +3977,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -3964,9 +3998,9 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "resolv-conf", - "smallvec 1.13.2", + "smallvec 1.14.0", "thiserror 1.0.69", "tokio", "tracing", @@ -4222,7 +4256,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "smallvec 1.13.2", + "smallvec 1.14.0", "tokio", "want", ] @@ -4277,7 +4311,7 @@ dependencies = [ "hyper", "mime_guess", "percent-encoding", - "rand", + "rand 0.8.5", "tokio", "url", "winapi", @@ -4397,7 +4431,7 @@ dependencies = [ "icu_normalizer_data", "icu_properties", "icu_provider", - "smallvec 1.13.2", + "smallvec 1.14.0", "utf16_iter", "utf8_iter", "write16", @@ -4472,7 +4506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ "idna_adapter", - "smallvec 1.13.2", + "smallvec 1.14.0", 
"utf8_iter", ] @@ -5076,9 +5110,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libdlpi-sys" @@ -5118,7 +5152,7 @@ dependencies = [ "libnet", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831)", "propolis-server-config", - "rand", + "rand 0.8.5", "regex", "reqwest", "ron 0.7.1", @@ -5167,7 +5201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -5190,7 +5224,7 @@ dependencies = [ "nvpair", "nvpair-sys", "oxnet", - "rand", + "rand 0.8.5", "rusty-doors", "socket2", "thiserror 1.0.69", @@ -5582,7 +5616,7 @@ dependencies = [ "hermit-abi 0.3.9", "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -5594,9 +5628,9 @@ checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -5608,9 +5642,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", @@ -5650,7 +5684,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -5736,7 +5770,6 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "openssl", "oso", "pq-sys", @@ -5803,7 +5836,6 @@ dependencies = [ "omicron-common", "omicron-rpaths", "omicron-workspace-hack", - "once_cell", "pq-sys", "strum", "uuid", @@ -5836,11 +5868,10 @@ dependencies = [ "omicron-rpaths", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "oxnet", "parse-display", "pq-sys", - "rand", + "rand 0.8.5", "ref-cast", "schemars", "semver 1.0.25", @@ -5899,7 +5930,6 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "openapiv3", "oso", "oximeter", @@ -5911,7 +5941,7 @@ dependencies = [ "predicates", "pretty_assertions", "qorb", - "rand", + "rand 0.8.5", "rcgen", "ref-cast", "regex", @@ -5944,9 +5974,8 @@ dependencies = [ "ipnetwork", "omicron-common", "omicron-workspace-hack", - "once_cell", "oxnet", - "rand", + "rand 0.8.5", "serde_json", ] @@ -6158,7 +6187,7 @@ dependencies = [ "once_cell", "oxnet", "proptest", - "rand", + "rand 0.8.5", "sled-agent-client", "slog", "slog-error-chain", @@ -6248,7 +6277,6 @@ dependencies = [ "omicron-rpaths", "omicron-test-utils", "omicron-workspace-hack", - "once_cell", 
"pq-sys", "pretty_assertions", "serde", @@ -6389,6 +6417,7 @@ dependencies = [ "parse-display", "proptest", "schemars", + "semver 1.0.25", "serde", "serde_json", "serde_with", @@ -6408,7 +6437,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec 1.13.2", + "smallvec 1.14.0", ] [[package]] @@ -6500,7 +6529,7 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -6515,9 +6544,9 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "serde", - "smallvec 1.13.2", + "smallvec 1.14.0", "zeroize", ] @@ -6773,7 +6802,6 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "openapi-lint", "openapiv3", "pq-sys", @@ -6817,12 +6845,11 @@ dependencies = [ "mg-admin-client", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "oxnet", "parse-display", "progenitor-client 0.9.1", "proptest", - "rand", + "rand 0.8.5", "regress 0.9.1", "reqwest", "schemars", @@ -6922,7 +6949,6 @@ dependencies = [ "omicron-common", "omicron-test-utils", "omicron-workspace-hack", - "once_cell", "oximeter", "oximeter-instruments", "oximeter-producer", @@ -7081,7 +7107,6 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "openapi-lint", "openapiv3", "openssl", @@ -7103,7 +7128,7 @@ dependencies = [ "progenitor-client 0.9.1", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=95d6a559890c94e3aa62c8adcd7c4e123ec4c6dc)", "qorb", - "rand", + "rand 0.8.5", "range-requests", "rcgen", "ref-cast", @@ -7205,7 +7230,7 @@ dependencies = [ "strum", "subprocess", "supports-color", - "tabled", + "tabled 0.15.0", "textwrap", "tokio", "unicode-width 0.1.14", @@ -7260,7 +7285,7 @@ dependencies = [ "clap", "criterion", "omicron-workspace-hack", - "rand", + "rand 0.8.5", "rust-argon2", "schemars", "serde", @@ -7298,7 +7323,6 @@ dependencies = [ "omicron-pins", "omicron-workspace-hack", "omicron-zone-package", - "once_cell", "reqwest", "semver 1.0.25", "serde", @@ -7381,7 +7405,6 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "opte-ioctl", "oximeter", "oximeter-instruments", @@ -7391,7 +7414,7 @@ dependencies = [ "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=95d6a559890c94e3aa62c8adcd7c4e123ec4c6dc)", "propolis-mock-server", "propolis_api_types", - "rand", + "rand 0.8.5", "range-requests", "rcgen", "repo-depot-api", @@ -7510,6 +7533,7 @@ dependencies = [ "cc", "chrono", "cipher", + "clang-sys", "clap", "clap_builder", "console", @@ -7538,7 +7562,7 @@ dependencies = [ "futures-util", "gateway-messages", "generic-array", - "getrandom", + "getrandom 0.2.15", "group", "hashbrown 0.15.1", "hex", @@ -7553,6 +7577,7 @@ dependencies = [ "inout", "itertools 0.10.5", "itertools 0.12.1", + "itertools 0.14.0", "lalrpop-util", "lazy_static", "libc", @@ -7582,7 +7607,7 @@ dependencies = [ "proc-macro2", "qorb", "quote", - "rand", + "rand 0.8.5", "regex", "regex-automata", "regex-syntax 0.8.5", @@ -7600,7 +7625,7 @@ dependencies = [ "sha2", "similar", "slog", - "smallvec 1.13.2", + "smallvec 1.14.0", "spin", "string_cache", "subtle", @@ -7624,6 +7649,7 @@ dependencies = [ "uuid", "x509-cert", "zerocopy 0.7.35", + "zerocopy 0.8.10", "zeroize", "zip 0.6.6", ] 
@@ -7879,7 +7905,7 @@ dependencies = [ "omicron-workspace-hack", "progenitor 0.9.1", "progenitor-client 0.9.1", - "rand", + "rand 0.8.5", "regress 0.9.1", "reqwest", "serde", @@ -7978,7 +8004,7 @@ dependencies = [ "oximeter-client", "oximeter-db", "qorb", - "rand", + "rand 0.8.5", "reqwest", "schemars", "semver 1.0.25", @@ -8052,7 +8078,7 @@ dependencies = [ "sqlformat", "sqlparser", "strum", - "tabled", + "tabled 0.15.0", "tempfile", "termtree 0.5.1", "thiserror 1.0.69", @@ -8076,7 +8102,7 @@ dependencies = [ "libc", "omicron-workspace-hack", "oximeter", - "rand", + "rand 0.8.5", "schemars", "serde", "slog", @@ -8185,7 +8211,7 @@ dependencies = [ "omicron-workspace-hack", "oximeter-macro-impl", "parse-display", - "rand", + "rand 0.8.5", "rand_distr", "regex", "rstest", @@ -8271,7 +8297,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "rand_core", + "rand_core 0.6.4", "sha2", ] @@ -8308,6 +8334,17 @@ dependencies = [ "unicode-width 0.1.14", ] +[[package]] +name = "papergrid" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b915f831b85d984193fdc3d3611505871dc139b2534530fa01c1a6a6707b6723" +dependencies = [ + "bytecount", + "fnv", + "unicode-width 0.2.0", +] + [[package]] name = "parking" version = "2.2.1" @@ -8345,7 +8382,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.13.2", + "smallvec 1.14.0", "winapi", ] @@ -8358,7 +8395,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.5.7", - "smallvec 1.13.2", + "smallvec 1.14.0", "windows-targets 0.52.6", ] @@ -8383,7 +8420,7 @@ dependencies = [ "quote", "regex", "regex-syntax 0.8.5", - "structmeta 0.3.0", + "structmeta", "syn 2.0.96", ] @@ -8421,7 +8458,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -8432,7 +8469,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -8454,7 +8491,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c7fabb0b56aba5d2eb3fa9b1547c187f21f8c051295a7b97a50be6a9332f4cb" dependencies = [ - "smallvec 1.13.2", + "smallvec 1.14.0", ] [[package]] @@ -8614,10 +8651,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cd31dcfdbbd7431a807ef4df6edd6473228e94d5c805e8cf671227a21bad068" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "rand", + "rand 0.8.5", ] [[package]] @@ -8646,7 +8683,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared 0.11.2", - "rand", + "rand 0.8.5", ] [[package]] @@ -8744,7 +8781,7 @@ checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", "pkcs5", - "rand_core", + "rand_core 0.6.4", "spki", ] @@ -8867,9 +8904,9 @@ dependencies = [ [[package]] name = "postgres-protocol" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +checksum = "76ff0abab4a9b844b93ef7b81f1efc0a366062aaef2cd702c76256b5dc075c54" dependencies = [ "base64 0.22.1", 
"byteorder", @@ -8878,7 +8915,7 @@ dependencies = [ "hmac", "md-5", "memchr", - "rand", + "rand 0.9.0", "sha2", "stringprep", ] @@ -9041,11 +9078,33 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -9194,7 +9253,7 @@ dependencies = [ "progenitor 0.9.1", "progenitor-client 0.9.1", "propolis_api_types", - "rand", + "rand 0.8.5", "reqwest", "schemars", "serde", @@ -9215,7 +9274,7 @@ dependencies = [ "base64 0.21.7", "futures", "progenitor 0.8.0", - "rand", + "rand 0.8.5", "reqwest", "schemars", "serde", @@ -9241,7 +9300,7 @@ dependencies = [ "hyper", "progenitor 0.9.1", "propolis_types", - "rand", + "rand 0.8.5", "reqwest", "schemars", "serde", @@ -9303,8 +9362,8 @@ dependencies = [ "bitflags 2.6.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -9341,7 +9400,7 @@ dependencies = [ "dropshot 0.12.0", "futures", "hickory-resolver", - "rand", + "rand 0.8.5", "schemars", "serde", "serde_json", @@ -9361,9 +9420,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.33.0" +version = "0.37.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca7dd09b5f4a9029c35e323b086d0a68acdc673317b9c4d002c6f1d4a7278c6" +checksum = "165859e9e55f79d67b96c5d96f4e88b6f2695a1972849c15a6a3f5c59fc2c003" dependencies = [ "memchr", "serde", @@ -9379,7 +9438,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.1", "rustls 0.23.19", "socket2", "thiserror 1.0.69", @@ -9394,9 +9453,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.1", "rustls 0.23.19", "slab", "thiserror 1.0.69", @@ -9460,8 +9519,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.2", + "zerocopy 0.8.10", ] [[package]] @@ -9471,7 +9541,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 
dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.2", ] [[package]] @@ -9480,7 +9560,17 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a509b1a2ffbe92afab0e55c8fd99dea1c280e8171bd2d88682bb20bc41cbc2c" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.10", ] [[package]] @@ -9490,7 +9580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -9499,7 +9589,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9febe641d2842ffc76ee962668a17578767c4e01735e4802b21ed9a24b2e4e" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -9508,7 +9598,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -9625,7 +9715,7 @@ dependencies = [ "slog-term", "subprocess", "swrite", - "tabled", + "tabled 0.15.0", "tokio", "uuid", ] @@ -9654,7 +9744,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -9785,9 +9875,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", @@ -9826,6 +9916,7 @@ dependencies = [ "tokio-native-tls", "tokio-rustls 0.26.0", "tokio-util", + "tower", "tower-service", "url", "wasm-bindgen", @@ -9858,15 +9949,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", - "spin", "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -9918,7 +10008,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "signature", @@ -9978,7 +10068,7 @@ dependencies = [ "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "smallvec 1.13.2", + "smallvec 1.14.0", ] [[package]] @@ -10011,8 +10101,8 @@ dependencies = [ "p384", "p521", "poly1305", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "russh-cryptovec", "russh-keys", "sha1", @@ -10067,8 +10157,8 @@ dependencies = [ "pkcs1", "pkcs5", "pkcs8", - "rand", - "rand_core", + "rand 0.8.5", + 
"rand_core 0.6.4", "rsa", "russh-cryptovec", "sec1", @@ -10110,9 +10200,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -10344,12 +10434,12 @@ dependencies = [ [[package]] name = "samael" -version = "0.0.17" +version = "0.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c3e9664150c82db0eba06db746594e1e8e092c5c91986ee0fe46c0619fb159f" +checksum = "bcefa9336e783352848ad9a1cb86bec02030624c0ce8205f79a19d4a7b6f148d" dependencies = [ "base64 0.22.1", - "bindgen", + "bindgen 0.71.1", "chrono", "data-encoding", "derive_builder", @@ -10362,9 +10452,9 @@ dependencies = [ "openssl-sys", "pkg-config", "quick-xml", - "rand", + "rand 0.8.5", "serde", - "thiserror 1.0.69", + "thiserror 2.0.11", "url", "uuid", ] @@ -10406,6 +10496,7 @@ dependencies = [ "chrono", "dyn-clone", "schemars_derive", + "semver 1.0.25", "serde", "serde_json", "uuid", @@ -10533,9 +10624,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] @@ -10562,9 +10653,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", @@ -10812,7 +10903,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -10842,9 +10933,9 @@ dependencies = [ [[package]] name = "similar-asserts" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e" +checksum = "9f08357795f0d604ea7d7130f22c74b03838c959bdb14adde3142aab4d18a293" dependencies = [ "console", "similar", @@ -10989,7 +11080,7 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "rand", + "rand 0.8.5", "schemars", "serde", "sled-hardware-types", @@ -11033,7 +11124,7 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "rand", + "rand 0.8.5", "schemars", "serde", "serde_json", @@ -11181,9 +11272,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" [[package]] name = "smawk" @@ -11252,9 +11343,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -11403,7 +11494,7 @@ dependencies = [ "p256", "p384", "p521", - "rand_core", + "rand_core 0.6.4", "rsa", "sec1", "sha2", @@ -11494,18 +11585,6 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "structmeta" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ad9e09554f0456d67a69c1584c9798ba733a5b50349a6c0d0948710523922d" -dependencies = [ - "proc-macro2", - "quote", - "structmeta-derive 0.2.0", - "syn 2.0.96", -] - [[package]] name = "structmeta" version = "0.3.0" @@ -11514,18 +11593,7 @@ checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" dependencies = [ "proc-macro2", "quote", - "structmeta-derive 0.3.0", - "syn 2.0.96", -] - -[[package]] -name = "structmeta-derive" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" -dependencies = [ - "proc-macro2", - "quote", + "structmeta-derive", "syn 2.0.96", ] @@ -11694,11 +11762,21 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c998b0c8b921495196a48aabaf1901ff28be0760136e31604f7967b0792050e" dependencies = [ - "papergrid", - "tabled_derive", + "papergrid 0.11.0", + "tabled_derive 0.7.0", "unicode-width 0.1.14", ] +[[package]] +name = "tabled" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121d8171ee5687a4978d1b244f7d99c43e7385a272185a2f1e1fa4dc0979d444" +dependencies = [ + "papergrid 0.14.0", + "tabled_derive 0.10.0", +] + [[package]] name = "tabled_derive" version = "0.7.0" @@ -11712,6 +11790,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "tabled_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52d9946811baad81710ec921809e2af67ad77719418673b2a3794932d57b7538" +dependencies = [ + "heck 0.5.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "tabwriter" version = "1.4.0" @@ -11833,13 +11924,13 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "test-strategy" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8361c808554228ad09bfed70f5c823caf8a3450b6881cc3a38eb57e8c08c1d9" +checksum = "2bf41af45e3f54cc184831d629d41d5b2bda8297e29c81add7ae4f362ed5e01b" dependencies = [ "proc-macro2", "quote", - "structmeta 0.2.0", + "structmeta", "syn 2.0.96", ] @@ -12140,7 +12231,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "rand", + "rand 0.8.5", "socket2", "tokio", "tokio-util", @@ -12353,6 +12444,27 @@ dependencies = [ "walkdir", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -12391,6 +12503,55 @@ dependencies = [ "once_cell", ] +[[package]] +name = "transceiver-controller" +version = "0.1.1" +source = "git+https://github.com/oxidecomputer/transceiver-control#9fc521c4bd0a7fa4331c555296b2888089db32a2" +dependencies = [ + "anyhow", + "clap", + "hubpack", + "itertools 0.14.0", + "nix 0.29.0", + "schemars", + "serde", + "slog", + "slog-async", + "slog-term", + "tabled 0.18.0", + "thiserror 2.0.11", + "tokio", + "transceiver-decode", + "transceiver-messages", + "usdt", + "version_check", +] + +[[package]] +name = "transceiver-decode" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/transceiver-control#9fc521c4bd0a7fa4331c555296b2888089db32a2" +dependencies = [ + "schemars", + "serde", + "static_assertions", + "thiserror 2.0.11", + "transceiver-messages", +] + +[[package]] +name = "transceiver-messages" +version = "0.1.1" +source = "git+https://github.com/oxidecomputer/transceiver-control#9fc521c4bd0a7fa4331c555296b2888089db32a2" +dependencies = [ + "bitflags 2.6.0", + "clap", + "hubpack", + "schemars", + "serde", + "thiserror 2.0.11", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -12429,6 +12590,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "predicates", + "semver 1.0.25", "slog", "slog-async", "slog-envlogger", @@ -12463,7 +12625,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "parse-size", - "rand", + "rand 0.8.5", "semver 1.0.25", "serde", "serde_json", @@ -12500,7 +12662,7 @@ dependencies = [ "http", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", "thiserror 1.0.69", "url", @@ -12519,7 +12681,7 @@ dependencies = [ "http", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", "thiserror 1.0.69", "utf-8", @@ -12537,7 +12699,7 @@ dependencies = [ "http", "httparse", "log", - "rand", + "rand 0.8.5", "sha1", "thiserror 1.0.69", "utf-8", @@ -12549,7 +12711,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7b17f197b3050ba473acf9181f7b1d3b66d1cf7356c6cc57886662276e65908" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -12564,8 +12726,8 @@ version = "0.1.0" dependencies = [ "newtype-uuid", "omicron-workspace-hack", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rand_seeder", "uuid", ] @@ -12815,7 +12977,8 @@ dependencies = [ "omicron-common", "omicron-test-utils", "omicron-workspace-hack", - "rand", + "rand 0.8.5", + "semver 1.0.25", "sha2", "slog", "tar", @@ -12971,7 +13134,7 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" dependencies = [ - "getrandom", + "getrandom 0.2.15", "serde", ] @@ -13028,9 +13191,9 @@ dependencies = [ "curve25519-dalek", "elliptic-curve", "hex", - "rand", - "rand_chacha", - "rand_core", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "serde", "subtle", "thiserror-no-std", @@ -13100,6 +13263,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + 
[[package]] name = "wasite" version = "0.1.0" @@ -13260,12 +13432,12 @@ dependencies = [ "omicron-common", "omicron-passwords", "omicron-workspace-hack", - "once_cell", "owo-colors", "proptest", "ratatui", "reqwest", "rpassword", + "semver 1.0.25", "serde", "serde_json", "shell-words", @@ -13281,6 +13453,7 @@ dependencies = [ "tokio-util", "toml 0.8.19", "toml_edit 0.22.22", + "transceiver-controller", "tui-tree-widget", "unicode-width 0.1.14", "update-engine", @@ -13311,6 +13484,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "toml 0.8.19", + "transceiver-controller", "update-engine", ] @@ -13383,13 +13557,13 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "once_cell", "openapi-lint", "openapiv3", "oxnet", - "rand", + "rand 0.8.5", "reqwest", "schemars", + "semver 1.0.25", "serde", "serde_json", "sha2", @@ -13404,6 +13578,7 @@ dependencies = [ "tokio-util", "toml 0.8.19", "tough", + "transceiver-controller", "tufaceous", "tufaceous-lib", "update-common", @@ -13427,6 +13602,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "schemars", + "semver 1.0.25", "serde", "sled-hardware-types", "slog", @@ -13446,6 +13622,7 @@ dependencies = [ "regress 0.9.1", "reqwest", "schemars", + "semver 1.0.25", "serde", "serde_json", "sled-agent-types", @@ -13718,6 +13895,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -13776,7 +13962,7 @@ dependencies = [ "macaddr", "serde", "swrite", - "tabled", + "tabled 0.15.0", "textwrap", "toml 0.8.19", "usdt", diff --git a/Cargo.toml b/Cargo.toml index a343f26eb85..6b4adcd3895 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -412,7 +412,7 @@ gateway-test-utils = { path = "gateway-test-utils" } gateway-types = { path = "gateway-types" } gethostname = "0.5.0" glob = "0.3.2" -guppy = "0.17.16" +guppy = "0.17.17" headers = "0.4.0" heck = "0.5" hex = "0.4.3" @@ -455,7 +455,7 @@ ipnetwork = { version = "0.21", features = ["schemars", "serde"] } ispf = { git = "https://github.com/oxidecomputer/ispf" } key-manager = { path = "key-manager" } kstat-rs = "0.2.4" -libc = "0.2.169" +libc = "0.2.170" libipcc = { git = "https://github.com/oxidecomputer/libipcc", rev = "fdffa212373a8f92473ea5f411088912bf458d5f" } libfalcon = { git = "https://github.com/oxidecomputer/falcon", branch = "main" } libnvme = { git = "https://github.com/oxidecomputer/libnvme", rev = "dd5bb221d327a1bc9287961718c3c10d6bd37da0" } @@ -555,7 +555,7 @@ pem = "3.0" # false: petname = { workspace = true, default-features = false }. 
petname = { version = "2.0.2", default-features = false, features = ["default-rng", "default-words"] } petgraph = "0.6.5" -postgres-protocol = "0.6.7" +postgres-protocol = "0.6.8" predicates = "3.1.3" pretty_assertions = "1.4.1" pretty-hex = "0.4.1" @@ -589,20 +589,20 @@ regress = "0.9.1" repo-depot-api = { path = "sled-agent/repo-depot-api" } repo-depot-client = { path = "clients/repo-depot-client" } reqwest = { version = "0.12", default-features = false } -ring = "0.17.8" +ring = "0.17.11" rpassword = "7.3.1" rstest = "0.23.0" rustfmt-wrapper = "0.2" rustls = "0.22.2" rustls-pemfile = "2.2.0" rustyline = "14.0.0" -samael = { version = "0.0.17", features = ["xmlsec"] } +samael = { version = "0.0.18", features = ["xmlsec"] } schemars = "0.8.21" secrecy = "0.8.0" -semver = { version = "1.0.23", features = ["std", "serde"] } +semver = { version = "1.0.25", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } serde_human_bytes = { git = "https://github.com/oxidecomputer/serde_human_bytes", branch = "main" } -serde_json = "1.0.133" +serde_json = "1.0.139" serde_path_to_error = "0.1.16" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" @@ -614,7 +614,7 @@ signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } sigpipe = "0.1.3" similar = { version = "2.6.0", features = ["bytes"] } -similar-asserts = "1.6.0" +similar-asserts = "1.6.1" # Don't change sled's version on accident; sled's on-disk format is not yet # stable and requires manual migrations. In the limit this won't matter because # the upgrade system will replace the DNS server zones entirely, but while we @@ -660,7 +660,7 @@ term = "0.7" termios = "0.3" termtree = "0.5.1" textwrap = "0.16.1" -test-strategy = "0.3.1" +test-strategy = "0.4.0" thiserror = "1.0" tofino = { git = "https://github.com/oxidecomputer/tofino", branch = "main" } tokio = "1.40.0" diff --git a/clients/bootstrap-agent-client/Cargo.toml b/clients/bootstrap-agent-client/Cargo.toml index e152e319662..bc9d501c78c 100644 --- a/clients/bootstrap-agent-client/Cargo.toml +++ b/clients/bootstrap-agent-client/Cargo.toml @@ -21,3 +21,4 @@ uuid.workspace = true omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true oxnet.workspace = true +semver.workspace = true diff --git a/clients/bootstrap-agent-client/src/lib.rs b/clients/bootstrap-agent-client/src/lib.rs index c737283d848..0bcac4ca9ac 100644 --- a/clients/bootstrap-agent-client/src/lib.rs +++ b/clients/bootstrap-agent-client/src/lib.rs @@ -26,6 +26,12 @@ progenitor::generate_api!( ImportExportPolicy = omicron_common::api::external::ImportExportPolicy, TypedUuidForRackInitKind = omicron_uuid_kinds::RackInitUuid, TypedUuidForRackResetKind = omicron_uuid_kinds::RackResetUuid, + }, + convert = { + { + type = "string", + pattern = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", + } = semver::Version, } ); diff --git a/clients/wicketd-client/Cargo.toml b/clients/wicketd-client/Cargo.toml index 5e52eedb490..06fc8c696a1 100644 --- a/clients/wicketd-client/Cargo.toml +++ b/clients/wicketd-client/Cargo.toml @@ -17,6 +17,7 @@ progenitor.workspace = true regress.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } schemars.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true sled-agent-types.workspace = true diff 
--git a/clients/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs index 40b60ac612b..bef57a982ee 100644 --- a/clients/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -91,6 +91,12 @@ progenitor::generate_api!( UserSpecifiedImportExportPolicy = wicket_common::rack_setup::UserSpecifiedImportExportPolicy, UserSpecifiedPortConfig = wicket_common::rack_setup::UserSpecifiedPortConfig, UserSpecifiedRackNetworkConfig = wicket_common::rack_setup::UserSpecifiedRackNetworkConfig, + }, + convert = { + { + type = "string", + pattern = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", + } = semver::Version, } ); diff --git a/clippy.toml b/clippy.toml index 677fb67aded..a3004e43bb5 100644 --- a/clippy.toml +++ b/clippy.toml @@ -16,4 +16,15 @@ disallowed-methods = [ # Instead, the "transaction_retry_wrapper" should be preferred, as it # automatically retries transactions experiencing contention. { path = "async_bb8_diesel::AsyncConnection::transaction_async", reason = "Prefer to use transaction_retry_wrapper, if possible. For tests and nested transactions, use transaction_non_retry_wrapper to at least get dtrace probes" }, + + # We use disallowed-methods for these rather than disallowed-types, because + # there's still one legitimate use for `once_cell`'s types: + # `get_or_try_init`, which isn't stable yet. + # https://github.com/rust-lang/rust/issues/109737 + { path = "once_cell::unsync::OnceCell::get_or_init", reason = "use `std::cell::OnceCell` instead, unless you need get_or_try_init in which case #[expect] this lint" }, + { path = "once_cell::sync::OnceCell::get_or_init", reason = "use `std::sync::OnceLock` instead, unless you need get_or_try_init in which case #[expect] this lint" }, +] +disallowed-types = [ + { path = "once_cell::unsync::Lazy", reason = "use `std::cell::LazyCell` instead" }, + { path = "once_cell::sync::Lazy", reason = "use `std::sync::LazyLock` instead" }, ] diff --git a/cockroach-admin/Cargo.toml b/cockroach-admin/Cargo.toml index 1738fd98e59..bdaf49f3b40 100644 --- a/cockroach-admin/Cargo.toml +++ b/cockroach-admin/Cargo.toml @@ -20,7 +20,6 @@ http.workspace = true illumos-utils.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true -once_cell.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency.
pq-sys = "*" schemars.workspace = true diff --git a/common/Cargo.toml b/common/Cargo.toml index 4eebfe9a8ab..99902e1cd75 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -33,7 +33,7 @@ oxnet.workspace = true proptest = { workspace = true, optional = true } rand.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } -schemars = { workspace = true, features = ["chrono", "uuid1"] } +schemars = { workspace = true, features = ["chrono", "semver", "uuid1"] } semver.workspace = true serde.workspace = true serde_human_bytes.workspace = true @@ -49,7 +49,6 @@ uuid.workspace = true parse-display.workspace = true progenitor-client.workspace = true omicron-workspace-hack.workspace = true -once_cell.workspace = true regress.workspace = true [dev-dependencies] diff --git a/common/src/address.rs b/common/src/address.rs index a311bd688ed..dc79163581c 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -10,11 +10,13 @@ use crate::api::external::{self, Error}; use crate::policy::INTERNAL_DNS_REDUNDANCY; use ipnetwork::Ipv6Network; -use once_cell::sync::Lazy; use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}, + sync::LazyLock, +}; pub const AZ_PREFIX: u8 = 48; pub const RACK_PREFIX: u8 = 56; @@ -101,7 +103,7 @@ pub const NUM_SOURCE_NAT_PORTS: u16 = 1 << 14; // prefix range (`fd00::/48`). See `random_vpc_ipv6_prefix`. // Furthermore, all the below *_OPTE_IPV6_SUBNET constants are // /64's within this prefix. -pub static SERVICE_VPC_IPV6_PREFIX: Lazy<Ipv6Net> = Lazy::new(|| { +pub static SERVICE_VPC_IPV6_PREFIX: LazyLock<Ipv6Net> = LazyLock::new(|| { Ipv6Net::new( Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 0, 0, 0, 0, 0), VPC_IPV6_PREFIX_LENGTH, ) @@ -110,11 +112,11 @@ }); /// The IPv4 subnet for External DNS OPTE ports. -pub static DNS_OPTE_IPV4_SUBNET: Lazy<Ipv4Net> = - Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 1, 0), 24).unwrap()); +pub static DNS_OPTE_IPV4_SUBNET: LazyLock<Ipv4Net> = + LazyLock::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 1, 0), 24).unwrap()); /// The IPv6 subnet for External DNS OPTE ports. -pub static DNS_OPTE_IPV6_SUBNET: Lazy<Ipv6Net> = Lazy::new(|| { +pub static DNS_OPTE_IPV6_SUBNET: LazyLock<Ipv6Net> = LazyLock::new(|| { Ipv6Net::new( Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 1, 0, 0, 0, 0), VPC_SUBNET_IPV6_PREFIX_LENGTH, ) @@ -123,11 +125,11 @@ }); /// The IPv4 subnet for Nexus OPTE ports. -pub static NEXUS_OPTE_IPV4_SUBNET: Lazy<Ipv4Net> = - Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 2, 0), 24).unwrap()); +pub static NEXUS_OPTE_IPV4_SUBNET: LazyLock<Ipv4Net> = + LazyLock::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 2, 0), 24).unwrap()); /// The IPv6 subnet for Nexus OPTE ports. -pub static NEXUS_OPTE_IPV6_SUBNET: Lazy<Ipv6Net> = Lazy::new(|| { +pub static NEXUS_OPTE_IPV6_SUBNET: LazyLock<Ipv6Net> = LazyLock::new(|| { Ipv6Net::new( Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 2, 0, 0, 0, 0), VPC_SUBNET_IPV6_PREFIX_LENGTH, ) @@ -136,11 +138,11 @@ }); /// The IPv4 subnet for Boundary NTP OPTE ports. -pub static NTP_OPTE_IPV4_SUBNET: Lazy<Ipv4Net> = - Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 3, 0), 24).unwrap()); +pub static NTP_OPTE_IPV4_SUBNET: LazyLock<Ipv4Net> = + LazyLock::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 3, 0), 24).unwrap()); /// The IPv6 subnet for Boundary NTP OPTE ports.
-pub static NTP_OPTE_IPV6_SUBNET: Lazy<Ipv6Net> = Lazy::new(|| { +pub static NTP_OPTE_IPV6_SUBNET: LazyLock<Ipv6Net> = LazyLock::new(|| { Ipv6Net::new( Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 3, 0, 0, 0, 0), VPC_SUBNET_IPV6_PREFIX_LENGTH, diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index c40a9a65b5e..30fbd7a21e5 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -31,7 +31,7 @@ use parse_display::FromStr; use rand::thread_rng; use rand::Rng; use schemars::JsonSchema; -use semver; +use semver::Version; use serde::Deserialize; use serde::Serialize; use serde_with::{DeserializeFromStr, SerializeDisplay}; @@ -293,6 +293,12 @@ impl<'a> From<&'a Name> for &'a str { } } +impl From<Name> for String { + fn from(name: Name) -> Self { + name.0 + } +} + /// `Name` instances are comparable like Strings, primarily so that they can /// be used as keys in trees. impl<S> PartialEq<S> for Name @@ -488,56 +494,6 @@ fn name_schema( .into() } -// TODO: remove wrapper for semver::Version once this PR goes through -// https://github.com/GREsau/schemars/pull/195 -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - Display, - FromStr, -)] -#[display("{0}")] -#[serde(transparent)] -pub struct SemverVersion(pub semver::Version); - -impl SemverVersion { - pub const fn new(major: u64, minor: u64, patch: u64) -> Self { - Self(semver::Version::new(major, minor, patch)) - } - - /// This is the official ECMAScript-compatible validation regex for - /// semver: - /// <https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string> - const VALIDATION_REGEX: &'static str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"; -} - -impl JsonSchema for SemverVersion { - fn schema_name() -> String { - "SemverVersion".to_string() - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some(Self::VALIDATION_REGEX.to_owned()), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - /// Name for a built-in role #[derive( Clone, @@ -3166,7 +3122,7 @@ pub struct TufRepoMeta { pub valid_until: DateTime<Utc>, /// The system version in artifacts.json. - pub system_version: SemverVersion, + pub system_version: Version, /// The file name of the repository. /// @@ -3263,7 +3219,6 @@ mod test { use super::Generation; use super::RouteDestination; use super::RouteTarget; - use super::SemverVersion; use super::VpcFirewallRuleHostFilter; use super::VpcFirewallRuleTarget; use super::{ @@ -3276,61 +3231,21 @@ mod test { use crate::api::external::Error; use crate::api::external::Hostname; use crate::api::external::ResourceType; + use semver::Version; use std::convert::TryFrom; use std::str::FromStr; - #[test] - fn test_semver_validation() { - // Examples copied from - // https://github.com/dtolnay/semver/blob/cc2cfed67c17dfe6abae18726830bdb6d7cf740d/tests/test_version.rs#L13. - let valid = [ - "1.2.3", - "1.2.3-alpha1", - "1.2.3+build5", - "1.2.3+5build", - "1.2.3-alpha1+build5", - "1.2.3-1.alpha1.9+build5.7.3aedf", - "1.2.3-0a.alpha1.9+05build.7.3aed", - "0.4.0-beta.1+0851523", - "1.1.0-beta-10", - ]; - let invalid = [ - // These examples are rejected by the validation regex.
- "", - "1", - "1.2", - "1.2.3-", - "a.b.c", - "1.2.3 abc", - "1.2.3-01", - ]; - - let r = regress::Regex::new(SemverVersion::VALIDATION_REGEX) - .expect("validation regex is valid"); - for input in valid { - let m = r - .find(input) - .unwrap_or_else(|| panic!("input {input} did not match regex")); - assert_eq!(m.start(), 0, "input {input} did not match start"); - assert_eq!(m.end(), input.len(), "input {input} did not match end"); - } - - for input in invalid { - assert!( - r.find(input).is_none(), - "invalid input {input} should not match validation regex" - ); - } - } - + // This test originates from when we had a wrapper struct around + // `semver::Version`, but it's worth keeping: it ensures the + // serialization behavior we rely on stays accurate. #[test] fn test_semver_serialize() { #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] struct MyStruct { - version: SemverVersion, + version: Version, } - let v = MyStruct { version: SemverVersion::new(1, 2, 3) }; + let v = MyStruct { version: Version::new(1, 2, 3) }; let expected = "{\"version\":\"1.2.3\"}"; assert_eq!(serde_json::to_string(&v).unwrap(), expected); assert_eq!(serde_json::from_str::<MyStruct>(expected).unwrap(), v); } diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 0396ffc28fa..14c9b2b8885 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -5,8 +5,7 @@ //! APIs exposed by Nexus. use crate::api::external::{ - ByteCount, DiskState, Generation, Hostname, InstanceCpuCount, - SemverVersion, Vni, + ByteCount, DiskState, Generation, Hostname, InstanceCpuCount, Vni, }; use chrono::{DateTime, Utc}; use omicron_uuid_kinds::DownstairsRegionKind; @@ -15,6 +14,7 @@ use omicron_uuid_kinds::UpstairsRepairKind; use omicron_uuid_kinds::UpstairsSessionKind; use parse_display::{Display, FromStr}; use schemars::JsonSchema; +use semver::Version; use serde::{Deserialize, Serialize}; use std::fmt; use std::net::SocketAddr; @@ -259,7 +259,7 @@ pub struct UpdateArtifactId { pub name: String, /// The artifact's version. - pub version: SemverVersion, + pub version: Version, /// The kind of update artifact this is. pub kind: KnownArtifactKind, diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index a3f02e5b759..63bdaa0a374 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -577,6 +577,16 @@ pub enum SwitchLocation { Switch1, } +impl SwitchLocation { + /// Return the location of the other switch, not our own. + pub const fn other(&self) -> Self { + match self { + SwitchLocation::Switch0 => SwitchLocation::Switch1, + SwitchLocation::Switch1 => SwitchLocation::Switch0, + } + } +} + impl fmt::Display for SwitchLocation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/common/src/update.rs b/common/src/update.rs index fc747cf16d9..c712ae45019 100644 --- a/common/src/update.rs +++ b/common/src/update.rs @@ -4,27 +4,28 @@ use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; -use crate::api::{external::SemverVersion, internal::nexus::KnownArtifactKind}; +use crate::api::internal::nexus::KnownArtifactKind; use hex::FromHexError; use schemars::{ gen::SchemaGenerator, schema::{Schema, SchemaObject}, JsonSchema, }; +use semver::Version; use serde::{Deserialize, Serialize}; /// Description of the `artifacts.json` target found in rack update /// repositories.
#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ArtifactsDocument { - pub system_version: SemverVersion, + pub system_version: Version, pub artifacts: Vec<Artifact>, } impl ArtifactsDocument { /// Creates an artifacts document with the provided system version and an /// empty list of artifacts. - pub fn empty(system_version: SemverVersion) -> Self { + pub fn empty(system_version: Version) -> Self { Self { system_version, artifacts: Vec::new() } } } @@ -47,7 +48,7 @@ pub struct Artifact { /// In the future when [`KnownArtifactKind::ControlPlane`] is split up into /// separate zones, `name` will be the zone name. pub name: String, - pub version: SemverVersion, + pub version: Version, pub kind: ArtifactKind, pub target: String, } @@ -89,7 +90,7 @@ pub struct ArtifactId { pub name: String, /// The artifact's version. - pub version: SemverVersion, + pub version: Version, /// The kind of artifact this is. pub kind: ArtifactKind, diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 1672d2ac283..bbeeeec0244 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -1342,19 +1342,19 @@ parent: sled: ..................... (active) physical disks at generation 2: - --------------------------------------------------- - vendor model serial - --------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-10 - nexus-tests nexus-test-model nexus-test-disk-11 - nexus-tests nexus-test-model nexus-test-disk-12 - nexus-tests nexus-test-model nexus-test-disk-13 - nexus-tests nexus-test-model nexus-test-disk-14 - nexus-tests nexus-test-model nexus-test-disk-15 - nexus-tests nexus-test-model nexus-test-disk-16 - nexus-tests nexus-test-model nexus-test-disk-17 - nexus-tests nexus-test-model nexus-test-disk-18 - nexus-tests nexus-test-model nexus-test-disk-19 + ----------------------------------------------------------------- + vendor model serial disposition + ----------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-10 in service + nexus-tests nexus-test-model nexus-test-disk-11 in service + nexus-tests nexus-test-model nexus-test-disk-12 in service + nexus-tests nexus-test-model nexus-test-disk-13 in service + nexus-tests nexus-test-model nexus-test-disk-14 in service + nexus-tests nexus-test-model nexus-test-disk-15 in service + nexus-tests nexus-test-model nexus-test-disk-16 in service + nexus-tests nexus-test-model nexus-test-disk-17 in service + nexus-tests nexus-test-model nexus-test-disk-18 in service + nexus-tests nexus-test-model nexus-test-disk-19 in service datasets at generation 2: @@ -1373,19 +1373,19 @@ parent: sled: .....................
(active) physical disks at generation 2: - -------------------------------------------------- - vendor model serial - -------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-0 - nexus-tests nexus-test-model nexus-test-disk-1 - nexus-tests nexus-test-model nexus-test-disk-2 - nexus-tests nexus-test-model nexus-test-disk-3 - nexus-tests nexus-test-model nexus-test-disk-4 - nexus-tests nexus-test-model nexus-test-disk-5 - nexus-tests nexus-test-model nexus-test-disk-6 - nexus-tests nexus-test-model nexus-test-disk-7 - nexus-tests nexus-test-model nexus-test-disk-8 - nexus-tests nexus-test-model nexus-test-disk-9 + ---------------------------------------------------------------- + vendor model serial disposition + ---------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-0 in service + nexus-tests nexus-test-model nexus-test-disk-1 in service + nexus-tests nexus-test-model nexus-test-disk-2 in service + nexus-tests nexus-test-model nexus-test-disk-3 in service + nexus-tests nexus-test-model nexus-test-disk-4 in service + nexus-tests nexus-test-model nexus-test-disk-5 in service + nexus-tests nexus-test-model nexus-test-disk-6 in service + nexus-tests nexus-test-model nexus-test-disk-7 in service + nexus-tests nexus-test-model nexus-test-disk-8 in service + nexus-tests nexus-test-model nexus-test-disk-9 in service datasets at generation 2: @@ -1438,19 +1438,19 @@ parent: sled: ..................... (active) physical disks at generation 2: - --------------------------------------------------- - vendor model serial - --------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-10 - nexus-tests nexus-test-model nexus-test-disk-11 - nexus-tests nexus-test-model nexus-test-disk-12 - nexus-tests nexus-test-model nexus-test-disk-13 - nexus-tests nexus-test-model nexus-test-disk-14 - nexus-tests nexus-test-model nexus-test-disk-15 - nexus-tests nexus-test-model nexus-test-disk-16 - nexus-tests nexus-test-model nexus-test-disk-17 - nexus-tests nexus-test-model nexus-test-disk-18 - nexus-tests nexus-test-model nexus-test-disk-19 + ----------------------------------------------------------------- + vendor model serial disposition + ----------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-10 in service + nexus-tests nexus-test-model nexus-test-disk-11 in service + nexus-tests nexus-test-model nexus-test-disk-12 in service + nexus-tests nexus-test-model nexus-test-disk-13 in service + nexus-tests nexus-test-model nexus-test-disk-14 in service + nexus-tests nexus-test-model nexus-test-disk-15 in service + nexus-tests nexus-test-model nexus-test-disk-16 in service + nexus-tests nexus-test-model nexus-test-disk-17 in service + nexus-tests nexus-test-model nexus-test-disk-18 in service + nexus-tests nexus-test-model nexus-test-disk-19 in service datasets at generation 2: @@ -1469,19 +1469,19 @@ parent: sled: ..................... 
(active) physical disks at generation 2: - -------------------------------------------------- - vendor model serial - -------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-0 - nexus-tests nexus-test-model nexus-test-disk-1 - nexus-tests nexus-test-model nexus-test-disk-2 - nexus-tests nexus-test-model nexus-test-disk-3 - nexus-tests nexus-test-model nexus-test-disk-4 - nexus-tests nexus-test-model nexus-test-disk-5 - nexus-tests nexus-test-model nexus-test-disk-6 - nexus-tests nexus-test-model nexus-test-disk-7 - nexus-tests nexus-test-model nexus-test-disk-8 - nexus-tests nexus-test-model nexus-test-disk-9 + ---------------------------------------------------------------- + vendor model serial disposition + ---------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-0 in service + nexus-tests nexus-test-model nexus-test-disk-1 in service + nexus-tests nexus-test-model nexus-test-disk-2 in service + nexus-tests nexus-test-model nexus-test-disk-3 in service + nexus-tests nexus-test-model nexus-test-disk-4 in service + nexus-tests nexus-test-model nexus-test-disk-5 in service + nexus-tests nexus-test-model nexus-test-disk-6 in service + nexus-tests nexus-test-model nexus-test-disk-7 in service + nexus-tests nexus-test-model nexus-test-disk-8 in service + nexus-tests nexus-test-model nexus-test-disk-9 in service datasets at generation 2: @@ -1536,37 +1536,37 @@ to: blueprint ............. sled ..................... (active): physical disks at generation 2: - --------------------------------------------------- - vendor model serial - --------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-10 - nexus-tests nexus-test-model nexus-test-disk-11 - nexus-tests nexus-test-model nexus-test-disk-12 - nexus-tests nexus-test-model nexus-test-disk-13 - nexus-tests nexus-test-model nexus-test-disk-14 - nexus-tests nexus-test-model nexus-test-disk-15 - nexus-tests nexus-test-model nexus-test-disk-16 - nexus-tests nexus-test-model nexus-test-disk-17 - nexus-tests nexus-test-model nexus-test-disk-18 - nexus-tests nexus-test-model nexus-test-disk-19 + ----------------------------------------------------------------- + vendor model serial disposition + ----------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-10 in service + nexus-tests nexus-test-model nexus-test-disk-11 in service + nexus-tests nexus-test-model nexus-test-disk-12 in service + nexus-tests nexus-test-model nexus-test-disk-13 in service + nexus-tests nexus-test-model nexus-test-disk-14 in service + nexus-tests nexus-test-model nexus-test-disk-15 in service + nexus-tests nexus-test-model nexus-test-disk-16 in service + nexus-tests nexus-test-model nexus-test-disk-17 in service + nexus-tests nexus-test-model nexus-test-disk-18 in service + nexus-tests nexus-test-model nexus-test-disk-19 in service sled ..................... 
(active): physical disks at generation 2: - -------------------------------------------------- - vendor model serial - -------------------------------------------------- - nexus-tests nexus-test-model nexus-test-disk-0 - nexus-tests nexus-test-model nexus-test-disk-1 - nexus-tests nexus-test-model nexus-test-disk-2 - nexus-tests nexus-test-model nexus-test-disk-3 - nexus-tests nexus-test-model nexus-test-disk-4 - nexus-tests nexus-test-model nexus-test-disk-5 - nexus-tests nexus-test-model nexus-test-disk-6 - nexus-tests nexus-test-model nexus-test-disk-7 - nexus-tests nexus-test-model nexus-test-disk-8 - nexus-tests nexus-test-model nexus-test-disk-9 + ---------------------------------------------------------------- + vendor model serial disposition + ---------------------------------------------------------------- + nexus-tests nexus-test-model nexus-test-disk-0 in service + nexus-tests nexus-test-model nexus-test-disk-1 in service + nexus-tests nexus-test-model nexus-test-disk-2 in service + nexus-tests nexus-test-model nexus-test-disk-3 in service + nexus-tests nexus-test-model nexus-test-disk-4 in service + nexus-tests nexus-test-model nexus-test-disk-5 in service + nexus-tests nexus-test-model nexus-test-disk-6 in service + nexus-tests nexus-test-model nexus-test-disk-7 in service + nexus-tests nexus-test-model nexus-test-disk-8 in service + nexus-tests nexus-test-model nexus-test-disk-9 in service datasets at generation 2: diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout index de70fa82ea6..b1c2148beb2 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout @@ -69,19 +69,19 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 2eb69596-f081-4e2d-9425-9994926e0832 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-088ed702-551e-453b-80d7-57700372a844 - fake-vendor fake-model serial-09e51697-abad-47c0-a193-eaf74bc5d3cd - fake-vendor fake-model serial-3a512d49-edbe-47f3-8d0b-6051bfdc4044 - fake-vendor fake-model serial-40517680-aa77-413c-bcf4-b9041dcf6612 - fake-vendor fake-model serial-78d3cb96-9295-4644-bf78-2e32191c71f9 - fake-vendor fake-model serial-853595e7-77da-404e-bc35-aba77478d55c - fake-vendor fake-model serial-8926e0e7-65d9-4e2e-ac6d-f1298af81ef1 - fake-vendor fake-model serial-9c0b9151-17f3-4857-94cc-b5bfcd402326 - fake-vendor fake-model serial-d61354fa-48d2-47c6-90bf-546e3ed1708b - fake-vendor fake-model serial-d792c8cb-7490-40cb-bb1c-d4917242edf4 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-088ed702-551e-453b-80d7-57700372a844 in service + fake-vendor fake-model serial-09e51697-abad-47c0-a193-eaf74bc5d3cd in service + fake-vendor fake-model serial-3a512d49-edbe-47f3-8d0b-6051bfdc4044 in service + fake-vendor fake-model serial-40517680-aa77-413c-bcf4-b9041dcf6612 in service + fake-vendor fake-model serial-78d3cb96-9295-4644-bf78-2e32191c71f9 in service + fake-vendor fake-model serial-853595e7-77da-404e-bc35-aba77478d55c in service + fake-vendor fake-model serial-8926e0e7-65d9-4e2e-ac6d-f1298af81ef1 in service + fake-vendor 
fake-model serial-9c0b9151-17f3-4857-94cc-b5bfcd402326 in service + fake-vendor fake-model serial-d61354fa-48d2-47c6-90bf-546e3ed1708b in service + fake-vendor fake-model serial-d792c8cb-7490-40cb-bb1c-d4917242edf4 in service datasets at generation 2: @@ -165,19 +165,19 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-128b0f04-229b-48dc-9c5c-555cb5723ed8 - fake-vendor fake-model serial-43ae0f4e-b0cf-4d74-8636-df0567ba01e6 - fake-vendor fake-model serial-4e9806d0-41cd-48c2-86ef-7f815c3ce3b1 - fake-vendor fake-model serial-70bb6d98-111f-4015-9d97-9ef1b2d6dcac - fake-vendor fake-model serial-7ce5029f-703c-4c08-8164-9af9cf1acf23 - fake-vendor fake-model serial-b113c11f-44e6-4fb4-a56e-1d91bd652faf - fake-vendor fake-model serial-bf149c80-2498-481c-9989-6344da914081 - fake-vendor fake-model serial-c69b6237-09f9-45aa-962c-5dbdd1d894be - fake-vendor fake-model serial-ccd5a87b-00ae-42ad-85da-b37d70436cb1 - fake-vendor fake-model serial-d7410a1c-e01d-49a4-be9c-f861f086760a + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-128b0f04-229b-48dc-9c5c-555cb5723ed8 in service + fake-vendor fake-model serial-43ae0f4e-b0cf-4d74-8636-df0567ba01e6 in service + fake-vendor fake-model serial-4e9806d0-41cd-48c2-86ef-7f815c3ce3b1 in service + fake-vendor fake-model serial-70bb6d98-111f-4015-9d97-9ef1b2d6dcac in service + fake-vendor fake-model serial-7ce5029f-703c-4c08-8164-9af9cf1acf23 in service + fake-vendor fake-model serial-b113c11f-44e6-4fb4-a56e-1d91bd652faf in service + fake-vendor fake-model serial-bf149c80-2498-481c-9989-6344da914081 in service + fake-vendor fake-model serial-c69b6237-09f9-45aa-962c-5dbdd1d894be in service + fake-vendor fake-model serial-ccd5a87b-00ae-42ad-85da-b37d70436cb1 in service + fake-vendor fake-model serial-d7410a1c-e01d-49a4-be9c-f861f086760a in service datasets at generation 2: @@ -258,19 +258,19 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 89d02b1b-478c-401a-8e28-7a26f74fa41b (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8 - fake-vendor fake-model serial-5265edc6-debf-4687-a758-a9746893ebd3 - fake-vendor fake-model serial-532fbd69-b472-4445-86af-4c4c85afb313 - fake-vendor fake-model serial-54fd6fa6-ce3c-4abe-8c9d-7e107e159e84 - fake-vendor fake-model serial-8562317c-4736-4cfc-9292-7dcab96a6fee - fake-vendor fake-model serial-9a1327e4-d11b-4d98-8454-8c41862e9832 - fake-vendor fake-model serial-bf9d6692-64bc-459a-87dd-e7a83080a210 - fake-vendor fake-model serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6 - fake-vendor fake-model serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c - fake-vendor fake-model serial-fe1d5b9f-8db7-4e2d-bf17-c4b80e1f897c + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model 
serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8 in service + fake-vendor fake-model serial-5265edc6-debf-4687-a758-a9746893ebd3 in service + fake-vendor fake-model serial-532fbd69-b472-4445-86af-4c4c85afb313 in service + fake-vendor fake-model serial-54fd6fa6-ce3c-4abe-8c9d-7e107e159e84 in service + fake-vendor fake-model serial-8562317c-4736-4cfc-9292-7dcab96a6fee in service + fake-vendor fake-model serial-9a1327e4-d11b-4d98-8454-8c41862e9832 in service + fake-vendor fake-model serial-bf9d6692-64bc-459a-87dd-e7a83080a210 in service + fake-vendor fake-model serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6 in service + fake-vendor fake-model serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c in service + fake-vendor fake-model serial-fe1d5b9f-8db7-4e2d-bf17-c4b80e1f897c in service datasets at generation 2: @@ -413,13 +413,13 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 89d02b1b-478c-401a-8e28-7a26f74fa41b (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8 - fake-vendor fake-model serial-8562317c-4736-4cfc-9292-7dcab96a6fee - fake-vendor fake-model serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6 - fake-vendor fake-model serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-44fa7024-c2bc-4d2c-b478-c4997e4aece8 in service + fake-vendor fake-model serial-8562317c-4736-4cfc-9292-7dcab96a6fee in service + fake-vendor fake-model serial-ce1c13f3-bef2-4306-b0f2-4e39bd4a18b6 in service + fake-vendor fake-model serial-f931ec80-a3e3-4adb-a8ba-fa5adbd2294c in service datasets at generation 2: diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-expunge-newly-added-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-expunge-newly-added-stdout index 50f5d4522ad..c553aec7bc0 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-expunge-newly-added-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-expunge-newly-added-stdout @@ -13,19 +13,19 @@ parent: 06c88262-f435-410e-ba98-101bed41ec27 sled: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-3b46c403-ad14-435c-a1a8-e8f940bf814f - fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 - fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef - fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 - fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 - fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 - fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b - fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c - fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad - fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model 
serial-3b46c403-ad14-435c-a1a8-e8f940bf814f in service + fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 in service + fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef in service + fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 in service + fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 in service + fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 in service + fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b in service + fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c in service + fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad in service + fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 in service datasets at generation 2: @@ -109,19 +109,19 @@ parent: 06c88262-f435-410e-ba98-101bed41ec27 sled: 9dc50690-f9bf-4520-bf80-051d0f465c2c (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb - fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 - fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 - fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c - fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 - fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a - fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 - fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 - fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 - fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb in service + fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 in service + fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 in service + fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c in service + fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 in service + fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a in service + fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 in service + fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 in service + fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 in service + fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 in service datasets at generation 2: @@ -202,19 +202,19 @@ parent: 06c88262-f435-410e-ba98-101bed41ec27 sled: a88790de-5962-4871-8686-61c1fd5b7094 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b - fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 - fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 - fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 - fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e - fake-vendor fake-model 
serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 - fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 - fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 - fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 - fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b in service + fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 in service + fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 in service + fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 in service + fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e in service + fake-vendor fake-model serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 in service + fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 in service + fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 in service + fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 in service + fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 in service datasets at generation 2: @@ -316,19 +316,19 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 sled: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-3b46c403-ad14-435c-a1a8-e8f940bf814f - fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 - fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef - fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 - fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 - fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 - fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b - fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c - fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad - fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-3b46c403-ad14-435c-a1a8-e8f940bf814f in service + fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 in service + fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef in service + fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 in service + fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 in service + fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 in service + fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b in service + fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c in service + fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad in service + fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 in service datasets at generation 2: @@ -412,19 +412,19 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 sled: 9dc50690-f9bf-4520-bf80-051d0f465c2c (active) physical disks at generation 2: - 
---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb - fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 - fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 - fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c - fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 - fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a - fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 - fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 - fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 - fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb in service + fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 in service + fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 in service + fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c in service + fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 in service + fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a in service + fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 in service + fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 in service + fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 in service + fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 in service datasets at generation 2: @@ -505,19 +505,19 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 sled: a88790de-5962-4871-8686-61c1fd5b7094 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b - fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 - fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 - fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 - fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e - fake-vendor fake-model serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 - fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 - fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 - fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 - fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b in service + fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 in service + fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 in service + fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 in service + fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e in service + fake-vendor fake-model 
serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 in service + fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 in service + fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 in service + fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 in service + fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 in service datasets at generation 3: @@ -630,19 +630,19 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 sled: 711ac7f8-d19e-4572-bdb9-e9b50f6e362a (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-3b46c403-ad14-435c-a1a8-e8f940bf814f - fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 - fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef - fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 - fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 - fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 - fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b - fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c - fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad - fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-3b46c403-ad14-435c-a1a8-e8f940bf814f in service + fake-vendor fake-model serial-3f8c9484-06e8-4662-9a90-aa7e92c43405 in service + fake-vendor fake-model serial-4a62a827-4bf3-45d5-a7f5-d080f25c61ef in service + fake-vendor fake-model serial-5f774f00-52b7-41b5-a57f-6f38037196f5 in service + fake-vendor fake-model serial-68ae41d4-99ed-4612-99e1-fecf795ca694 in service + fake-vendor fake-model serial-6a66241b-b595-423d-84ef-a81b5d8430e8 in service + fake-vendor fake-model serial-7c45c3f6-6369-40d9-a73f-2f7ed0afe96b in service + fake-vendor fake-model serial-a216d334-4a9a-49dd-8b13-20548839306c in service + fake-vendor fake-model serial-c9ff8eb0-807c-40ad-a5c4-0d534947c9ad in service + fake-vendor fake-model serial-fb29d469-7d3f-47b9-944c-ce817fc70370 in service datasets at generation 2: @@ -726,19 +726,19 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 sled: 9dc50690-f9bf-4520-bf80-051d0f465c2c (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb - fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 - fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 - fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c - fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 - fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a - fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 - fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 - fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 - fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 + ------------------------------------------------------------------------------------ + vendor model 
serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-1f9589d8-0a68-47e8-b977-d0fb17bd3fdb in service + fake-vendor fake-model serial-44882b6c-5e19-418b-b6c3-065f2af5a557 in service + fake-vendor fake-model serial-6de47efc-8a6d-4108-bf82-0146eab3be06 in service + fake-vendor fake-model serial-80e2c62f-052c-4580-8252-7af238fbbe9c in service + fake-vendor fake-model serial-81d326ae-5f8a-4ffd-9d5e-a9e8246ac014 in service + fake-vendor fake-model serial-878af5a0-7810-43e5-bdd5-a3215242459a in service + fake-vendor fake-model serial-af59fef5-8258-4852-be1d-ce55ae7dc822 in service + fake-vendor fake-model serial-b16aa11f-6e49-44c1-abcb-2e7584bffa12 in service + fake-vendor fake-model serial-f173c79b-a3b4-4f4a-a983-bc94b6b1a616 in service + fake-vendor fake-model serial-f1a041cc-85c7-4d14-8fc0-8d0e417f7e24 in service datasets at generation 2: @@ -819,19 +819,19 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 sled: a88790de-5962-4871-8686-61c1fd5b7094 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b - fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 - fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 - fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 - fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e - fake-vendor fake-model serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 - fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 - fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 - fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 - fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-2f26e76b-6c91-4ee9-87b3-f424b942091b in service + fake-vendor fake-model serial-6f8fa855-4f34-42db-a6e4-9d0090d6c828 in service + fake-vendor fake-model serial-71450d62-791e-4068-9882-8a206a465fd9 in service + fake-vendor fake-model serial-7b4ad242-8330-4c08-9588-c66782742678 in service + fake-vendor fake-model serial-9bf23b52-565e-4439-9728-edb603fa6c4e in service + fake-vendor fake-model serial-c9476e3d-7745-4fa9-b336-b54ac5b08f56 in service + fake-vendor fake-model serial-d2cd1e65-b63d-4748-895f-aafecc81e440 in service + fake-vendor fake-model serial-d4ad3cc1-956a-4444-81a6-da6a025f6df2 in service + fake-vendor fake-model serial-e1298a43-fa1a-4e6f-bcfa-b26996f69c50 in service + fake-vendor fake-model serial-fa9ce87c-fa7c-4854-95bd-69b8f01c46f9 in service datasets at generation 4: diff --git a/dev-tools/releng/Cargo.toml b/dev-tools/releng/Cargo.toml index 6aad69ae37b..69f7dae2d4e 100644 --- a/dev-tools/releng/Cargo.toml +++ b/dev-tools/releng/Cargo.toml @@ -18,7 +18,6 @@ omicron-common.workspace = true omicron-pins.workspace = true omicron-workspace-hack.workspace = true omicron-zone-package.workspace = true -once_cell.workspace = true reqwest.workspace = true semver.workspace = true serde.workspace = true diff --git a/dev-tools/releng/src/hubris.rs b/dev-tools/releng/src/hubris.rs index f46af4bfaf8..8ed3308d262 100644 --- 
a/dev-tools/releng/src/hubris.rs +++ b/dev-tools/releng/src/hubris.rs @@ -10,7 +10,6 @@ use anyhow::Result; use camino::Utf8PathBuf; use fs_err::tokio as fs; use futures::future::TryFutureExt; -use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; use semver::Version; use serde::Deserialize; @@ -42,7 +41,7 @@ pub(crate) async fn fetch_hubris_artifacts( // takes less time than OS builds. let mut manifest = DeserializedManifest { - system_version: SemverVersion(Version::new(0, 0, 0)), + system_version: Version::new(0, 0, 0), artifacts: BTreeMap::new(), }; @@ -156,7 +155,7 @@ struct Manifest { #[derive(Deserialize)] struct Artifact { name: String, - version: SemverVersion, + version: Version, source: Source, } diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs index a7d98a299c8..e00020aa83f 100644 --- a/dev-tools/releng/src/main.rs +++ b/dev-tools/releng/src/main.rs @@ -8,6 +8,7 @@ mod job; mod tuf; use std::sync::Arc; +use std::sync::LazyLock; use std::time::Duration; use std::time::Instant; @@ -20,7 +21,6 @@ use clap::Parser; use fs_err::tokio as fs; use omicron_zone_package::config::Config; use omicron_zone_package::config::PackageName; -use once_cell::sync::Lazy; use semver::Version; use slog::debug; use slog::error; @@ -88,7 +88,7 @@ const TUF_PACKAGES: [&PackageName; 11] = [ const HELIOS_REPO: &str = "https://pkg.oxide.computer/helios/2/dev/"; -static WORKSPACE_DIR: Lazy<Utf8PathBuf> = Lazy::new(|| { +static WORKSPACE_DIR: LazyLock<Utf8PathBuf> = LazyLock::new(|| { // $CARGO_MANIFEST_DIR is at `.../omicron/dev-tools/releng` let mut dir = Utf8PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect( diff --git a/dev-tools/releng/src/tuf.rs b/dev-tools/releng/src/tuf.rs index 011b2ddd474..9b7e66278de 100644 --- a/dev-tools/releng/src/tuf.rs +++ b/dev-tools/releng/src/tuf.rs @@ -12,7 +12,6 @@ use chrono::Timelike; use chrono::Utc; use fs_err::tokio as fs; use fs_err::tokio::File; -use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_zone_package::config::Config; use semver::Version; @@ -48,7 +47,7 @@ pub(crate) async fn build_tuf_repo( ) .context("failed to open intermediate hubris staging manifest")?; // Set the version. - manifest.system_version = SemverVersion(version); + manifest.system_version = version; // Load the Hubris production manifest and merge it in. let hubris_production = DeserializedManifest::from_path(
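The `once_cell::sync::Lazy` → `std::sync::LazyLock` swap in `main.rs` above (repeated throughout the rest of this change) is mechanical: `LazyLock`, stabilized in Rust 1.80, has the same `new(closure)` constructor and runs the initializer on first dereference, just like `Lazy`. A minimal sketch of the shape, with a stand-in initializer rather than the real `WORKSPACE_DIR` computation:

use std::sync::LazyLock;

// Before: static WORKSPACE_DIR: Lazy<Utf8PathBuf> = Lazy::new(|| ...);
// After: same constructor and Deref behavior, one less dependency.
static WORKSPACE_DIR: LazyLock<String> = LazyLock::new(|| {
    // Stand-in for the real $CARGO_MANIFEST_DIR-based path computation.
    std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string())
});

fn main() {
    // The closure runs on the first dereference; later uses hit the cache.
    println!("workspace dir: {}", &*WORKSPACE_DIR);
}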
diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index bdf4a911afb..e754daf1855 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -25,7 +25,6 @@ hyper.workspace = true illumos-utils.workspace = true ipcc.workspace = true omicron-common.workspace = true -once_cell.workspace = true schemars.workspace = true serde.workspace = true signal-hook.workspace = true diff --git a/gateway/src/management_switch.rs b/gateway/src/management_switch.rs index 69c3d717f73..ce45d58c2f7 100644 --- a/gateway/src/management_switch.rs +++ b/gateway/src/management_switch.rs @@ -29,7 +29,6 @@ use gateway_sp_comms::HostPhase2Provider; use gateway_sp_comms::SharedSocket; use gateway_sp_comms::SingleSp; use gateway_sp_comms::SpRetryConfig; -use once_cell::sync::OnceCell; use serde::Deserialize; use serde::Serialize; use slog::o; @@ -39,6 +38,7 @@ use std::collections::HashMap; use std::net::Ipv6Addr; use std::net::SocketAddrV6; use std::sync::Arc; +use std::sync::OnceLock; use std::time::Duration; use tokio::net::UdpSocket; use tokio::task::JoinHandle; @@ -162,7 +162,7 @@ pub struct ManagementSwitch { // When it's dropped, it cancels the background tokio task that loops on // that socket receiving incoming packets. _shared_socket: Option<SharedSocket>, - location_map: Arc<OnceCell<LocationMap>>, + location_map: Arc<OnceLock<LocationMap>>, discovery_task: JoinHandle<()>, log: Logger, } @@ -290,7 +290,7 @@ impl ManagementSwitch { // completes (because we won't be able to map "the SP of sled 7" to a // correct switch port). let port_to_handle = Arc::new(port_to_handle); - let location_map = Arc::new(OnceCell::new()); + let location_map = Arc::new(OnceLock::new()); let discovery_task = { let log = log.clone(); let port_to_handle = Arc::clone(&port_to_handle); diff --git a/installinator-common/src/block_size_writer.rs b/installinator-common/src/block_size_writer.rs index 1548594b417..8a2bff3844f 100644 --- a/installinator-common/src/block_size_writer.rs +++ b/installinator-common/src/block_size_writer.rs @@ -196,7 +196,7 @@ mod tests { #[proptest] fn proptest_block_writer( chunks: Vec<Vec<u8>>, - #[strategy((16_usize..4096))] block_size: usize, + #[strategy(16_usize..4096)] block_size: usize, ) { with_test_runtime(move || async move { proptest_block_writer_impl(chunks, block_size) diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 8afc36bc03b..4b73a8dd783 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -59,7 +59,6 @@ nexus-networking.workspace = true nexus-saga-recovery.workspace = true nexus-test-interface.workspace = true num-integer.workspace = true -once_cell.workspace = true openssl.workspace = true oximeter-client.workspace = true oximeter-db = { workspace = true, default-features = false, features = [ "oxql" ] } diff --git a/nexus/auth/Cargo.toml b/nexus/auth/Cargo.toml index 1a926f1789d..71e16eafbef 100644 --- a/nexus/auth/Cargo.toml +++ b/nexus/auth/Cargo.toml @@ -24,7 +24,6 @@ hyper.workspace = true newtype_derive.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" -once_cell.workspace = true openssl.workspace = true oso.workspace = true samael.workspace = true
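The `OnceCell` → `OnceLock` change in `management_switch.rs` above is the analogous move for write-once slots: `std::sync::OnceLock` (stable since Rust 1.70) keeps the same set-once/read-many semantics. A small self-contained sketch of the discovery pattern, with a plain thread and a `String` standing in for the real tokio task and `LocationMap`:

use std::sync::{Arc, OnceLock};
use std::thread;

fn main() {
    // Shared slot that a background task fills in exactly once.
    let location_map: Arc<OnceLock<String>> = Arc::new(OnceLock::new());

    let writer = Arc::clone(&location_map);
    let discovery_task = thread::spawn(move || {
        // `set` succeeds only for the first caller; later calls return Err.
        writer.set("switch0".to_string()).expect("set exactly once");
    });

    discovery_task.join().unwrap();
    // Readers see None until discovery completes, then the stored value.
    assert_eq!(location_map.get().map(String::as_str), Some("switch0"));
}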
diff --git a/nexus/auth/src/authn/external/spoof.rs b/nexus/auth/src/authn/external/spoof.rs index 326d5294317..4aa2b1b443d 100644 --- a/nexus/auth/src/authn/external/spoof.rs +++ b/nexus/auth/src/authn/external/spoof.rs @@ -4,6 +4,8 @@ //! Custom, test-only authn scheme that trusts whatever the client says +use std::sync::LazyLock; + use super::super::Details; use super::HttpAuthnScheme; use super::Reason; @@ -16,7 +18,6 @@ use anyhow::Context; use async_trait::async_trait; use headers::authorization::{Authorization, Bearer}; use headers::HeaderMapExt; -use once_cell::sync::Lazy; use slog::debug; use uuid::Uuid; @@ -56,20 +57,20 @@ const SPOOF_RESERVED_BAD_CREDS: &str = "this-fake-ID-it-is-truly-excellent"; const SPOOF_PREFIX: &str = "oxide-spoof-"; /// Actor (id) used for the special "bad credentials" error -static SPOOF_RESERVED_BAD_CREDS_ACTOR: Lazy<Actor> = - Lazy::new(|| Actor::UserBuiltin { +static SPOOF_RESERVED_BAD_CREDS_ACTOR: LazyLock<Actor> = + LazyLock::new(|| Actor::UserBuiltin { user_builtin_id: "22222222-2222-2222-2222-222222222222" .parse() .unwrap(), }); /// Complete HTTP header value to trigger the "bad actor" error -pub static SPOOF_HEADER_BAD_ACTOR: Lazy<Authorization<Bearer>> = - Lazy::new(|| make_header_value_str(SPOOF_RESERVED_BAD_ACTOR).unwrap()); +pub static SPOOF_HEADER_BAD_ACTOR: LazyLock<Authorization<Bearer>> = + LazyLock::new(|| make_header_value_str(SPOOF_RESERVED_BAD_ACTOR).unwrap()); /// Complete HTTP header value to trigger the "bad creds" error -pub static SPOOF_HEADER_BAD_CREDS: Lazy<Authorization<Bearer>> = - Lazy::new(|| make_header_value_str(SPOOF_RESERVED_BAD_CREDS).unwrap()); +pub static SPOOF_HEADER_BAD_CREDS: LazyLock<Authorization<Bearer>> = + LazyLock::new(|| make_header_value_str(SPOOF_RESERVED_BAD_CREDS).unwrap()); /// Implements a (test-only) authentication scheme where the client simply /// provides the actor information in a custom bearer token and we always trust diff --git a/nexus/auth/src/authz/api_resources.rs b/nexus/auth/src/authz/api_resources.rs index 745a699cf2b..d07e106747c 100644 --- a/nexus/auth/src/authz/api_resources.rs +++ b/nexus/auth/src/authz/api_resources.rs @@ -26,6 +26,8 @@ //! //! Most `authz` types are generated by the `authz_resource!` macro. +use std::sync::LazyLock; + use super::actor::AnyActor; use super::context::AuthorizedResource; use super::oso_generic::Init; @@ -40,7 +42,6 @@ use futures::FutureExt; use nexus_db_fixed_data::FLEET_ID; use nexus_types::external_api::shared::{FleetRole, ProjectRole, SiloRole}; use omicron_common::api::external::{Error, LookupType, ResourceType}; -use once_cell::sync::Lazy; use oso::PolarClass; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -159,8 +160,8 @@ pub struct Fleet; /// Singleton representing the [`Fleet`] itself for authz purposes pub const FLEET: Fleet = Fleet; -pub static FLEET_LOOKUP: Lazy<LookupType> = - Lazy::new(|| LookupType::ById(*FLEET_ID)); +pub static FLEET_LOOKUP: LazyLock<LookupType> = + LazyLock::new(|| LookupType::ById(*FLEET_ID)); impl Eq for Fleet {} impl PartialEq for Fleet { @@ -713,6 +714,22 @@ authz_resource! { polar_snippet = InProject, } +authz_resource! { + name = "AffinityGroup", + parent = "Project", + primary_key = Uuid, + roles_allowed = false, + polar_snippet = InProject, +} + +authz_resource! { + name = "AntiAffinityGroup", + parent = "Project", + primary_key = Uuid, + roles_allowed = false, + polar_snippet = InProject, +} +
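The two new `authz_resource!` invocations above generate project-scoped `AffinityGroup` and `AntiAffinityGroup` authz types, with the `InProject` snippet wiring them into the Polar policy alongside `Disk`, `Snapshot`, and the other project children. A hedged sketch of how such a generated resource is typically consumed; the import paths and the `OpContext::authorize` call shape are assumed from the surrounding code's conventions, not shown in this diff:

// Assumed paths into the nexus auth crate; hypothetical helper function.
use nexus_auth::authz;
use nexus_auth::context::OpContext;
use omicron_common::api::external::Error;

async fn read_affinity_group(
    opctx: &OpContext,
    group: &authz::AffinityGroup,
) -> Result<(), Error> {
    // Same call shape as checks against other project-scoped resources.
    opctx.authorize(authz::Action::Read, group).await
}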
{ name = "InstanceNetworkInterface", parent = "Instance", diff --git a/nexus/auth/src/authz/oso_generic.rs b/nexus/auth/src/authz/oso_generic.rs index 321bb98b1c6..32b3dbd1f80 100644 --- a/nexus/auth/src/authz/oso_generic.rs +++ b/nexus/auth/src/authz/oso_generic.rs @@ -125,6 +125,8 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { Disk::init(), Snapshot::init(), ProjectImage::init(), + AffinityGroup::init(), + AntiAffinityGroup::init(), Instance::init(), IpPool::init(), InstanceNetworkInterface::init(), diff --git a/nexus/db-fixed-data/Cargo.toml b/nexus/db-fixed-data/Cargo.toml index 486df156867..a6ac298452c 100644 --- a/nexus/db-fixed-data/Cargo.toml +++ b/nexus/db-fixed-data/Cargo.toml @@ -12,7 +12,6 @@ workspace = true omicron-rpaths.workspace = true [dependencies] -once_cell.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" strum.workspace = true @@ -22,4 +21,3 @@ nexus-db-model.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true - diff --git a/nexus/db-fixed-data/src/lib.rs b/nexus/db-fixed-data/src/lib.rs index 13444141cb3..0bfc0127ff2 100644 --- a/nexus/db-fixed-data/src/lib.rs +++ b/nexus/db-fixed-data/src/lib.rs @@ -32,7 +32,7 @@ // 001de000-c470 built-in services vpc subnets // 001de000-all0 singleton ID for source IP allowlist ("all0" is like "allow") -use once_cell::sync::Lazy; +use std::sync::LazyLock; pub mod allow_list; pub mod project; @@ -46,7 +46,7 @@ pub mod vpc_firewall_rule; pub mod vpc_subnet; /* See above for where this uuid comes from. */ -pub static FLEET_ID: Lazy = Lazy::new(|| { +pub static FLEET_ID: LazyLock = LazyLock::new(|| { "001de000-1334-4000-8000-000000000000" .parse() .expect("invalid uuid for builtin fleet id") diff --git a/nexus/db-fixed-data/src/project.rs b/nexus/db-fixed-data/src/project.rs index 77843908208..50e0e43e86d 100644 --- a/nexus/db-fixed-data/src/project.rs +++ b/nexus/db-fixed-data/src/project.rs @@ -5,20 +5,20 @@ use nexus_db_model as model; use nexus_types::{external_api::params, silo::INTERNAL_SILO_ID}; use omicron_common::api::external::IdentityMetadataCreateParams; -use once_cell::sync::Lazy; +use std::sync::LazyLock; /// The name of the built-in Project and VPC for Oxide services. pub const SERVICES_DB_NAME: &str = "oxide-services"; /// UUID of built-in project for internal services on the rack. -pub static SERVICES_PROJECT_ID: Lazy = Lazy::new(|| { +pub static SERVICES_PROJECT_ID: LazyLock = LazyLock::new(|| { "001de000-4401-4000-8000-000000000000" .parse() .expect("invalid uuid for builtin services project id") }); /// Built-in Project for internal services on the rack. -pub static SERVICES_PROJECT: Lazy = Lazy::new(|| { +pub static SERVICES_PROJECT: LazyLock = LazyLock::new(|| { model::Project::new_with_id( *SERVICES_PROJECT_ID, INTERNAL_SILO_ID, diff --git a/nexus/db-fixed-data/src/role_assignment.rs b/nexus/db-fixed-data/src/role_assignment.rs index 25b26786f8d..03ba63853b7 100644 --- a/nexus/db-fixed-data/src/role_assignment.rs +++ b/nexus/db-fixed-data/src/role_assignment.rs @@ -8,10 +8,10 @@ use super::user_builtin; use super::FLEET_ID; use nexus_db_model::IdentityType; use nexus_db_model::RoleAssignment; -use once_cell::sync::Lazy; +use std::sync::LazyLock; -pub static BUILTIN_ROLE_ASSIGNMENTS: Lazy> = - Lazy::new(|| { +pub static BUILTIN_ROLE_ASSIGNMENTS: LazyLock> = + LazyLock::new(|| { vec![ // The "internal-api" user gets the "admin" role on the sole Fleet. // This is a pretty elevated privilege. 
diff --git a/nexus/db-fixed-data/src/role_builtin.rs b/nexus/db-fixed-data/src/role_builtin.rs index c617874e98a..8646b3de3b2 100644 --- a/nexus/db-fixed-data/src/role_builtin.rs +++ b/nexus/db-fixed-data/src/role_builtin.rs @@ -4,7 +4,7 @@ //! Built-in roles use omicron_common::api; -use once_cell::sync::Lazy; +use std::sync::LazyLock; #[derive(Clone, Debug)] pub struct RoleBuiltinConfig { @@ -13,72 +13,73 @@ pub struct RoleBuiltinConfig { pub description: &'static str, } -pub static FLEET_ADMIN: Lazy = - Lazy::new(|| RoleBuiltinConfig { +pub static FLEET_ADMIN: LazyLock = + LazyLock::new(|| RoleBuiltinConfig { resource_type: api::external::ResourceType::Fleet, role_name: "admin", description: "Fleet Administrator", }); -pub static FLEET_AUTHENTICATOR: Lazy = - Lazy::new(|| RoleBuiltinConfig { +pub static FLEET_AUTHENTICATOR: LazyLock = + LazyLock::new(|| RoleBuiltinConfig { resource_type: api::external::ResourceType::Fleet, role_name: "external-authenticator", description: "Fleet External Authenticator", }); -pub static FLEET_VIEWER: Lazy = - Lazy::new(|| RoleBuiltinConfig { +pub static FLEET_VIEWER: LazyLock = + LazyLock::new(|| RoleBuiltinConfig { resource_type: api::external::ResourceType::Fleet, role_name: "viewer", description: "Fleet Viewer", }); -pub static SILO_ADMIN: Lazy = - Lazy::new(|| RoleBuiltinConfig { +pub static SILO_ADMIN: LazyLock = + LazyLock::new(|| RoleBuiltinConfig { resource_type: api::external::ResourceType::Silo, role_name: "admin", description: "Silo Administrator", }); -pub static BUILTIN_ROLES: Lazy> = Lazy::new(|| { - vec![ - FLEET_ADMIN.clone(), - FLEET_AUTHENTICATOR.clone(), - FLEET_VIEWER.clone(), - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Fleet, - role_name: "collaborator", - description: "Fleet Collaborator", - }, - SILO_ADMIN.clone(), - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Silo, - role_name: "collaborator", - description: "Silo Collaborator", - }, - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Silo, - role_name: "viewer", - description: "Silo Viewer", - }, - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Project, - role_name: "admin", - description: "Project Administrator", - }, - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Project, - role_name: "collaborator", - description: "Project Collaborator", - }, - RoleBuiltinConfig { - resource_type: api::external::ResourceType::Project, - role_name: "viewer", - description: "Project Viewer", - }, - ] -}); +pub static BUILTIN_ROLES: LazyLock> = + LazyLock::new(|| { + vec![ + FLEET_ADMIN.clone(), + FLEET_AUTHENTICATOR.clone(), + FLEET_VIEWER.clone(), + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Fleet, + role_name: "collaborator", + description: "Fleet Collaborator", + }, + SILO_ADMIN.clone(), + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Silo, + role_name: "collaborator", + description: "Silo Collaborator", + }, + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Silo, + role_name: "viewer", + description: "Silo Viewer", + }, + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Project, + role_name: "admin", + description: "Project Administrator", + }, + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Project, + role_name: "collaborator", + description: "Project Collaborator", + }, + RoleBuiltinConfig { + resource_type: api::external::ResourceType::Project, + role_name: "viewer", + description: "Project 
Viewer", + }, + ] + }); #[cfg(test)] mod test { diff --git a/nexus/db-fixed-data/src/silo.rs b/nexus/db-fixed-data/src/silo.rs index 10c624e30e0..d98fda3ef8f 100644 --- a/nexus/db-fixed-data/src/silo.rs +++ b/nexus/db-fixed-data/src/silo.rs @@ -11,13 +11,13 @@ use nexus_types::{ }, }; use omicron_common::api::external::IdentityMetadataCreateParams; -use once_cell::sync::Lazy; +use std::sync::LazyLock; /// "Default" Silo /// /// This was historically used for demos and the unit tests. The plan is to /// remove it per omicron#2305. -pub static DEFAULT_SILO: Lazy = Lazy::new(|| { +pub static DEFAULT_SILO: LazyLock = LazyLock::new(|| { model::Silo::new_with_id( DEFAULT_SILO_ID, params::SiloCreate { @@ -40,7 +40,7 @@ pub static DEFAULT_SILO: Lazy = Lazy::new(|| { /// Built-in Silo to house internal resources. It contains no users and /// can't be logged into. -pub static INTERNAL_SILO: Lazy = Lazy::new(|| { +pub static INTERNAL_SILO: LazyLock = LazyLock::new(|| { model::Silo::new_with_id( INTERNAL_SILO_ID, params::SiloCreate { diff --git a/nexus/db-fixed-data/src/silo_user.rs b/nexus/db-fixed-data/src/silo_user.rs index e6e6d7d0e5b..a8c500f336d 100644 --- a/nexus/db-fixed-data/src/silo_user.rs +++ b/nexus/db-fixed-data/src/silo_user.rs @@ -6,25 +6,26 @@ use super::role_builtin; use nexus_db_model as model; use nexus_types::{identity::Asset, silo::DEFAULT_SILO_ID}; -use once_cell::sync::Lazy; +use std::sync::LazyLock; /// Test user that's granted all privileges, used for automated testing and /// local development // TODO-security Once we have a way to bootstrap the initial Silo with the // initial privileged user, this user should be created in the test suite, // not automatically at Nexus startup. See omicron#2305. -pub static USER_TEST_PRIVILEGED: Lazy = Lazy::new(|| { - model::SiloUser::new( - DEFAULT_SILO_ID, - // "4007" looks a bit like "root". - "001de000-05e4-4000-8000-000000004007".parse().unwrap(), - "privileged".into(), - ) -}); +pub static USER_TEST_PRIVILEGED: LazyLock = + LazyLock::new(|| { + model::SiloUser::new( + DEFAULT_SILO_ID, + // "4007" looks a bit like "root". + "001de000-05e4-4000-8000-000000004007".parse().unwrap(), + "privileged".into(), + ) + }); /// Role assignments needed for the privileged user -pub static ROLE_ASSIGNMENTS_PRIVILEGED: Lazy> = - Lazy::new(|| { +pub static ROLE_ASSIGNMENTS_PRIVILEGED: LazyLock> = + LazyLock::new(|| { vec![ // The "test-privileged" user gets the "admin" role on the sole // Fleet as well as the default Silo. @@ -49,14 +50,15 @@ pub static ROLE_ASSIGNMENTS_PRIVILEGED: Lazy> = // TODO-security Once we have a way to bootstrap the initial Silo with the // initial privileged user, this user should be created in the test suite, // not automatically at Nexus startup. See omicron#2305. -pub static USER_TEST_UNPRIVILEGED: Lazy = Lazy::new(|| { - model::SiloUser::new( - DEFAULT_SILO_ID, - // 60001 is the decimal uid for "nobody" on Helios. - "001de000-05e4-4000-8000-000000060001".parse().unwrap(), - "unprivileged".into(), - ) -}); +pub static USER_TEST_UNPRIVILEGED: LazyLock = + LazyLock::new(|| { + model::SiloUser::new( + DEFAULT_SILO_ID, + // 60001 is the decimal uid for "nobody" on Helios. 
+ "001de000-05e4-4000-8000-000000060001".parse().unwrap(), + "unprivileged".into(), + ) + }); #[cfg(test)] mod test { diff --git a/nexus/db-fixed-data/src/user_builtin.rs b/nexus/db-fixed-data/src/user_builtin.rs index 1e968026831..08236af5db3 100644 --- a/nexus/db-fixed-data/src/user_builtin.rs +++ b/nexus/db-fixed-data/src/user_builtin.rs @@ -4,7 +4,7 @@ //! Built-in users use omicron_common::api; -use once_cell::sync::Lazy; +use std::sync::LazyLock; use uuid::Uuid; pub struct UserBuiltinConfig { @@ -29,7 +29,7 @@ impl UserBuiltinConfig { /// Internal user used for seeding initial database data // NOTE: This uuid and name are duplicated in dbinit.sql. -pub static USER_DB_INIT: Lazy = Lazy::new(|| { +pub static USER_DB_INIT: LazyLock = LazyLock::new(|| { UserBuiltinConfig::new_static( // "0001" is the first possible user that wouldn't be confused with // 0, or root. @@ -41,51 +41,56 @@ pub static USER_DB_INIT: Lazy = Lazy::new(|| { /// Internal user for performing operations to manage the /// provisioning of services across the fleet. -pub static USER_SERVICE_BALANCER: Lazy = Lazy::new(|| { - UserBuiltinConfig::new_static( - "001de000-05e4-4000-8000-00000000bac3", - "service-balancer", - "used for Nexus-driven service balancing", - ) -}); +pub static USER_SERVICE_BALANCER: LazyLock = + LazyLock::new(|| { + UserBuiltinConfig::new_static( + "001de000-05e4-4000-8000-00000000bac3", + "service-balancer", + "used for Nexus-driven service balancing", + ) + }); /// Internal user used by Nexus when handling internal API requests -pub static USER_INTERNAL_API: Lazy = Lazy::new(|| { - UserBuiltinConfig::new_static( - "001de000-05e4-4000-8000-000000000002", - "internal-api", - "used by Nexus when handling internal API requests", - ) -}); +pub static USER_INTERNAL_API: LazyLock = + LazyLock::new(|| { + UserBuiltinConfig::new_static( + "001de000-05e4-4000-8000-000000000002", + "internal-api", + "used by Nexus when handling internal API requests", + ) + }); /// Internal user used by Nexus to read privileged control plane data -pub static USER_INTERNAL_READ: Lazy = Lazy::new(|| { - UserBuiltinConfig::new_static( - // "4ead" looks like "read" - "001de000-05e4-4000-8000-000000004ead", - "internal-read", - "used by Nexus to read privileged control plane data", - ) -}); +pub static USER_INTERNAL_READ: LazyLock = + LazyLock::new(|| { + UserBuiltinConfig::new_static( + // "4ead" looks like "read" + "001de000-05e4-4000-8000-000000004ead", + "internal-read", + "used by Nexus to read privileged control plane data", + ) + }); /// Internal user used by Nexus when recovering sagas -pub static USER_SAGA_RECOVERY: Lazy = Lazy::new(|| { - UserBuiltinConfig::new_static( - // "3a8a" looks a bit like "saga". - "001de000-05e4-4000-8000-000000003a8a", - "saga-recovery", - "used by Nexus when recovering sagas", - ) -}); +pub static USER_SAGA_RECOVERY: LazyLock = + LazyLock::new(|| { + UserBuiltinConfig::new_static( + // "3a8a" looks a bit like "saga". 
+ "001de000-05e4-4000-8000-000000003a8a", + "saga-recovery", + "used by Nexus when recovering sagas", + ) + }); /// Internal user used by Nexus when authenticating external requests -pub static USER_EXTERNAL_AUTHN: Lazy = Lazy::new(|| { - UserBuiltinConfig::new_static( - "001de000-05e4-4000-8000-000000000003", - "external-authn", - "used by Nexus when authenticating external requests", - ) -}); +pub static USER_EXTERNAL_AUTHN: LazyLock = + LazyLock::new(|| { + UserBuiltinConfig::new_static( + "001de000-05e4-4000-8000-000000000003", + "external-authn", + "used by Nexus when authenticating external requests", + ) + }); #[cfg(test)] mod test { diff --git a/nexus/db-fixed-data/src/vpc.rs b/nexus/db-fixed-data/src/vpc.rs index 64a2563305e..fc263a02da1 100644 --- a/nexus/db-fixed-data/src/vpc.rs +++ b/nexus/db-fixed-data/src/vpc.rs @@ -7,47 +7,48 @@ use nexus_db_model as model; use nexus_types::external_api::params; use omicron_common::address::SERVICE_VPC_IPV6_PREFIX; use omicron_common::api::external::IdentityMetadataCreateParams; -use once_cell::sync::Lazy; +use std::sync::LazyLock; /// UUID of built-in VPC for internal services on the rack. -pub static SERVICES_VPC_ID: Lazy = Lazy::new(|| { +pub static SERVICES_VPC_ID: LazyLock = LazyLock::new(|| { "001de000-074c-4000-8000-000000000000" .parse() .expect("invalid uuid for builtin services vpc id") }); /// UUID of VpcRouter for built-in Services VPC. -pub static SERVICES_VPC_ROUTER_ID: Lazy = Lazy::new(|| { +pub static SERVICES_VPC_ROUTER_ID: LazyLock = LazyLock::new(|| { "001de000-074c-4000-8000-000000000001" .parse() .expect("invalid uuid for builtin services vpc router id") }); /// UUID of InternetGateway for built-in Services VPC. -pub static SERVICES_INTERNET_GATEWAY_ID: Lazy = Lazy::new(|| { - "001de000-074c-4000-8000-000000000002" - .parse() - .expect("invalid uuid for builtin services internet gateway id") -}); +pub static SERVICES_INTERNET_GATEWAY_ID: LazyLock = + LazyLock::new(|| { + "001de000-074c-4000-8000-000000000002" + .parse() + .expect("invalid uuid for builtin services internet gateway id") + }); /// UUID of InternetGateway IPv4 default route for built-in Services VPC. -pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V4: Lazy = - Lazy::new(|| { +pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V4: LazyLock = + LazyLock::new(|| { "001de000-074c-4000-8000-000000000003" .parse() .expect("invalid uuid for builtin services internet gateway default route v4") }); /// UUID of InternetGateway IPv6 default route for built-in Services VPC. -pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V6: Lazy = - Lazy::new(|| { +pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V6: LazyLock = + LazyLock::new(|| { "001de000-074c-4000-8000-000000000004" .parse() .expect("invalid uuid for builtin services internet gateway default route v4") }); /// Built-in VPC for internal services on the rack. 
diff --git a/nexus/db-fixed-data/src/vpc.rs b/nexus/db-fixed-data/src/vpc.rs
index 64a2563305e..fc263a02da1 100644
--- a/nexus/db-fixed-data/src/vpc.rs
+++ b/nexus/db-fixed-data/src/vpc.rs
@@ -7,47 +7,48 @@ use nexus_db_model as model;
 use nexus_types::external_api::params;
 use omicron_common::address::SERVICE_VPC_IPV6_PREFIX;
 use omicron_common::api::external::IdentityMetadataCreateParams;
-use once_cell::sync::Lazy;
+use std::sync::LazyLock;

 /// UUID of built-in VPC for internal services on the rack.
-pub static SERVICES_VPC_ID: Lazy<Uuid> = Lazy::new(|| {
+pub static SERVICES_VPC_ID: LazyLock<Uuid> = LazyLock::new(|| {
     "001de000-074c-4000-8000-000000000000"
         .parse()
         .expect("invalid uuid for builtin services vpc id")
 });

 /// UUID of VpcRouter for built-in Services VPC.
-pub static SERVICES_VPC_ROUTER_ID: Lazy<Uuid> = Lazy::new(|| {
+pub static SERVICES_VPC_ROUTER_ID: LazyLock<Uuid> = LazyLock::new(|| {
     "001de000-074c-4000-8000-000000000001"
         .parse()
         .expect("invalid uuid for builtin services vpc router id")
 });

 /// UUID of InternetGateway for built-in Services VPC.
-pub static SERVICES_INTERNET_GATEWAY_ID: Lazy<Uuid> = Lazy::new(|| {
-    "001de000-074c-4000-8000-000000000002"
-        .parse()
-        .expect("invalid uuid for builtin services internet gateway id")
-});
+pub static SERVICES_INTERNET_GATEWAY_ID: LazyLock<Uuid> =
+    LazyLock::new(|| {
+        "001de000-074c-4000-8000-000000000002"
+            .parse()
+            .expect("invalid uuid for builtin services internet gateway id")
+    });

 /// UUID of InternetGateway IPv4 default route for built-in Services VPC.
-pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V4: Lazy<Uuid> =
-    Lazy::new(|| {
+pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V4: LazyLock<Uuid> =
+    LazyLock::new(|| {
         "001de000-074c-4000-8000-000000000003"
             .parse()
             .expect("invalid uuid for builtin services internet gateway default route v4")
     });

 /// UUID of InternetGateway IPv6 default route for built-in Services VPC.
-pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V6: Lazy<Uuid> =
-    Lazy::new(|| {
+pub static SERVICES_INTERNET_GATEWAY_DEFAULT_ROUTE_V6: LazyLock<Uuid> =
+    LazyLock::new(|| {
         "001de000-074c-4000-8000-000000000004"
             .parse()
             .expect("invalid uuid for builtin services internet gateway default route v4")
     });

 /// Built-in VPC for internal services on the rack.
-pub static SERVICES_VPC: Lazy<model::IncompleteVpc> = Lazy::new(|| {
+pub static SERVICES_VPC: LazyLock<model::IncompleteVpc> = LazyLock::new(|| {
     model::IncompleteVpc::new(
         *SERVICES_VPC_ID,
         *super::project::SERVICES_PROJECT_ID,
diff --git a/nexus/db-fixed-data/src/vpc_firewall_rule.rs b/nexus/db-fixed-data/src/vpc_firewall_rule.rs
index 09cb9e1c72a..d59e1c556e6 100644
--- a/nexus/db-fixed-data/src/vpc_firewall_rule.rs
+++ b/nexus/db-fixed-data/src/vpc_firewall_rule.rs
@@ -8,11 +8,11 @@ use omicron_common::api::external::{
     VpcFirewallRuleFilter, VpcFirewallRulePriority, VpcFirewallRuleProtocol,
     VpcFirewallRuleStatus, VpcFirewallRuleTarget, VpcFirewallRuleUpdate,
 };
-use once_cell::sync::Lazy;
+use std::sync::LazyLock;

 /// Built-in VPC firewall rule for External DNS.
-pub static DNS_VPC_FW_RULE: Lazy<VpcFirewallRuleUpdate> =
-    Lazy::new(|| VpcFirewallRuleUpdate {
+pub static DNS_VPC_FW_RULE: LazyLock<VpcFirewallRuleUpdate> =
+    LazyLock::new(|| VpcFirewallRuleUpdate {
         name: "external-dns-inbound".parse().unwrap(),
         description: "allow inbound connections for DNS from anywhere"
             .to_string(),
@@ -41,8 +41,8 @@ pub const NEXUS_VPC_FW_RULE_NAME: &str = "nexus-inbound";
 /// Note that we currently rely on this being exactly one rule to implement the
 /// Nexus allowlist. See `nexus/networking/src/firewall_rules.rs` for more
 /// details.
-pub static NEXUS_VPC_FW_RULE: Lazy<VpcFirewallRuleUpdate> =
-    Lazy::new(|| VpcFirewallRuleUpdate {
+pub static NEXUS_VPC_FW_RULE: LazyLock<VpcFirewallRuleUpdate> =
+    LazyLock::new(|| VpcFirewallRuleUpdate {
         name: NEXUS_VPC_FW_RULE_NAME.parse().unwrap(),
         description:
             "allow inbound connections for console & api from anywhere"
diff --git a/nexus/db-fixed-data/src/vpc_subnet.rs b/nexus/db-fixed-data/src/vpc_subnet.rs
index c91581ac13d..2e4e5b215c4 100644
--- a/nexus/db-fixed-data/src/vpc_subnet.rs
+++ b/nexus/db-fixed-data/src/vpc_subnet.rs
@@ -8,52 +8,55 @@ use omicron_common::address::{
     NEXUS_OPTE_IPV6_SUBNET, NTP_OPTE_IPV4_SUBNET, NTP_OPTE_IPV6_SUBNET,
 };
 use omicron_common::api::external::IdentityMetadataCreateParams;
-use once_cell::sync::Lazy;
+use std::sync::LazyLock;

 /// UUID of built-in VPC Subnet for External DNS.
-pub static DNS_VPC_SUBNET_ID: Lazy<Uuid> = Lazy::new(|| {
+pub static DNS_VPC_SUBNET_ID: LazyLock<Uuid> = LazyLock::new(|| {
     "001de000-c470-4000-8000-000000000001"
         .parse()
         .expect("invalid uuid for builtin external dns vpc subnet id")
 });

 /// UUID of built-in VPC Subnet for Nexus.
-pub static NEXUS_VPC_SUBNET_ID: Lazy<Uuid> = Lazy::new(|| {
+pub static NEXUS_VPC_SUBNET_ID: LazyLock<Uuid> = LazyLock::new(|| {
     "001de000-c470-4000-8000-000000000002"
         .parse()
         .expect("invalid uuid for builtin nexus vpc subnet id")
 });

 /// UUID of built-in VPC Subnet for Boundary NTP.
-pub static NTP_VPC_SUBNET_ID: Lazy<Uuid> = Lazy::new(|| {
+pub static NTP_VPC_SUBNET_ID: LazyLock<Uuid> = LazyLock::new(|| {
     "001de000-c470-4000-8000-000000000003"
         .parse()
         .expect("invalid uuid for builtin boundary ntp vpc subnet id")
 });

 /// UUID of built-in subnet route VPC Subnet route for External DNS.
-pub static DNS_VPC_SUBNET_ROUTE_ID: Lazy<Uuid> = Lazy::new(|| {
-    "001de000-c470-4000-8000-000000000004"
-        .parse()
-        .expect("invalid uuid for builtin services vpc default route id")
-});
+pub static DNS_VPC_SUBNET_ROUTE_ID: LazyLock<Uuid> =
+    LazyLock::new(|| {
+        "001de000-c470-4000-8000-000000000004"
+            .parse()
+            .expect("invalid uuid for builtin services vpc default route id")
+    });

 /// UUID of built-in subnet route VPC Subnet route for Nexus.
-pub static NEXUS_VPC_SUBNET_ROUTE_ID: Lazy<Uuid> = Lazy::new(|| {
-    "001de000-c470-4000-8000-000000000005"
-        .parse()
-        .expect("invalid uuid for builtin services vpc default route id")
-});
+pub static NEXUS_VPC_SUBNET_ROUTE_ID: LazyLock<Uuid> =
+    LazyLock::new(|| {
+        "001de000-c470-4000-8000-000000000005"
+            .parse()
+            .expect("invalid uuid for builtin services vpc default route id")
+    });

 /// UUID of built-in subnet route VPC Subnet route for Boundary NTP.
-pub static NTP_VPC_SUBNET_ROUTE_ID: Lazy<Uuid> = Lazy::new(|| {
-    "001de000-c470-4000-8000-000000000006"
-        .parse()
-        .expect("invalid uuid for builtin services vpc default route id")
-});
+pub static NTP_VPC_SUBNET_ROUTE_ID: LazyLock<Uuid> =
+    LazyLock::new(|| {
+        "001de000-c470-4000-8000-000000000006"
+            .parse()
+            .expect("invalid uuid for builtin services vpc default route id")
+    });

 /// Built-in VPC Subnet for External DNS.
-pub static DNS_VPC_SUBNET: Lazy<VpcSubnet> = Lazy::new(|| {
+pub static DNS_VPC_SUBNET: LazyLock<VpcSubnet> = LazyLock::new(|| {
     VpcSubnet::new(
         *DNS_VPC_SUBNET_ID,
         *super::vpc::SERVICES_VPC_ID,
@@ -68,7 +71,7 @@ pub static DNS_VPC_SUBNET: Lazy<VpcSubnet> = Lazy::new(|| {
 });

 /// Built-in VPC Subnet for Nexus.
-pub static NEXUS_VPC_SUBNET: Lazy<VpcSubnet> = Lazy::new(|| {
+pub static NEXUS_VPC_SUBNET: LazyLock<VpcSubnet> = LazyLock::new(|| {
     VpcSubnet::new(
         *NEXUS_VPC_SUBNET_ID,
         *super::vpc::SERVICES_VPC_ID,
@@ -83,7 +86,7 @@ pub static NEXUS_VPC_SUBNET: Lazy<VpcSubnet> = Lazy::new(|| {
 });

 /// Built-in VPC Subnet for Boundary NTP.
-pub static NTP_VPC_SUBNET: Lazy<VpcSubnet> = Lazy::new(|| {
+pub static NTP_VPC_SUBNET: LazyLock<VpcSubnet> = LazyLock::new(|| {
     VpcSubnet::new(
         *NTP_VPC_SUBNET_ID,
         *super::vpc::SERVICES_VPC_ID,
diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml
index 3e86f28b60f..26de4d9e890 100644
--- a/nexus/db-model/Cargo.toml
+++ b/nexus/db-model/Cargo.toml
@@ -22,7 +22,6 @@ ipnetwork.workspace = true
 macaddr.workspace = true
 newtype_derive.workspace = true
 omicron-uuid-kinds.workspace = true
-once_cell.workspace = true
 oxnet.workspace = true
 parse-display.workspace = true
 # See omicron-rpaths for more about the "pq-sys" dependency.
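One property these fixed-data modules rely on (e.g. `SERVICES_VPC` reading `*SERVICES_VPC_ID`, or the subnets reading `*super::vpc::SERVICES_VPC_ID`) is that one lazy static may deref another during its own initialization; the dependency simply initializes first. A small sketch of that shape, assuming the `uuid` crate and hypothetical names:

    use std::sync::LazyLock;
    use uuid::Uuid;

    static PARENT_ID: LazyLock<Uuid> = LazyLock::new(|| {
        "001de000-074c-4000-8000-000000000000".parse().expect("valid uuid")
    });

    struct Child {
        parent: Uuid,
    }

    static CHILD: LazyLock<Child> = LazyLock::new(|| Child {
        // Deref of another LazyLock is fine here: it forces PARENT_ID to
        // finish initializing before CHILD does.
        parent: *PARENT_ID,
    });

    fn main() {
        assert_eq!(CHILD.parent, *PARENT_ID);
    }

(A cycle between two such statics would deadlock, but the fixed-data graph here is acyclic.)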
diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs
index 367e3c4fd5a..4cfd930ab7e 100644
--- a/nexus/db-model/src/deployment.rs
+++ b/nexus/db-model/src/deployment.rs
@@ -167,30 +167,66 @@ impl_enum_type!(
     Expunged => b"expunged"
 );

-/// Converts a [`BlueprintPhysicalDiskDisposition`] to a version that can be inserted
-/// into a database.
-pub fn to_db_bp_physical_disk_disposition(
-    disposition: BlueprintPhysicalDiskDisposition,
-) -> DbBpPhysicalDiskDisposition {
-    match disposition {
-        BlueprintPhysicalDiskDisposition::InService => {
-            DbBpPhysicalDiskDisposition::InService
-        }
-        BlueprintPhysicalDiskDisposition::Expunged => {
-            DbBpPhysicalDiskDisposition::Expunged
+struct DbBpPhysicalDiskDispositionColumns {
+    disposition: DbBpPhysicalDiskDisposition,
+    expunged_as_of_generation: Option<Generation>,
+    expunged_ready_for_cleanup: bool,
+}
+
+impl From<BlueprintPhysicalDiskDisposition>
+    for DbBpPhysicalDiskDispositionColumns
+{
+    fn from(value: BlueprintPhysicalDiskDisposition) -> Self {
+        let (
+            disposition,
+            disposition_expunged_as_of_generation,
+            disposition_expunged_ready_for_cleanup,
+        ) = match value {
+            BlueprintPhysicalDiskDisposition::InService => {
+                (DbBpPhysicalDiskDisposition::InService, None, false)
+            }
+            BlueprintPhysicalDiskDisposition::Expunged {
+                as_of_generation,
+                ready_for_cleanup,
+            } => (
+                DbBpPhysicalDiskDisposition::Expunged,
+                Some(Generation(as_of_generation)),
+                ready_for_cleanup,
+            ),
+        };
+        Self {
+            disposition,
+            expunged_as_of_generation: disposition_expunged_as_of_generation,
+            expunged_ready_for_cleanup: disposition_expunged_ready_for_cleanup,
         }
     }
 }

-impl From<DbBpPhysicalDiskDisposition> for BlueprintPhysicalDiskDisposition {
-    fn from(disposition: DbBpPhysicalDiskDisposition) -> Self {
-        match disposition {
-            DbBpPhysicalDiskDisposition::InService => {
-                BlueprintPhysicalDiskDisposition::InService
+impl TryFrom<DbBpPhysicalDiskDispositionColumns>
+    for BlueprintPhysicalDiskDisposition
+{
+    type Error = anyhow::Error;
+
+    fn try_from(
+        value: DbBpPhysicalDiskDispositionColumns,
+    ) -> Result<Self, Self::Error> {
+        match (value.disposition, value.expunged_as_of_generation) {
+            (DbBpPhysicalDiskDisposition::InService, None) => {
+                Ok(Self::InService)
             }
-            DbBpPhysicalDiskDisposition::Expunged => {
-                BlueprintPhysicalDiskDisposition::Expunged
+            (DbBpPhysicalDiskDisposition::Expunged, Some(as_of_generation)) => {
+                Ok(Self::Expunged {
+                    as_of_generation: *as_of_generation,
+                    ready_for_cleanup: value.expunged_ready_for_cleanup,
+                })
             }
+            (DbBpPhysicalDiskDisposition::InService, Some(_))
+            | (DbBpPhysicalDiskDisposition::Expunged, None) => Err(anyhow!(
+                "illegal database state (CHECK constraint broken?!): \
+                disposition {:?}, disposition_expunged_as_of_generation {:?}",
+                value.disposition,
+                value.expunged_as_of_generation,
+            )),
         }
     }
 }
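The shape of this mapping — one rich Rust enum flattened into three columns, `From` total in the Rust-to-database direction and `TryFrom` partial coming back — can be checked with a round-trip property. A reduced sketch with stand-in types (not the real omicron-model types):

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Disposition {
        InService,
        Expunged { as_of: u64, ready: bool },
    }

    struct Columns {
        expunged: bool,
        as_of: Option<u64>,
        ready: bool,
    }

    impl From<Disposition> for Columns {
        fn from(d: Disposition) -> Self {
            match d {
                Disposition::InService => {
                    Columns { expunged: false, as_of: None, ready: false }
                }
                Disposition::Expunged { as_of, ready } => {
                    Columns { expunged: true, as_of: Some(as_of), ready }
                }
            }
        }
    }

    impl TryFrom<Columns> for Disposition {
        type Error = String;
        fn try_from(c: Columns) -> Result<Self, Self::Error> {
            match (c.expunged, c.as_of) {
                (false, None) => Ok(Disposition::InService),
                (true, Some(as_of)) => {
                    Ok(Disposition::Expunged { as_of, ready: c.ready })
                }
                // The two states a CHECK constraint should make unrepresentable.
                _ => Err("inconsistent disposition columns".into()),
            }
        }
    }

    fn main() {
        let d = Disposition::Expunged { as_of: 3, ready: false };
        // Every valid disposition survives the trip through the columns.
        assert_eq!(Disposition::try_from(Columns::from(d)).unwrap(), d);
    }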
@@ -232,7 +268,9 @@ pub struct BpOmicronPhysicalDisk {
     pub id: DbTypedUuid<PhysicalDiskKind>,
     pub pool_id: Uuid,

-    pub disposition: DbBpPhysicalDiskDisposition,
+    disposition: DbBpPhysicalDiskDisposition,
+    disposition_expunged_as_of_generation: Option<Generation>,
+    disposition_expunged_ready_for_cleanup: bool,
 }

 impl BpOmicronPhysicalDisk {
@@ -241,6 +279,11 @@ impl BpOmicronPhysicalDisk {
         sled_id: SledUuid,
         disk_config: &BlueprintPhysicalDiskConfig,
     ) -> Self {
+        let DbBpPhysicalDiskDispositionColumns {
+            disposition,
+            expunged_as_of_generation: disposition_expunged_as_of_generation,
+            expunged_ready_for_cleanup: disposition_expunged_ready_for_cleanup,
+        } = disk_config.disposition.into();
         Self {
             blueprint_id: blueprint_id.into(),
             sled_id: sled_id.into(),
@@ -249,17 +292,27 @@ impl BpOmicronPhysicalDisk {
             model: disk_config.identity.model.clone(),
             id: disk_config.id.into(),
             pool_id: disk_config.pool_id.into_untyped_uuid(),
-            disposition: to_db_bp_physical_disk_disposition(
-                disk_config.disposition,
-            ),
+            disposition,
+            disposition_expunged_as_of_generation,
+            disposition_expunged_ready_for_cleanup,
         }
     }
 }

-impl From<BpOmicronPhysicalDisk> for BlueprintPhysicalDiskConfig {
-    fn from(disk: BpOmicronPhysicalDisk) -> Self {
-        Self {
-            disposition: disk.disposition.into(),
+impl TryFrom<BpOmicronPhysicalDisk> for BlueprintPhysicalDiskConfig {
+    type Error = anyhow::Error;
+
+    fn try_from(disk: BpOmicronPhysicalDisk) -> Result<Self, Self::Error> {
+        let disposition_cols = DbBpPhysicalDiskDispositionColumns {
+            disposition: disk.disposition,
+            expunged_as_of_generation: disk
+                .disposition_expunged_as_of_generation,
+            expunged_ready_for_cleanup: disk
+                .disposition_expunged_ready_for_cleanup,
+        };
+
+        Ok(Self {
+            disposition: disposition_cols.try_into()?,
             identity: DiskIdentity {
                 vendor: disk.vendor,
                 serial: disk.serial,
@@ -267,7 +320,7 @@ impl From<BpOmicronPhysicalDisk> for BlueprintPhysicalDiskConfig {
             },
             id: disk.id.into(),
             pool_id: ZpoolUuid::from_untyped_uuid(disk.pool_id),
-        }
+        })
     }
 }
diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index 151e3c2db84..c8fadc55dd3 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -1729,6 +1729,8 @@ table! {
         pool_id -> Uuid,

         disposition -> crate::DbBpPhysicalDiskDispositionEnum,
+        disposition_expunged_as_of_generation -> Nullable<Int8>,
+        disposition_expunged_ready_for_cleanup -> Bool,
     }
 }
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
index 9caaa419400..7905c008386 100644
--- a/nexus/db-model/src/schema_versions.rs
+++ b/nexus/db-model/src/schema_versions.rs
@@ -8,27 +8,27 @@

 use anyhow::{bail, ensure, Context};
 use camino::Utf8Path;
-use omicron_common::api::external::SemverVersion;
-use once_cell::sync::Lazy;
-use std::collections::BTreeMap;
+use semver::Version;
+use std::{collections::BTreeMap, sync::LazyLock};

 /// The version of the database schema this particular version of Nexus was
 /// built against
 ///
 /// This must be updated when you change the database schema. Refer to
 /// schema/crdb/README.adoc in the root of this repository for details.
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(126, 0, 0);
+pub const SCHEMA_VERSION: Version = Version::new(127, 0, 0);

 /// List of all past database schema versions, in *reverse* order
 ///
 /// If you want to change the Omicron database schema, you must update this.
-static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
+static KNOWN_VERSIONS: LazyLock<Vec<KnownVersion>> = LazyLock::new(|| {
     vec![
         // +-  The next version goes here!  Duplicate this line, uncomment
         //  |  the *second* copy, then update that copy for your version,
         //  |  leaving the first copy as an example for the next person.
         //  v
         // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
+        KnownVersion::new(127, "bp-disk-disposition-expunged-cleanup"),
         KnownVersion::new(126, "affinity"),
         KnownVersion::new(125, "blueprint-disposition-expunged-cleanup"),
         KnownVersion::new(124, "support-read-only-region-replacement"),
@@ -169,15 +169,14 @@ static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
 });

 /// The earliest supported schema version.
-pub const EARLIEST_SUPPORTED_VERSION: SemverVersion =
-    SemverVersion::new(1, 0, 0);
+pub const EARLIEST_SUPPORTED_VERSION: Version = Version::new(1, 0, 0);
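The test module later in this file (`verify_known_versions`) enforces the invariants this list depends on; the core relationships are simple enough to state directly. A sketch with illustrative values, assuming the `semver` crate — not a substitute for the real test:

    use semver::Version;

    fn main() {
        // Mirrors SCHEMA_VERSION and the head of KNOWN_VERSIONS above.
        let schema_version = Version::new(127, 0, 0);
        let known = [Version::new(127, 0, 0), Version::new(126, 0, 0)];

        // KNOWN_VERSIONS is newest-first, and its head must equal SCHEMA_VERSION.
        assert_eq!(known.first(), Some(&schema_version));

        // Walking down the list, each major is equal to or one greater
        // than the next entry's major.
        for pair in known.windows(2) {
            assert!(
                pair[0].major == pair[1].major
                    || pair[0].major == pair[1].major + 1
            );
        }
    }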
fn new(major: u64, relative_path: &str) -> KnownVersion { - let semver = SemverVersion::new(major, 0, 0); + let semver = Version::new(major, 0, 0); KnownVersion { semver, relative_path: relative_path.to_owned() } } @@ -206,7 +205,7 @@ impl KnownVersion { /// /// **This should not be used for new schema versions.** fn legacy(major: u64, patch: u64) -> KnownVersion { - let semver = SemverVersion::new(major, 0, patch); + let semver = Version::new(major, 0, patch); let relative_path = semver.to_string(); KnownVersion { semver, relative_path } } @@ -221,7 +220,7 @@ impl std::fmt::Display for KnownVersion { /// Load and inspect the set of all known schema versions #[derive(Debug, Clone)] pub struct AllSchemaVersions { - versions: BTreeMap, + versions: BTreeMap, } impl AllSchemaVersions { @@ -244,12 +243,12 @@ impl AllSchemaVersions { #[doc(hidden)] pub fn load_specific_legacy_versions<'a>( schema_directory: &Utf8Path, - versions: impl Iterator, + versions: impl Iterator, ) -> Result { let known_versions: Vec<_> = versions .map(|v| { - assert_eq!(v.0.minor, 0); - KnownVersion::legacy(v.0.major, v.0.patch) + assert_eq!(v.minor, 0); + KnownVersion::legacy(v.major, v.patch) }) .collect(); @@ -288,7 +287,7 @@ impl AllSchemaVersions { } /// Return whether `version` is a known schema version - pub fn contains_version(&self, version: &SemverVersion) -> bool { + pub fn contains_version(&self, version: &Version) -> bool { self.versions.contains_key(version) } @@ -301,7 +300,7 @@ impl AllSchemaVersions { bounds: R, ) -> impl Iterator where - R: std::ops::RangeBounds, + R: std::ops::RangeBounds, { self.versions.range(bounds).map(|(_, v)| v) } @@ -311,7 +310,7 @@ impl AllSchemaVersions { /// from the previous version to the current one #[derive(Debug, Clone)] pub struct SchemaVersion { - semver: SemverVersion, + semver: Version, upgrade_from_previous: Vec, } @@ -332,7 +331,7 @@ impl SchemaVersion { /// Any violation of these two rules will result in an error. Collections of /// the second form (`up1.sql`, ...) will be sorted numerically. 
fn load_from_directory( - semver: SemverVersion, + semver: Version, directory: &Utf8Path, ) -> Result { let mut up_sqls = vec![]; @@ -434,7 +433,7 @@ impl SchemaVersion { } /// Returns the semver for this schema version - pub fn semver(&self) -> &SemverVersion { + pub fn semver(&self) -> &Version { &self.semver } @@ -506,8 +505,8 @@ mod test { // EARLIEST_SUPPORTED_VERSION is somehow wrong let error = verify_known_versions( [&KnownVersion::legacy(2, 0), &KnownVersion::legacy(3, 0)], - &SemverVersion::new(1, 0, 0), - &SemverVersion::new(3, 0, 0), + &Version::new(1, 0, 0), + &Version::new(3, 0, 0), 100, ) .unwrap_err(); @@ -519,8 +518,8 @@ mod test { // SCHEMA_VERSION was not updated let error = verify_known_versions( [&KnownVersion::legacy(1, 0), &KnownVersion::legacy(2, 0)], - &SemverVersion::new(1, 0, 0), - &SemverVersion::new(1, 0, 0), + &Version::new(1, 0, 0), + &Version::new(1, 0, 0), 100, ) .unwrap_err(); @@ -537,7 +536,7 @@ mod test { &KnownVersion::legacy(2, 0), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(2, 0, 0), + &Version::new(2, 0, 0), 100, ) .unwrap_err(); @@ -554,7 +553,7 @@ mod test { &KnownVersion::new(2, "dir2"), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(2, 0, 0), + &Version::new(2, 0, 0), 100, ) .unwrap_err(); @@ -571,7 +570,7 @@ mod test { &KnownVersion::legacy(1, 3), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(3, 0, 0), + &Version::new(3, 0, 0), 100, ) .unwrap_err(); @@ -588,7 +587,7 @@ mod test { &KnownVersion::legacy(4, 0), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(4, 0, 0), + &Version::new(4, 0, 0), 100, ) .unwrap_err(); @@ -608,7 +607,7 @@ mod test { &KnownVersion::legacy(3, 2), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(3, 0, 2), + &Version::new(3, 0, 2), 2, ) .unwrap_err(); @@ -624,7 +623,7 @@ mod test { &KnownVersion::legacy(3, 0), ], &EARLIEST_SUPPORTED_VERSION, - &SemverVersion::new(3, 0, 0), + &Version::new(3, 0, 0), 2, ) .unwrap_err(); @@ -638,8 +637,8 @@ mod test { fn verify_known_versions<'a, I>( // list of known versions in order from earliest to latest known_versions: I, - earliest: &SemverVersion, - latest: &SemverVersion, + earliest: &Version, + latest: &Version, min_strict_major: u64, ) -> Result<(), anyhow::Error> where @@ -673,8 +672,8 @@ mod test { // past schema versions only bumped the patch number for whatever // reason. ensure!( - v.semver.0.major == prev.semver.0.major - || v.semver.0.major == prev.semver.0.major + 1, + v.semver.major == prev.semver.major + || v.semver.major == prev.semver.major + 1, "KNOWN_VERSION {} appears directly after {}, but its major \ number is neither the same nor one greater", v, @@ -684,7 +683,7 @@ mod test { // We never allowed minor versions to be zero and it is not // currently possible to even construct one that had a non-zero // minor number. - ensure!(v.semver.0.minor == 0, "new minor versions must be zero"); + ensure!(v.semver.minor == 0, "new minor versions must be zero"); // We changed things after version 45 to require that: // @@ -694,11 +693,8 @@ mod test { // // After version 45, we do not allow non-zero minor or patch // numbers. 
- if v.semver.0.major > min_strict_major { - ensure!( - v.semver.0.patch == 0, - "new patch versions must be zero" - ); + if v.semver.major > min_strict_major { + ensure!(v.semver.patch == 0, "new patch versions must be zero"); ensure!( !v.relative_path.contains(&v.semver.to_string()), "the relative path for a version should not contain the \ @@ -745,7 +741,7 @@ mod test { let filename = tempdir.path().join(invalid_filename); _ = tokio::fs::File::create(&filename).await.unwrap(); let maybe_schema = SchemaVersion::load_from_directory( - SemverVersion::new(12, 0, 0), + Version::new(12, 0, 0), tempdir.path(), ); match maybe_schema { @@ -783,7 +779,7 @@ mod test { } let maybe_schema = SchemaVersion::load_from_directory( - SemverVersion::new(12, 0, 0), + Version::new(12, 0, 0), tempdir.path(), ); match maybe_schema { @@ -822,7 +818,7 @@ mod test { } let maybe_schema = SchemaVersion::load_from_directory( - SemverVersion::new(12, 0, 0), + Version::new(12, 0, 0), tempdir.path(), ); match maybe_schema { @@ -866,7 +862,7 @@ mod test { } let maybe_schema = SchemaVersion::load_from_directory( - SemverVersion::new(12, 0, 0), + Version::new(12, 0, 0), tempdir.path(), ); match maybe_schema { diff --git a/nexus/db-model/src/semver_version.rs b/nexus/db-model/src/semver_version.rs index f314e98ab30..47ab9408f50 100644 --- a/nexus/db-model/src/semver_version.rs +++ b/nexus/db-model/src/semver_version.rs @@ -11,9 +11,6 @@ use omicron_common::api::external; use parse_display::Display; use serde::{Deserialize, Serialize}; -// We wrap semver::Version in external to impl JsonSchema, and we wrap it again -// here to impl ToSql/FromSql - /// Semver version with zero-padded numbers in `ToSql`/`FromSql` to allow /// lexicographic DB sorting #[derive( @@ -30,10 +27,10 @@ use serde::{Deserialize, Serialize}; )] #[diesel(sql_type = sql_types::Text)] #[display("{0}")] -pub struct SemverVersion(pub external::SemverVersion); +pub struct SemverVersion(pub semver::Version); -NewtypeFrom! { () pub struct SemverVersion(external::SemverVersion); } -NewtypeDeref! { () pub struct SemverVersion(external::SemverVersion); } +NewtypeFrom! { () pub struct SemverVersion(semver::Version); } +NewtypeDeref! { () pub struct SemverVersion(semver::Version); } /// Width of version numbers after zero-padding. `u8` because you can always /// convert to both `u32` and `usize`. Everything having to do with ser/de on @@ -44,7 +41,7 @@ const PADDED_WIDTH: u8 = 8; impl SemverVersion { pub fn new(major: u64, minor: u64, patch: u64) -> Self { - Self(external::SemverVersion(semver::Version::new(major, minor, patch))) + Self(semver::Version::new(major, minor, patch)) } } @@ -65,7 +62,7 @@ impl SemverVersion { /// /// Compare to the `Display` implementation on `Semver::Version` /// -fn to_sortable_string(v: semver::Version) -> Result { +fn to_sortable_string(v: &semver::Version) -> Result { // the largest N-digit number is 10^N - 1 let max = u64::pow(10, u32::from(PADDED_WIDTH)) - 1; @@ -130,8 +127,7 @@ where &'b self, out: &mut serialize::Output<'b, '_, DB>, ) -> serialize::Result { - let v = (self.0).0.to_owned(); - to_sortable_string(v)?.to_sql(&mut out.reborrow()) + to_sortable_string(&self.0)?.to_sql(&mut out.reborrow()) } } @@ -145,7 +141,7 @@ where fn from_sql(raw: DB::RawValue<'_>) -> deserialize::Result { from_sortable_string(String::from_sql(raw)?) 
             .parse()
-            .map(|s| SemverVersion(external::SemverVersion(s)))
+            .map(SemverVersion)
             .map_err(|e| e.into())
     }
 }

@@ -169,7 +165,7 @@ mod test {
         ];
         for (orig, padded) in pairs {
             let v = orig.parse::<semver::Version>().unwrap();
-            assert_eq!(&to_sortable_string(v).unwrap(), padded);
+            assert_eq!(&to_sortable_string(&v).unwrap(), padded);
             assert_eq!(&from_sortable_string(padded.to_string()), orig);
         }
     }
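The padding trick that `to_sortable_string` implements is worth seeing in isolation: naive string comparison sorts "10.0.0" before "2.0.0", while fixed-width zero padding makes lexicographic and numeric order coincide. A simplified sketch (no overflow check and no prerelease handling, both of which the real code must deal with):

    fn to_sortable(v: &semver::Version) -> String {
        // 8 digits per field, matching PADDED_WIDTH above.
        format!("{:08}.{:08}.{:08}", v.major, v.minor, v.patch)
    }

    fn main() {
        let small: semver::Version = "2.0.0".parse().unwrap();
        let large: semver::Version = "10.0.0".parse().unwrap();

        assert!(small < large); // semver order: 2 < 10
        assert!("2.0.0" > "10.0.0"); // naive string order gets it backwards
        assert!(to_sortable(&small) < to_sortable(&large)); // padded order is right
    }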
.get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn anti_affinity_group_list( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + use db::schema::anti_affinity_group::dsl; + + opctx.authorize(authz::Action::ListChildren, authz_project).await?; + + match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(dsl::anti_affinity_group, dsl::id, &pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + dsl::anti_affinity_group, + dsl::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .filter(dsl::project_id.eq(authz_project.id())) + .filter(dsl::time_deleted.is_null()) + .select(AntiAffinityGroup::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn affinity_group_create( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + group: AffinityGroup, + ) -> CreateResult { + use db::schema::affinity_group::dsl; + + opctx.authorize(authz::Action::CreateChild, authz_project).await?; + + let conn = self.pool_connection_authorized(opctx).await?; + let name = group.name().as_str().to_string(); + + let affinity_group: AffinityGroup = Project::insert_resource( + authz_project.id(), + diesel::insert_into(dsl::affinity_group).values(group), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => authz_project.not_found(), + AsyncInsertError::DatabaseError(diesel_error) => { + public_error_from_diesel( + diesel_error, + ErrorHandler::Conflict(ResourceType::AffinityGroup, &name), + ) + } + })?; + Ok(affinity_group) + } + + pub async fn anti_affinity_group_create( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + group: AntiAffinityGroup, + ) -> CreateResult { + use db::schema::anti_affinity_group::dsl; + + opctx.authorize(authz::Action::CreateChild, authz_project).await?; + + let conn = self.pool_connection_authorized(opctx).await?; + let name = group.name().as_str().to_string(); + + let anti_affinity_group: AntiAffinityGroup = Project::insert_resource( + authz_project.id(), + diesel::insert_into(dsl::anti_affinity_group).values(group), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => authz_project.not_found(), + AsyncInsertError::DatabaseError(diesel_error) => { + public_error_from_diesel( + diesel_error, + ErrorHandler::Conflict( + ResourceType::AntiAffinityGroup, + &name, + ), + ) + } + })?; + Ok(anti_affinity_group) + } + + pub async fn affinity_group_update( + &self, + opctx: &OpContext, + authz_affinity_group: &authz::AffinityGroup, + updates: AffinityGroupUpdate, + ) -> UpdateResult { + opctx.authorize(authz::Action::Modify, authz_affinity_group).await?; + + use db::schema::affinity_group::dsl; + diesel::update(dsl::affinity_group) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(authz_affinity_group.id())) + .set(updates) + .returning(AffinityGroup::as_returning()) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
+    pub async fn affinity_group_update(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+        updates: AffinityGroupUpdate,
+    ) -> UpdateResult<AffinityGroup> {
+        opctx.authorize(authz::Action::Modify, authz_affinity_group).await?;
+
+        use db::schema::affinity_group::dsl;
+        diesel::update(dsl::affinity_group)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_affinity_group.id()))
+            .set(updates)
+            .returning(AffinityGroup::as_returning())
+            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_affinity_group),
+                )
+            })
+    }
+
+    pub async fn affinity_group_delete(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_affinity_group).await?;
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("affinity_group_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                async move {
+                    use db::schema::affinity_group::dsl as group_dsl;
+                    let now = Utc::now();
+
+                    // Delete the Affinity Group
+                    diesel::update(group_dsl::affinity_group)
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_affinity_group.id()))
+                        .set(group_dsl::time_deleted.eq(now))
+                        .returning(AffinityGroup::as_returning())
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    // Ensure all memberships in the affinity group are deleted
+                    use db::schema::affinity_group_instance_membership::dsl as member_dsl;
+                    diesel::delete(member_dsl::affinity_group_instance_membership)
+                        .filter(member_dsl::group_id.eq(authz_affinity_group.id()))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(e, ErrorHandler::Server)
+                            })
+                        })?;
+
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+
+    pub async fn anti_affinity_group_update(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+        updates: AntiAffinityGroupUpdate,
+    ) -> UpdateResult<AntiAffinityGroup> {
+        opctx
+            .authorize(authz::Action::Modify, authz_anti_affinity_group)
+            .await?;
+
+        use db::schema::anti_affinity_group::dsl;
+        diesel::update(dsl::anti_affinity_group)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_anti_affinity_group.id()))
+            .set(updates)
+            .returning(AntiAffinityGroup::as_returning())
+            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_anti_affinity_group),
+                )
+            })
+    }
+
+    pub async fn anti_affinity_group_delete(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+    ) -> DeleteResult {
+        opctx
+            .authorize(authz::Action::Delete, authz_anti_affinity_group)
+            .await?;
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("anti_affinity_group_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                async move {
+                    use db::schema::anti_affinity_group::dsl as group_dsl;
+                    let now = Utc::now();
+
+                    // Delete the Anti Affinity Group
+                    diesel::update(group_dsl::anti_affinity_group)
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_anti_affinity_group.id()))
+                        .set(group_dsl::time_deleted.eq(now))
+                        .returning(AntiAffinityGroup::as_returning())
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_anti_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    // Ensure all memberships in the anti affinity group are deleted
+                    use db::schema::anti_affinity_group_instance_membership::dsl as member_dsl;
+                    diesel::delete(member_dsl::anti_affinity_group_instance_membership)
+                        .filter(member_dsl::group_id.eq(authz_anti_affinity_group.id()))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(e, ErrorHandler::Server)
+                            })
+                        })?;
+
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+
+    pub async fn affinity_group_member_list(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<AffinityGroupInstanceMembership> {
+        opctx.authorize(authz::Action::Read, authz_affinity_group).await?;
+
+        use db::schema::affinity_group_instance_membership::dsl;
+        match pagparams {
+            PaginatedBy::Id(pagparams) => paginated(
+                dsl::affinity_group_instance_membership,
+                dsl::instance_id,
+                &pagparams,
+            ),
+            PaginatedBy::Name(_) => {
+                return Err(Error::invalid_request(
+                    "Cannot paginate group members by name",
+                ));
+            }
+        }
+        .filter(dsl::group_id.eq(authz_affinity_group.id()))
+        .select(AffinityGroupInstanceMembership::as_select())
+        .load_async(&*self.pool_connection_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    pub async fn anti_affinity_group_member_list(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<AntiAffinityGroupInstanceMembership> {
+        opctx.authorize(authz::Action::Read, authz_anti_affinity_group).await?;
+
+        use db::schema::anti_affinity_group_instance_membership::dsl;
+        match pagparams {
+            PaginatedBy::Id(pagparams) => paginated(
+                dsl::anti_affinity_group_instance_membership,
+                dsl::instance_id,
+                &pagparams,
+            ),
+            PaginatedBy::Name(_) => {
+                return Err(Error::invalid_request(
+                    "Cannot paginate group members by name",
+                ));
+            }
+        }
+        .filter(dsl::group_id.eq(authz_anti_affinity_group.id()))
+        .select(AntiAffinityGroupInstanceMembership::as_select())
+        .load_async(&*self.pool_connection_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    pub async fn affinity_group_member_view(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+        member: external::AffinityGroupMember,
+    ) -> Result<external::AffinityGroupMember, Error> {
+        opctx.authorize(authz::Action::Read, authz_affinity_group).await?;
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        let instance_id = match member {
+            external::AffinityGroupMember::Instance(id) => id,
+        };
+
+        use db::schema::affinity_group_instance_membership::dsl;
+        dsl::affinity_group_instance_membership
+            .filter(dsl::group_id.eq(authz_affinity_group.id()))
+            .filter(dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+            .select(AffinityGroupInstanceMembership::as_select())
+            .get_result_async(&*conn)
+            .await
+            .map(|m| m.into())
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::AffinityGroupMember,
+                        LookupType::by_id(instance_id.into_untyped_uuid()),
+                    ),
+                )
+            })
+    }
+
+    pub async fn anti_affinity_group_member_view(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+        member: external::AntiAffinityGroupMember,
+    ) -> Result<external::AntiAffinityGroupMember, Error> {
+        opctx.authorize(authz::Action::Read, authz_anti_affinity_group).await?;
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        let instance_id = match member {
+            external::AntiAffinityGroupMember::Instance(id) => id,
+        };
+
+        use db::schema::anti_affinity_group_instance_membership::dsl;
+        dsl::anti_affinity_group_instance_membership
+            .filter(dsl::group_id.eq(authz_anti_affinity_group.id()))
+            .filter(dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+            .select(AntiAffinityGroupInstanceMembership::as_select())
+            .get_result_async(&*conn)
+            .await
+            .map(|m| m.into())
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::AntiAffinityGroupMember,
+                        LookupType::by_id(instance_id.into_untyped_uuid()),
+                    ),
+                )
+            })
+    }
+
+    pub async fn affinity_group_member_add(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+        member: external::AffinityGroupMember,
+    ) -> Result<(), Error> {
+        opctx.authorize(authz::Action::Modify, authz_affinity_group).await?;
+
+        let instance_id = match member {
+            external::AffinityGroupMember::Instance(id) => id,
+        };
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("affinity_group_member_add")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                use db::schema::affinity_group::dsl as group_dsl;
+                use db::schema::affinity_group_instance_membership::dsl as membership_dsl;
+                use db::schema::instance::dsl as instance_dsl;
+                use db::schema::sled_resource::dsl as resource_dsl;
+
+                async move {
+                    // Check that the group exists
+                    group_dsl::affinity_group
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_affinity_group.id()))
+                        .select(group_dsl::id)
+                        .first_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    // Check that the instance exists, and has no sled
+                    // reservation.
+                    //
+                    // NOTE: I'd prefer to use the "LookupPath" infrastructure
+                    // to look up the path, but that API does not give the
+                    // option to use the transaction's database connection.
+                    //
+                    // Looking up the instance on a different database
+                    // connection than the transaction risks several concurrency
+                    // issues, so we do the lookup manually.
+
+                    let _check_instance_exists = instance_dsl::instance
+                        .filter(instance_dsl::time_deleted.is_null())
+                        .filter(instance_dsl::id.eq(instance_id.into_untyped_uuid()))
+                        .select(instance_dsl::id)
+                        .get_result_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByLookup(
+                                        ResourceType::Instance,
+                                        LookupType::ById(instance_id.into_untyped_uuid())
+                                    ),
+                                )
+                            })
+                        })?;
+                    let has_reservation: bool = diesel::select(
+                        diesel::dsl::exists(
+                            resource_dsl::sled_resource
+                                .filter(resource_dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+                        )
+                    ).get_result_async(&conn)
+                    .await
+                    .map_err(|e| {
+                        err.bail_retryable_or_else(e, |e| {
+                            public_error_from_diesel(
+                                e,
+                                ErrorHandler::Server,
+                            )
+                        })
+                    })?;
+
+                    // NOTE: It may be possible to add non-stopped instances to
+                    // affinity groups, depending on where they have already
+                    // been placed. However, only operating on "stopped"
+                    // instances is much easier to work with, as it does not
+                    // require any understanding of the group policy.
+                    if has_reservation {
+                        return Err(err.bail(Error::invalid_request(
+                            "Instance cannot be added to affinity group with reservation".to_string()
+                        )));
+                    }
+
+                    diesel::insert_into(membership_dsl::affinity_group_instance_membership)
+                        .values(AffinityGroupInstanceMembership::new(
+                            AffinityGroupUuid::from_untyped_uuid(authz_affinity_group.id()),
+                            instance_id,
+                        ))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::Conflict(
+                                        ResourceType::AffinityGroupMember,
+                                        &instance_id.to_string(),
+                                    ),
+                                )
+                            })
+                        })?;
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
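The reservation guard above is a plain `SELECT EXISTS (...)` issued on the transaction's connection. The generic diesel shape of that check, sketched against a hypothetical table rather than the real `sled_resource` schema (sync diesel with the `postgres` feature, for brevity; the datastore itself goes through async_bb8_diesel):

    use diesel::dsl::exists;
    use diesel::prelude::*;

    diesel::table! {
        reservations (id) {
            id -> Int4,
            instance_id -> Int4,
        }
    }

    // Returns true if any reservation row references the instance.
    fn has_reservation(
        conn: &mut PgConnection,
        instance: i32,
    ) -> QueryResult<bool> {
        use self::reservations::dsl::*;
        diesel::select(exists(reservations.filter(instance_id.eq(instance))))
            .get_result(conn)
    }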
+    pub async fn anti_affinity_group_member_add(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+        member: external::AntiAffinityGroupMember,
+    ) -> Result<(), Error> {
+        opctx
+            .authorize(authz::Action::Modify, authz_anti_affinity_group)
+            .await?;
+
+        let instance_id = match member {
+            external::AntiAffinityGroupMember::Instance(id) => id,
+        };
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("anti_affinity_group_member_add")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                use db::schema::anti_affinity_group::dsl as group_dsl;
+                use db::schema::anti_affinity_group_instance_membership::dsl as membership_dsl;
+                use db::schema::instance::dsl as instance_dsl;
+                use db::schema::sled_resource::dsl as resource_dsl;
+
+                async move {
+                    // Check that the group exists
+                    group_dsl::anti_affinity_group
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_anti_affinity_group.id()))
+                        .select(group_dsl::id)
+                        .first_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_anti_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    // Check that the instance exists, and has no sled
+                    // reservation.
+                    let _check_instance_exists = instance_dsl::instance
+                        .filter(instance_dsl::time_deleted.is_null())
+                        .filter(instance_dsl::id.eq(instance_id.into_untyped_uuid()))
+                        .select(instance_dsl::id)
+                        .get_result_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByLookup(
+                                        ResourceType::Instance,
+                                        LookupType::ById(instance_id.into_untyped_uuid())
+                                    ),
+                                )
+                            })
+                        })?;
+                    let has_reservation: bool = diesel::select(
+                        diesel::dsl::exists(
+                            resource_dsl::sled_resource
+                                .filter(resource_dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+                        )
+                    ).get_result_async(&conn)
+                    .await
+                    .map_err(|e| {
+                        err.bail_retryable_or_else(e, |e| {
+                            public_error_from_diesel(
+                                e,
+                                ErrorHandler::Server,
+                            )
+                        })
+                    })?;
+
+                    // NOTE: It may be possible to add non-stopped instances to
+                    // anti-affinity groups, depending on where they have already
+                    // been placed. However, only operating on "stopped"
+                    // instances is much easier to work with, as it does not
+                    // require any understanding of the group policy.
+                    if has_reservation {
+                        return Err(err.bail(Error::invalid_request(
+                            "Instance cannot be added to anti-affinity group with reservation".to_string()
+                        )));
+                    }
+
+                    diesel::insert_into(membership_dsl::anti_affinity_group_instance_membership)
+                        .values(AntiAffinityGroupInstanceMembership::new(
+                            AntiAffinityGroupUuid::from_untyped_uuid(authz_anti_affinity_group.id()),
+                            instance_id,
+                        ))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::Conflict(
+                                        ResourceType::AntiAffinityGroupMember,
+                                        &instance_id.to_string(),
+                                    ),
+                                )
+                            })
+                        })?;
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+
+    pub async fn instance_affinity_group_memberships_delete(
+        &self,
+        opctx: &OpContext,
+        instance_id: InstanceUuid,
+    ) -> Result<(), Error> {
+        use db::schema::affinity_group_instance_membership::dsl;
+
+        diesel::delete(dsl::affinity_group_instance_membership)
+            .filter(dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+            .execute_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+        Ok(())
+    }
+
+    pub async fn instance_anti_affinity_group_memberships_delete(
+        &self,
+        opctx: &OpContext,
+        instance_id: InstanceUuid,
+    ) -> Result<(), Error> {
+        use db::schema::anti_affinity_group_instance_membership::dsl;
+
+        diesel::delete(dsl::anti_affinity_group_instance_membership)
+            .filter(dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+            .execute_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+        Ok(())
+    }
+
+    pub async fn affinity_group_member_delete(
+        &self,
+        opctx: &OpContext,
+        authz_affinity_group: &authz::AffinityGroup,
+        member: external::AffinityGroupMember,
+    ) -> Result<(), Error> {
+        opctx.authorize(authz::Action::Modify, authz_affinity_group).await?;
+
+        let instance_id = match member {
+            external::AffinityGroupMember::Instance(id) => id,
+        };
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("affinity_group_member_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                use db::schema::affinity_group::dsl as group_dsl;
+                use db::schema::affinity_group_instance_membership::dsl as membership_dsl;
+
+                async move {
+                    // Check that the group exists
+                    group_dsl::affinity_group
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_affinity_group.id()))
+                        .select(group_dsl::id)
+                        .first_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    let rows = diesel::delete(membership_dsl::affinity_group_instance_membership)
+                        .filter(membership_dsl::group_id.eq(authz_affinity_group.id()))
+                        .filter(membership_dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(e, ErrorHandler::Server)
+                            })
+                        })?;
+                    if rows == 0 {
+                        return Err(err.bail(LookupType::ById(instance_id.into_untyped_uuid()).into_not_found(
+                            ResourceType::AffinityGroupMember,
+                        )));
+                    }
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
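Throughout this file, `transaction_retry_wrapper` plus `OptionalError` separates two failure modes: retryable database errors, and terminal application errors that must abort the transaction and surface to the caller. A self-contained emulation of that control flow with simplified stand-ins — not the omicron `transaction_retry` API itself:

    use std::sync::{Arc, Mutex};

    #[derive(Clone, Default)]
    struct OptionalError<E>(Arc<Mutex<Option<E>>>);

    impl<E> OptionalError<E> {
        // Record a terminal application error and force a rollback.
        fn bail(&self, e: E) -> TxError {
            *self.0.lock().unwrap() = Some(e);
            TxError::Rollback
        }
        fn take(&self) -> Option<E> {
            self.0.lock().unwrap().take()
        }
    }

    #[derive(Debug, PartialEq)]
    enum TxError {
        Rollback,
        Serialization,
    }

    // Stand-in for transaction_retry_wrapper: retry serialization failures,
    // surface everything else.
    fn run_with_retries<T>(
        mut body: impl FnMut() -> Result<T, TxError>,
    ) -> Result<T, TxError> {
        loop {
            match body() {
                Err(TxError::Serialization) => continue,
                other => return other,
            }
        }
    }

    fn main() {
        let err = OptionalError::<String>::default();
        let result = run_with_retries(|| {
            // Pretend the row we wanted to delete was already gone.
            Err::<(), _>(err.bail("no such member".to_string()))
        });
        assert_eq!(result, Err(TxError::Rollback));
        // The caller recovers the terminal error out-of-band, as the
        // `if let Some(err) = err.take()` blocks above do.
        assert_eq!(err.take().as_deref(), Some("no such member"));
    }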
+    pub async fn anti_affinity_group_member_delete(
+        &self,
+        opctx: &OpContext,
+        authz_anti_affinity_group: &authz::AntiAffinityGroup,
+        member: external::AntiAffinityGroupMember,
+    ) -> Result<(), Error> {
+        opctx
+            .authorize(authz::Action::Modify, authz_anti_affinity_group)
+            .await?;
+
+        let instance_id = match member {
+            external::AntiAffinityGroupMember::Instance(id) => id,
+        };
+
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+        self.transaction_retry_wrapper("anti_affinity_group_member_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                use db::schema::anti_affinity_group::dsl as group_dsl;
+                use db::schema::anti_affinity_group_instance_membership::dsl as membership_dsl;
+
+                async move {
+                    // Check that the group exists
+                    group_dsl::anti_affinity_group
+                        .filter(group_dsl::time_deleted.is_null())
+                        .filter(group_dsl::id.eq(authz_anti_affinity_group.id()))
+                        .select(group_dsl::id)
+                        .first_async::<uuid::Uuid>(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(
+                                    e,
+                                    ErrorHandler::NotFoundByResource(
+                                        authz_anti_affinity_group,
+                                    ),
+                                )
+                            })
+                        })?;
+
+                    let rows = diesel::delete(membership_dsl::anti_affinity_group_instance_membership)
+                        .filter(membership_dsl::group_id.eq(authz_anti_affinity_group.id()))
+                        .filter(membership_dsl::instance_id.eq(instance_id.into_untyped_uuid()))
+                        .execute_async(&conn)
+                        .await
+                        .map_err(|e| {
+                            err.bail_retryable_or_else(e, |e| {
+                                public_error_from_diesel(e, ErrorHandler::Server)
+                            })
+                        })?;
+                    if rows == 0 {
+                        return Err(err.bail(LookupType::ById(instance_id.into_untyped_uuid()).into_not_found(
+                            ResourceType::AntiAffinityGroupMember,
+                        )));
+                    }
+                    Ok(())
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    return err;
+                }
+                public_error_from_diesel(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::db::lookup::LookupPath;
+    use crate::db::pub_test_utils::TestDatabase;
+    use nexus_db_model::Instance;
+    use nexus_db_model::Resources;
+    use nexus_db_model::SledResource;
+    use nexus_types::external_api::params;
+    use omicron_common::api::external::{
+        self, ByteCount, DataPageParams, IdentityMetadataCreateParams,
+    };
+    use omicron_test_utils::dev;
+    use omicron_uuid_kinds::InstanceUuid;
+    use omicron_uuid_kinds::PropolisUuid;
+    use omicron_uuid_kinds::SledUuid;
+    use std::num::NonZeroU32;
+
+    // Helper function for creating a project
+    async fn create_project(
+        opctx: &OpContext,
+        datastore: &DataStore,
+        name: &str,
+    ) -> (authz::Project, Project) {
+        let authz_silo = opctx.authn.silo_required().unwrap();
+        let project = Project::new(
+            authz_silo.id(),
+            params::ProjectCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: name.parse().unwrap(),
+                    description: "".to_string(),
+                },
+            },
+        );
+        datastore.project_create(&opctx, project).await.unwrap()
+    }
+
+    // Helper function for creating an affinity group with
+    // arbitrary configuration.
+    async fn create_affinity_group(
+        opctx: &OpContext,
+        datastore: &DataStore,
+        authz_project: &authz::Project,
+        name: &str,
+    ) -> CreateResult<AffinityGroup> {
+        let group = AffinityGroup::new(
+            authz_project.id(),
+            params::AffinityGroupCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: name.parse().unwrap(),
+                    description: "".to_string(),
+                },
+                policy: external::AffinityPolicy::Fail,
+                failure_domain: external::FailureDomain::Sled,
+            },
+        );
+        datastore.affinity_group_create(&opctx, &authz_project, group).await
+    }
+
+    // Helper function for creating an anti-affinity group with
+    // arbitrary configuration.
+    async fn create_anti_affinity_group(
+        opctx: &OpContext,
+        datastore: &DataStore,
+        authz_project: &authz::Project,
+        name: &str,
+    ) -> CreateResult<AntiAffinityGroup> {
+        let group = AntiAffinityGroup::new(
+            authz_project.id(),
+            params::AntiAffinityGroupCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: name.parse().unwrap(),
+                    description: "".to_string(),
+                },
+                policy: external::AffinityPolicy::Fail,
+                failure_domain: external::FailureDomain::Sled,
+            },
+        );
+        datastore
+            .anti_affinity_group_create(&opctx, &authz_project, group)
+            .await
+    }
+
+    // Helper function for creating an instance without a VMM.
+    async fn create_instance_record(
+        opctx: &OpContext,
+        datastore: &DataStore,
+        authz_project: &authz::Project,
+        name: &str,
+    ) -> Instance {
+        let instance = Instance::new(
+            InstanceUuid::new_v4(),
+            authz_project.id(),
+            &params::InstanceCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: name.parse().unwrap(),
+                    description: "".to_string(),
+                },
+                ncpus: 2i64.try_into().unwrap(),
+                memory: ByteCount::from_gibibytes_u32(16),
+                hostname: "myhostname".try_into().unwrap(),
+                user_data: Vec::new(),
+                network_interfaces:
+                    params::InstanceNetworkInterfaceAttachment::None,
+                external_ips: Vec::new(),
+                disks: Vec::new(),
+                boot_disk: None,
+                ssh_public_keys: None,
+                start: false,
+                auto_restart_policy: Default::default(),
+            },
+        );
+
+        let instance = datastore
+            .project_create_instance(&opctx, &authz_project, instance)
+            .await
+            .unwrap();
+
+        set_instance_state_stopped(&datastore, instance.id()).await;
+
+        instance
+    }
+
+    async fn set_instance_state_stopped(
+        datastore: &DataStore,
+        instance: uuid::Uuid,
+    ) {
+        use db::schema::instance::dsl;
+        diesel::update(dsl::instance)
+            .filter(dsl::id.eq(instance))
+            .set((
+                dsl::state.eq(db::model::InstanceState::NoVmm),
+                dsl::active_propolis_id.eq(None::<uuid::Uuid>),
+            ))
+            .execute_async(
+                &*datastore.pool_connection_for_tests().await.unwrap(),
+            )
+            .await
+            .unwrap();
+    }
+
+    // Helper for explicitly modifying sled resource usage
+    //
+    // The interaction we typically use to create and modify instance state
+    // is more complex in production, using a real allocation algorithm.
+    //
+    // Here, we just set the value of state explicitly. Be warned, there
+    // are no guardrails!
+    async fn allocate_instance_reservation(
+        datastore: &DataStore,
+        instance: InstanceUuid,
+    ) {
+        use db::schema::sled_resource::dsl;
+        diesel::insert_into(dsl::sled_resource)
+            .values(SledResource::new_for_vmm(
+                PropolisUuid::new_v4(),
+                instance,
+                SledUuid::new_v4(),
+                Resources::new(
+                    1,
+                    ByteCount::from_kibibytes_u32(1).into(),
+                    ByteCount::from_kibibytes_u32(1).into(),
+                ),
+            ))
+            .execute_async(
+                &*datastore.pool_connection_for_tests().await.unwrap(),
+            )
+            .await
+            .unwrap();
+    }
+
+    async fn delete_instance_reservation(
+        datastore: &DataStore,
+        instance: InstanceUuid,
+    ) {
+        use db::schema::sled_resource::dsl;
+        diesel::delete(dsl::sled_resource)
+            .filter(dsl::instance_id.eq(instance.into_untyped_uuid()))
+            .execute_async(
+                &*datastore.pool_connection_for_tests().await.unwrap(),
+            )
+            .await
+            .unwrap();
+    }
+
+    #[tokio::test]
+    async fn affinity_groups_are_project_scoped() {
+        // Setup
+        let logctx = dev::test_setup_log("affinity_groups_are_project_scoped");
+        let db = TestDatabase::new_with_datastore(&logctx.log).await;
+        let (opctx, datastore) = (db.opctx(), db.datastore());
+
+        let (authz_project, _) =
+            create_project(&opctx, &datastore, "my-project").await;
+
+        let (authz_other_project, _) =
+            create_project(&opctx, &datastore, "my-other-project").await;
+
+        let pagparams_id = DataPageParams {
+            marker: None,
+            limit: NonZeroU32::new(100).unwrap(),
+            direction: dropshot::PaginationOrder::Ascending,
+        };
+        let pagbyid = PaginatedBy::Id(pagparams_id);
+
+        // To start: No groups exist
+        let groups = datastore
+            .affinity_group_list(&opctx, &authz_project, &pagbyid)
+            .await
+            .unwrap();
+        assert!(groups.is_empty());
+
+        // Create a group
+        let group = create_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project,
+            "my-group",
+        )
+        .await
+        .unwrap();
+
+        // Now when we list groups, we'll see the one we created.
+ let groups = datastore + .affinity_group_list(&opctx, &authz_project, &pagbyid) + .await + .unwrap(); + assert_eq!(groups.len(), 1); + assert_eq!(groups[0], group); + + // This group won't appear in the other project + let groups = datastore + .affinity_group_list(&opctx, &authz_other_project, &pagbyid) + .await + .unwrap(); + assert!(groups.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_groups_are_project_scoped() { + // Setup + let logctx = + dev::test_setup_log("anti_affinity_groups_are_project_scoped"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, "my-project").await; + + let (authz_other_project, _) = + create_project(&opctx, &datastore, "my-other-project").await; + + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + + // To start: No groups exist + let groups = datastore + .anti_affinity_group_list(&opctx, &authz_project, &pagbyid) + .await + .unwrap(); + assert!(groups.is_empty()); + + // Create a group + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + // Now when we list groups, we'll see the one we created. + let groups = datastore + .anti_affinity_group_list(&opctx, &authz_project, &pagbyid) + .await + .unwrap(); + assert_eq!(groups.len(), 1); + assert_eq!(groups[0], group); + + // This group won't appear in the other project + let groups = datastore + .anti_affinity_group_list(&opctx, &authz_other_project, &pagbyid) + .await + .unwrap(); + assert!(groups.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_groups_prevent_project_deletion() { + // Setup + let logctx = + dev::test_setup_log("affinity_groups_prevent_project_deletion"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, mut project) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + // If we try to delete the project, we'll fail. + let err = datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap_err(); + assert!(matches!(err, Error::InvalidRequest { .. })); + assert!( + err.to_string() + .contains("project to be deleted contains an affinity group"), + "{err:?}" + ); + + // Delete the group, then try to delete the project again. + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Delete) + .await + .unwrap(); + datastore.affinity_group_delete(&opctx, &authz_group).await.unwrap(); + + // When the group was created, it bumped the rcgen in the project. If we + // have an old view of the project, we expect a "concurrent + // modification" error. + let err = datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap_err(); + assert!(err.to_string().contains("concurrent modification"), "{err:?}"); + + // If we update rcgen, however, and the group has been deleted, + // we can successfully delete the project. 
+ project.rcgen = project.rcgen.next().into(); + datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap(); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_groups_prevent_project_deletion() { + // Setup + let logctx = dev::test_setup_log( + "anti_affinity_groups_prevent_project_deletion", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, mut project) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + // If we try to delete the project, we'll fail. + let err = datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap_err(); + assert!(matches!(err, Error::InvalidRequest { .. })); + assert!( + err.to_string().contains( + "project to be deleted contains an anti affinity group" + ), + "{err:?}" + ); + + // Delete the group, then try to delete the project again. + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Delete) + .await + .unwrap(); + datastore + .anti_affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + + // When the group was created, it bumped the rcgen in the project. If we + // have an old view of the project, we expect a "concurrent + // modification" error. + let err = datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap_err(); + assert!(err.to_string().contains("concurrent modification"), "{err:?}"); + + // If we update rcgen, however, and the group has been deleted, + // we can successfully delete the project. + project.rcgen = project.rcgen.next().into(); + datastore + .project_delete(&opctx, &authz_project, &project) + .await + .unwrap(); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_names_are_unique_in_project() { + // Setup + let logctx = + dev::test_setup_log("affinity_group_names_are_unique_in_project"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create two projects + let (authz_project1, _) = + create_project(&opctx, &datastore, "my-project-1").await; + let (authz_project2, _) = + create_project(&opctx, &datastore, "my-project-2").await; + + // We can create a group wiht the same name in different projects + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project1, + "my-group", + ) + .await + .unwrap(); + create_affinity_group(&opctx, &datastore, &authz_project2, "my-group") + .await + .unwrap(); + + // If we try to create a new group with the same name in the same + // project, we'll see an error. + let err = create_affinity_group( + &opctx, + &datastore, + &authz_project1, + "my-group", + ) + .await + .unwrap_err(); + assert!( + matches!(&err, Error::ObjectAlreadyExists { + type_name, + object_name, + } if *type_name == ResourceType::AffinityGroup && + *object_name == "my-group"), + "Unexpected error: {err:?}" + ); + + // If we delete the group from the project, we can re-use the name. 
+        let (.., authz_group) = LookupPath::new(opctx, datastore)
+            .affinity_group_id(group.id())
+            .lookup_for(authz::Action::Delete)
+            .await
+            .unwrap();
+        datastore.affinity_group_delete(&opctx, &authz_group).await.unwrap();
+
+        create_affinity_group(&opctx, &datastore, &authz_project1, "my-group")
+            .await
+            .expect("Should have been able to re-use name after deletion");
+
+        // Clean up.
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn anti_affinity_group_names_are_unique_in_project() {
+        // Setup
+        let logctx = dev::test_setup_log(
+            "anti_affinity_group_names_are_unique_in_project",
+        );
+        let db = TestDatabase::new_with_datastore(&logctx.log).await;
+        let (opctx, datastore) = (db.opctx(), db.datastore());
+
+        // Create two projects
+        let (authz_project1, _) =
+            create_project(&opctx, &datastore, "my-project-1").await;
+        let (authz_project2, _) =
+            create_project(&opctx, &datastore, "my-project-2").await;
+
+        // We can create a group with the same name in different projects
+        let group = create_anti_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project1,
+            "my-group",
+        )
+        .await
+        .unwrap();
+        create_anti_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project2,
+            "my-group",
+        )
+        .await
+        .unwrap();
+
+        // If we try to create a new group with the same name in the same
+        // project, we'll see an error.
+        let err = create_anti_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project1,
+            "my-group",
+        )
+        .await
+        .unwrap_err();
+        assert!(
+            matches!(&err, Error::ObjectAlreadyExists {
+                type_name,
+                object_name,
+            } if *type_name == ResourceType::AntiAffinityGroup &&
+                *object_name == "my-group"),
+            "Unexpected error: {err:?}"
+        );
+
+        // If we delete the group from the project, we can re-use the name.
+        let (.., authz_group) = LookupPath::new(opctx, datastore)
+            .anti_affinity_group_id(group.id())
+            .lookup_for(authz::Action::Delete)
+            .await
+            .unwrap();
+        datastore
+            .anti_affinity_group_delete(&opctx, &authz_group)
+            .await
+            .unwrap();
+
+        create_anti_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project1,
+            "my-group",
+        )
+        .await
+        .expect("Should have been able to re-use name after deletion");
+
+        // Clean up.
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn affinity_group_membership_add_list_remove() {
+        // Setup
+        let logctx =
+            dev::test_setup_log("affinity_group_membership_add_list_remove");
+        let db = TestDatabase::new_with_datastore(&logctx.log).await;
+        let (opctx, datastore) = (db.opctx(), db.datastore());
+
+        // Create a project and a group
+        let (authz_project, ..) =
+            create_project(&opctx, &datastore, "my-project").await;
+        let group = create_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project,
+            "my-group",
+        )
+        .await
+        .unwrap();
+
+        let (.., authz_group) = LookupPath::new(opctx, datastore)
+            .affinity_group_id(group.id())
+            .lookup_for(authz::Action::Modify)
+            .await
+            .unwrap();
+
+        // A new group should have no members
+        let pagparams_id = DataPageParams {
+            marker: None,
+            limit: NonZeroU32::new(100).unwrap(),
+            direction: dropshot::PaginationOrder::Ascending,
+        };
+        let pagbyid = PaginatedBy::Id(pagparams_id);
+        let members = datastore
+            .affinity_group_member_list(&opctx, &authz_group, &pagbyid)
+            .await
+            .unwrap();
+        assert!(members.is_empty());
+
+        // Create an instance without a VMM.
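+        // (With no VMM there is no sled reservation, so group membership may
+        // be changed freely; the reservation restriction is exercised by the
+        // "*_with_vmm" tests below.)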
+ let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + + // Add the instance as a member to the group + datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // We should now be able to list the new member + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert_eq!(members.len(), 1); + assert_eq!( + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()) + ), + members[0].clone().into() + ); + + // We can delete the member and observe an empty member list + datastore + .affinity_group_member_delete( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_group_membership_add_list_remove() { + // Setup + let logctx = dev::test_setup_log( + "anti_affinity_group_membership_add_list_remove", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance without a VMM. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + + // Add the instance as a member to the group + datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // We should now be able to list the new member + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert_eq!(members.len(), 1); + assert_eq!( + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()) + ), + members[0].clone().into() + ); + + // We can delete the member and observe an empty member list + datastore + .anti_affinity_group_member_delete( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. 
+ db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_membership_add_remove_instance_with_vmm() { + // Setup + let logctx = dev::test_setup_log( + "affinity_group_membership_add_remove_instance_with_vmm", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance with a VMM. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + + allocate_instance_reservation( + &datastore, + InstanceUuid::from_untyped_uuid(instance.id()), + ) + .await; + + // Cannot add the instance to the group while it's running. + let err = datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .expect_err( + "Shouldn't be able to add running instances to affinity groups", + ); + assert!(matches!(err, Error::InvalidRequest { .. })); + assert!( + err.to_string().contains( + "Instance cannot be added to affinity group with reservation" + ), + "{err:?}" + ); + + // If we have no reservation for the instance, we can add it to the group. + delete_instance_reservation( + &datastore, + InstanceUuid::from_untyped_uuid(instance.id()), + ) + .await; + datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Now we can reserve a sled for the instance once more. + allocate_instance_reservation( + &datastore, + InstanceUuid::from_untyped_uuid(instance.id()), + ) + .await; + + // We should now be able to list the new member + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert_eq!(members.len(), 1); + assert_eq!( + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()) + ), + members[0].clone().into() + ); + + // We can delete the member and observe an empty member list -- even + // though it's running! + datastore + .affinity_group_member_delete( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. 
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn anti_affinity_group_membership_add_remove_instance_with_vmm() {
+        // Setup
+        let logctx = dev::test_setup_log(
+            "anti_affinity_group_membership_add_remove_instance_with_vmm",
+        );
+        let db = TestDatabase::new_with_datastore(&logctx.log).await;
+        let (opctx, datastore) = (db.opctx(), db.datastore());
+
+        // Create a project and a group
+        let (authz_project, ..) =
+            create_project(&opctx, &datastore, "my-project").await;
+        let group = create_anti_affinity_group(
+            &opctx,
+            &datastore,
+            &authz_project,
+            "my-group",
+        )
+        .await
+        .unwrap();
+
+        let (.., authz_group) = LookupPath::new(opctx, datastore)
+            .anti_affinity_group_id(group.id())
+            .lookup_for(authz::Action::Modify)
+            .await
+            .unwrap();
+
+        // A new group should have no members
+        let pagparams_id = DataPageParams {
+            marker: None,
+            limit: NonZeroU32::new(100).unwrap(),
+            direction: dropshot::PaginationOrder::Ascending,
+        };
+        let pagbyid = PaginatedBy::Id(pagparams_id);
+        let members = datastore
+            .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid)
+            .await
+            .unwrap();
+        assert!(members.is_empty());
+
+        // Create an instance with a VMM.
+        let instance = create_instance_record(
+            &opctx,
+            &datastore,
+            &authz_project,
+            "my-instance",
+        )
+        .await;
+        allocate_instance_reservation(
+            &datastore,
+            InstanceUuid::from_untyped_uuid(instance.id()),
+        )
+        .await;
+
+        // Cannot add the instance to the group while it's running.
+        let err = datastore
+            .anti_affinity_group_member_add(
+                &opctx,
+                &authz_group,
+                external::AntiAffinityGroupMember::Instance(
+                    InstanceUuid::from_untyped_uuid(instance.id()),
+                ),
+            )
+            .await
+            .expect_err(
+                "Shouldn't be able to add running instances to anti-affinity groups",
+            );
+        assert!(matches!(err, Error::InvalidRequest { .. }));
+        assert!(
+            err.to_string().contains(
+                "Instance cannot be added to anti-affinity group with reservation"
+            ),
+            "{err:?}"
+        );
+
+        // If we have no reservation for the instance, we can add it to the group.
+        delete_instance_reservation(
+            &datastore,
+            InstanceUuid::from_untyped_uuid(instance.id()),
+        )
+        .await;
+        datastore
+            .anti_affinity_group_member_add(
+                &opctx,
+                &authz_group,
+                external::AntiAffinityGroupMember::Instance(
+                    InstanceUuid::from_untyped_uuid(instance.id()),
+                ),
+            )
+            .await
+            .unwrap();
+
+        // Now we can reserve a sled for the instance once more.
+        allocate_instance_reservation(
+            &datastore,
+            InstanceUuid::from_untyped_uuid(instance.id()),
+        )
+        .await;
+
+        // We should now be able to list the new member
+        let members = datastore
+            .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid)
+            .await
+            .unwrap();
+        assert_eq!(members.len(), 1);
+        assert_eq!(
+            external::AntiAffinityGroupMember::Instance(
+                InstanceUuid::from_untyped_uuid(instance.id())
+            ),
+            members[0].clone().into()
+        );
+
+        // We can delete the member and observe an empty member list -- even
+        // though it's running!
+        datastore
+            .anti_affinity_group_member_delete(
+                &opctx,
+                &authz_group,
+                external::AntiAffinityGroupMember::Instance(
+                    InstanceUuid::from_untyped_uuid(instance.id()),
+                ),
+            )
+            .await
+            .unwrap();
+        let members = datastore
+            .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid)
+            .await
+            .unwrap();
+        assert!(members.is_empty());
+
+        // Clean up.
+ db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_delete_group_deletes_members() { + // Setup + let logctx = + dev::test_setup_log("affinity_group_delete_group_deletes_members"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance without a VMM, add it to the group. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Delete the group + datastore.affinity_group_delete(&opctx, &authz_group).await.unwrap(); + + // Confirm that no instance members exist + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_group_delete_group_deletes_members() { + // Setup + let logctx = dev::test_setup_log( + "anti_affinity_group_delete_group_deletes_members", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance without a VMM, add it to the group. 
+ let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Delete the group + datastore + .anti_affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + + // Confirm that no instance members exist + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_delete_instance_deletes_membership() { + // Setup + let logctx = dev::test_setup_log( + "affinity_group_delete_instance_deletes_membership", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance without a VMM, add it to the group. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Delete the instance + let (.., authz_instance) = LookupPath::new(opctx, datastore) + .instance_id(instance.id()) + .lookup_for(authz::Action::Delete) + .await + .unwrap(); + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + + // Confirm that no instance members exist + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_group_delete_instance_deletes_membership() { + // Setup + let logctx = dev::test_setup_log( + "anti_affinity_group_delete_instance_deletes_membership", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) 
= + create_project(&opctx, &datastore, "my-project").await; + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // A new group should have no members + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Create an instance without a VMM, add it to the group. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Delete the instance + let (.., authz_instance) = LookupPath::new(opctx, datastore) + .instance_id(instance.id()) + .lookup_for(authz::Action::Delete) + .await + .unwrap(); + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + + // Confirm that no instance members exist + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_membership_for_deleted_objects() { + // Setup + let logctx = dev::test_setup_log( + "affinity_group_membership_for_deleted_objects", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + + struct TestArgs { + // Does the group exist? + group: bool, + // Does the instance exist? + instance: bool, + } + + let args = [ + TestArgs { group: false, instance: false }, + TestArgs { group: true, instance: false }, + TestArgs { group: false, instance: true }, + ]; + + for arg in args { + // Create an affinity group, and maybe delete it. + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + if !arg.group { + datastore + .affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + } + + // Create an instance, and maybe delete it. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + let (.., authz_instance) = LookupPath::new(opctx, datastore) + .instance_id(instance.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + if !arg.instance { + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + } + + // Try to add the instance to the group. + // + // Expect to see specific errors, depending on whether or not the + // group/instance exist. 
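+            // (The argument matrix above never includes the case where both
+            // exist, so the "(true, true)" match arms below are unreachable
+            // by construction.)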
+ let err = datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .expect_err("Should have failed"); + + match (arg.group, arg.instance) { + (false, _) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AffinityGroup), + "{err:?}" + ); + } + (true, false) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::Instance), + "{err:?}" + ); + } + (true, true) => { + panic!("If both exist, we won't throw an error") + } + } + + // Do the same thing, but for group membership removal. + let err = datastore + .affinity_group_member_delete( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .expect_err("Should have failed"); + match (arg.group, arg.instance) { + (false, _) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AffinityGroup), + "{err:?}" + ); + } + (true, false) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AffinityGroupMember), + "{err:?}" + ); + } + (true, true) => { + panic!("If both exist, we won't throw an error") + } + } + + // Cleanup, if we actually created anything. + if arg.instance { + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + } + if arg.group { + datastore + .affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + } + } + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_group_membership_for_deleted_objects() { + // Setup + let logctx = dev::test_setup_log( + "anti_affinity_group_membership_for_deleted_objects", + ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + + struct TestArgs { + // Does the group exist? + group: bool, + // Does the instance exist? + instance: bool, + } + + let args = [ + TestArgs { group: false, instance: false }, + TestArgs { group: true, instance: false }, + TestArgs { group: false, instance: true }, + ]; + + for arg in args { + // Create an anti-affinity group, and maybe delete it. + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + if !arg.group { + datastore + .anti_affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + } + + // Create an instance, and maybe delete it. + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + let (.., authz_instance) = LookupPath::new(opctx, datastore) + .instance_id(instance.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + if !arg.instance { + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + } + + // Try to add the instance to the group. + // + // Expect to see specific errors, depending on whether or not the + // group/instance exist. 
+ let err = datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .expect_err("Should have failed"); + + match (arg.group, arg.instance) { + (false, _) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AntiAffinityGroup), + "{err:?}" + ); + } + (true, false) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::Instance), + "{err:?}" + ); + } + (true, true) => { + panic!("If both exist, we won't throw an error") + } + } + + // Do the same thing, but for group membership removal. + let err = datastore + .anti_affinity_group_member_delete( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .expect_err("Should have failed"); + match (arg.group, arg.instance) { + (false, _) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AntiAffinityGroup), + "{err:?}" + ); + } + (true, false) => { + assert!( + matches!(err, Error::ObjectNotFound { + type_name, .. + } if type_name == ResourceType::AntiAffinityGroupMember), + "{err:?}" + ); + } + (true, true) => { + panic!("If both exist, we won't throw an error") + } + } + + // Cleanup, if we actually created anything. + if arg.instance { + datastore + .project_delete_instance(&opctx, &authz_instance) + .await + .unwrap(); + } + if arg.group { + datastore + .anti_affinity_group_delete(&opctx, &authz_group) + .await + .unwrap(); + } + } + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn affinity_group_membership_idempotency() { + // Setup + let logctx = + dev::test_setup_log("affinity_group_membership_idempotency"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + let (.., authz_group) = LookupPath::new(opctx, datastore) + .affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // Create an instance + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + + // Add the instance to the group + datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Add the instance to the group again + let err = datastore + .affinity_group_member_add( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap_err(); + assert!( + matches!( + err, + Error::ObjectAlreadyExists { + type_name: ResourceType::AffinityGroupMember, + .. + } + ), + "Error: {err:?}" + ); + + // We should still only observe a single member in the group. + // + // Two calls to "affinity_group_member_add" should be the same + // as a single call. 
+ let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert_eq!(members.len(), 1); + + // We should be able to delete the membership idempotently. + datastore + .affinity_group_member_delete( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + let err = datastore + .affinity_group_member_delete( + &opctx, + &authz_group, + external::AffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap_err(); + assert!( + matches!( + err, + Error::ObjectNotFound { + type_name: ResourceType::AffinityGroupMember, + .. + } + ), + "Error: {err:?}" + ); + + let members = datastore + .affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert!(members.is_empty()); + + // Clean up. + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn anti_affinity_group_membership_idempotency() { + // Setup + let logctx = + dev::test_setup_log("anti_affinity_group_membership_idempotency"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a project and a group + let (authz_project, ..) = + create_project(&opctx, &datastore, "my-project").await; + let group = create_anti_affinity_group( + &opctx, + &datastore, + &authz_project, + "my-group", + ) + .await + .unwrap(); + let (.., authz_group) = LookupPath::new(opctx, datastore) + .anti_affinity_group_id(group.id()) + .lookup_for(authz::Action::Modify) + .await + .unwrap(); + + // Create an instance + let instance = create_instance_record( + &opctx, + &datastore, + &authz_project, + "my-instance", + ) + .await; + + // Add the instance to the group + datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap(); + + // Add the instance to the group again + let err = datastore + .anti_affinity_group_member_add( + &opctx, + &authz_group, + external::AntiAffinityGroupMember::Instance( + InstanceUuid::from_untyped_uuid(instance.id()), + ), + ) + .await + .unwrap_err(); + assert!( + matches!( + err, + Error::ObjectAlreadyExists { + type_name: ResourceType::AntiAffinityGroupMember, + .. + } + ), + "Error: {err:?}" + ); + + // We should still only observe a single member in the group. + // + // Two calls to "anti_affinity_group_member_add" should be the same + // as a single call. + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = PaginatedBy::Id(pagparams_id); + let members = datastore + .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid) + .await + .unwrap(); + assert_eq!(members.len(), 1); + + // We should be able to delete the membership idempotently. 
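+        // (As with the affinity-group case above, the first delete succeeds
+        // and a repeated delete surfaces ObjectNotFound.)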
+        datastore
+            .anti_affinity_group_member_delete(
+                &opctx,
+                &authz_group,
+                external::AntiAffinityGroupMember::Instance(
+                    InstanceUuid::from_untyped_uuid(instance.id()),
+                ),
+            )
+            .await
+            .unwrap();
+        let err = datastore
+            .anti_affinity_group_member_delete(
+                &opctx,
+                &authz_group,
+                external::AntiAffinityGroupMember::Instance(
+                    InstanceUuid::from_untyped_uuid(instance.id()),
+                ),
+            )
+            .await
+            .unwrap_err();
+        assert!(
+            matches!(
+                err,
+                Error::ObjectNotFound {
+                    type_name: ResourceType::AntiAffinityGroupMember,
+                    ..
+                }
+            ),
+            "Error: {err:?}"
+        );
+
+        let members = datastore
+            .anti_affinity_group_member_list(&opctx, &authz_group, &pagbyid)
+            .await
+            .unwrap();
+        assert!(members.is_empty());
+
+        // Clean up.
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+}
diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs
index 985a0453c6a..de9a520a361 100644
--- a/nexus/db-queries/src/db/datastore/db_metadata.rs
+++ b/nexus/db-queries/src/db/datastore/db_metadata.rs
@@ -17,7 +17,7 @@ use nexus_db_model::SchemaUpgradeStep;
 use nexus_db_model::SchemaVersion;
 use nexus_db_model::EARLIEST_SUPPORTED_VERSION;
 use omicron_common::api::external::Error;
-use omicron_common::api::external::SemverVersion;
+use semver::Version;
 use slog::{error, info, o, Logger};
 use std::ops::Bound;
 use std::str::FromStr;
@@ -32,7 +32,7 @@ use std::str::FromStr;
 // an auto-generated pre-release version.
 #[derive(Clone)]
 struct StepSemverVersion {
-    version: SemverVersion,
+    version: Version,
     i: usize,
 }
 
@@ -45,20 +45,19 @@ impl StepSemverVersion {
     //
     // By using "dot number" notation, we can order the pre-release
    // steps, which matters while comparing their order.
-    fn new(target_version: &SemverVersion, i: usize) -> anyhow::Result<Self> {
+    fn new(target_version: &Version, i: usize) -> anyhow::Result<Self> {
         let mut target_step_version = target_version.clone();
-        target_step_version.0.pre =
-            semver::Prerelease::new(&format!("step.{i}"))
-                .context("Cannot parse step as semver pre-release")?;
+        target_step_version.pre = semver::Prerelease::new(&format!("step.{i}"))
+            .context("Cannot parse step as semver pre-release")?;
         Ok(Self { version: target_step_version, i })
     }
 
     // Drops the pre-release information about this target version.
     //
     // This is the version we are upgrading to, using these incremental steps.
-    fn without_prerelease(&self) -> SemverVersion {
+    fn without_prerelease(&self) -> Version {
         let mut target_version = self.version.clone();
-        target_version.0.pre = semver::Prerelease::EMPTY;
+        target_version.pre = semver::Prerelease::EMPTY;
         target_version
     }
 
@@ -76,8 +75,8 @@ impl StepSemverVersion {
 // "found_target_version".
 fn skippable_version(
     log: &Logger,
-    target_step_version: &SemverVersion,
-    found_target_version: &Option<SemverVersion>,
+    target_step_version: &Version,
+    found_target_version: &Option<Version>,
 ) -> bool {
     if let Some(found_target_version) = found_target_version.as_ref() {
         info!(
@@ -119,7 +118,7 @@ impl DataStore {
     pub async fn ensure_schema(
         &self,
         log: &Logger,
-        desired_version: SemverVersion,
+        desired_version: Version,
         all_versions: Option<&AllSchemaVersions>,
     ) -> Result<(), anyhow::Error> {
         let (found_version, found_target_version) = self
@@ -205,7 +204,7 @@ impl DataStore {
         info!(log, "Attempting to upgrade schema");
 
         // For the rationale here, see: StepSemverVersion::new.
-        if target_version.semver().0.pre != semver::Prerelease::EMPTY {
+        if target_version.semver().pre != semver::Prerelease::EMPTY {
             bail!("Cannot upgrade to version which includes pre-release");
         }
 
@@ -293,8 +292,8 @@ impl DataStore {
         log: &Logger,
         step: &SchemaUpgradeStep,
         target_step: &StepSemverVersion,
-        current_version: &SemverVersion,
-        found_target_version: &Option<SemverVersion>,
+        current_version: &Version,
+        found_target_version: &Option<Version>,
     ) -> Result<(), anyhow::Error> {
         if skippable_version(&log, &target_step.version, &found_target_version)
         {
@@ -344,7 +343,7 @@ impl DataStore {
 
     pub async fn database_schema_version(
         &self,
-    ) -> Result<(SemverVersion, Option<SemverVersion>), Error> {
+    ) -> Result<(Version, Option<Version>), Error> {
         use db::schema::db_metadata::dsl;
 
         let (version, target): (String, Option<String>) = dsl::db_metadata
@@ -354,12 +353,12 @@ impl DataStore {
             .await
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
 
-        let version = SemverVersion::from_str(&version).map_err(|e| {
+        let version = Version::from_str(&version).map_err(|e| {
             Error::internal_error(&format!("Invalid schema version: {e}"))
         })?;
 
         if let Some(target) = target {
-            let target = SemverVersion::from_str(&target).map_err(|e| {
+            let target = Version::from_str(&target).map_err(|e| {
                 Error::internal_error(&format!("Invalid schema version: {e}"))
             })?;
             return Ok((version, Some(target)));
@@ -383,7 +382,7 @@ impl DataStore {
     // make progress.
     async fn prepare_schema_update(
         &self,
-        from_version: &SemverVersion,
+        from_version: &Version,
         target_step: &StepSemverVersion,
     ) -> Result<(), Error> {
         use db::schema::db_metadata::dsl;
@@ -425,8 +424,8 @@ impl DataStore {
     // configuration file.
     async fn apply_schema_update(
         &self,
-        current: &SemverVersion,
-        target: &SemverVersion,
+        current: &Version,
+        target: &Version,
         sql: &str,
     ) -> Result<(), Error> {
         let conn = self.pool_connection_unauthorized().await?;
@@ -462,7 +461,7 @@ impl DataStore {
     // - last_step: What we expect "target_version" must be to proceed.
     async fn finalize_schema_update(
         &self,
-        from_version: &SemverVersion,
+        from_version: &Version,
         last_step: &StepSemverVersion,
     ) -> Result<(), Error> {
         use db::schema::db_metadata::dsl;
@@ -522,7 +521,7 @@ mod test {
     // Helper to create the version directory and "up.sql".
     async fn add_upgrade<S: AsRef<str>>(
         config_dir_path: &Utf8Path,
-        version: SemverVersion,
+        version: Version,
         sql: S,
     ) {
         let dir = config_dir_path.join(version.to_string());
@@ -532,7 +531,7 @@ mod test {
 
     async fn add_upgrade_subcomponent<S: AsRef<str>>(
         config_dir_path: &Utf8Path,
-        version: SemverVersion,
+        version: Version,
         sql: S,
         i: usize,
     ) {
@@ -566,7 +565,7 @@ mod test {
         //
         // To trigger this action within a test, we manually set the "known to
         // DB" version.
-        let v0 = SemverVersion::new(0, 0, 0);
+        let v0 = Version::new(0, 0, 0);
         use db::schema::db_metadata::dsl;
         diesel::update(dsl::db_metadata.filter(dsl::singleton.eq(true)))
             .set(dsl::version.eq(v0.to_string()))
             .await
             .expect("Failed to set version back to 0.0.0");
 
-        let v1 = SemverVersion::new(0, 0, 1);
+        let v1 = Version::new(0, 0, 1);
         let v2 = SCHEMA_VERSION;
 
         assert!(v0 < v1);
@@ -674,7 +673,7 @@ mod test {
         // Nexus will decide to upgrade to, at most, the version that its own binary understands.
         //
         // To trigger this action within a test, we manually set the "known to DB" version.
-        let v0 = SemverVersion::new(0, 0, 0);
+        let v0 = Version::new(0, 0, 0);
         use db::schema::db_metadata::dsl;
         diesel::update(dsl::db_metadata.filter(dsl::singleton.eq(true)))
             .set(dsl::version.eq(v0.to_string()))
             .await
             .expect("Failed to set version back to 0.0.0");
 
-        let v1 = SemverVersion::new(1, 0, 0);
+        let v1 = Version::new(1, 0, 0);
         let v2 = SCHEMA_VERSION;
 
         assert!(v0 < v1);
diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs
index ede962e561e..345fb164147 100644
--- a/nexus/db-queries/src/db/datastore/deployment.rs
+++ b/nexus/db-queries/src/db/datastore/deployment.rs
@@ -838,7 +838,13 @@ impl DataStore {
                             d.id, d.sled_id
                         ))
                     })?;
-                    sled_disks.disks.insert(d.into());
+                    let disk_id = d.id;
+                    sled_disks.disks.insert(d.try_into().map_err(|e| {
+                        Error::internal_error(&format!(
+                            "Cannot convert BpOmicronPhysicalDisk {}: {e}",
+                            disk_id
+                        ))
+                    })?);
                 }
             }
         }
@@ -2039,7 +2045,6 @@ mod tests {
     use omicron_uuid_kinds::PhysicalDiskUuid;
     use omicron_uuid_kinds::SledUuid;
     use omicron_uuid_kinds::ZpoolUuid;
-    use once_cell::sync::Lazy;
     use oxnet::IpNet;
     use pretty_assertions::assert_eq;
     use rand::thread_rng;
@@ -2054,10 +2059,11 @@ mod tests {
     use std::sync::atomic::AtomicBool;
     use std::sync::atomic::Ordering;
     use std::sync::Arc;
+    use std::sync::LazyLock;
     use std::time::Duration;
 
-    static EMPTY_PLANNING_INPUT: Lazy<PlanningInput> =
-        Lazy::new(|| PlanningInputBuilder::empty_input());
+    static EMPTY_PLANNING_INPUT: LazyLock<PlanningInput> =
+        LazyLock::new(|| PlanningInputBuilder::empty_input());
 
     #[derive(Default)]
     pub struct NetworkResourceControlFlow {
@@ -2344,12 +2350,12 @@ mod tests {
         assert_eq!(
             EnsureMultiple::from(
                 builder
-                    .sled_ensure_disks(
+                    .sled_add_disks(
                         new_sled_id,
                         &planning_input
                             .sled_lookup(SledFilter::Commissioned, new_sled_id)
                             .unwrap()
-                            .resources,
+                            .resources
                     )
                     .unwrap()
                     .disks
diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs
index b2358c0d291..fc3fcf25cb9 100644
--- a/nexus/db-queries/src/db/datastore/instance.rs
+++ b/nexus/db-queries/src/db/datastore/instance.rs
@@ -1365,6 +1365,13 @@ impl DataStore {
         // Note that due to idempotency of this function, it's possible that
         // "authz_instance.id()" has already been deleted.
         let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id());
+        self.instance_affinity_group_memberships_delete(opctx, instance_id)
+            .await?;
+        self.instance_anti_affinity_group_memberships_delete(
+            opctx,
+            instance_id,
+        )
+        .await?;
         self.instance_ssh_keys_delete(opctx, instance_id).await?;
         self.instance_mark_migrations_deleted(opctx, instance_id).await?;
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index b2d9f8f2471..0e6a939581f 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -38,7 +38,6 @@ use omicron_common::api::external::Error;
 use omicron_common::api::external::IdentityMetadataCreateParams;
 use omicron_common::api::external::LookupType;
 use omicron_common::api::external::ResourceType;
-use omicron_common::api::external::SemverVersion;
 use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service, BackoffError,
 };
@@ -49,6 +48,7 @@ use std::num::NonZeroU32;
 use std::sync::Arc;
 
 mod address_lot;
+mod affinity;
 mod allow_list;
 mod auth;
 mod bfd;
@@ -221,13 +221,14 @@ impl DataStore {
         config: Option<&AllSchemaVersions>,
         try_for: Option<std::time::Duration>,
     ) -> Result<Self, String> {
+        use nexus_db_model::SCHEMA_VERSION as EXPECTED_VERSION;
+
         let datastore =
             Self::new_unchecked(log.new(o!("component" => "datastore")), pool);
 
         let start = std::time::Instant::now();
 
         // Keep looping until we find that the schema matches our expectation.
-        const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION;
         retry_notify(
             retry_policy_internal_service(),
             || async {
@@ -262,9 +263,10 @@ impl DataStore {
         log: &Logger,
         pool: Arc<Pool>,
     ) -> Result<Self, String> {
+        use nexus_db_model::SCHEMA_VERSION as EXPECTED_VERSION;
+
         let datastore =
             Self::new_unchecked(log.new(o!("component" => "datastore")), pool);
-        const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION;
         let (found_version, found_target) = datastore
             .database_schema_version()
             .await
diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs
index 1d1c6286a6d..796ee27ca9f 100644
--- a/nexus/db-queries/src/db/datastore/physical_disk.rs
+++ b/nexus/db-queries/src/db/datastore/physical_disk.rs
@@ -154,12 +154,13 @@ impl DataStore {
     ) -> Result<(), Error> {
         opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
         use db::schema::physical_disk::dsl;
+        let now = Utc::now();
         diesel::update(
             dsl::physical_disk.filter(dsl::id.eq(to_db_typed_uuid(id))),
         )
         .filter(dsl::time_deleted.is_null())
-        .set(dsl::disk_policy.eq(policy))
+        .set((dsl::disk_policy.eq(policy), dsl::time_modified.eq(now)))
         .execute_async(&*self.pool_connection_authorized(&opctx).await?)
         .await
         .map_err(|err| public_error_from_diesel(err, ErrorHandler::Server))?;
@@ -174,12 +175,13 @@ impl DataStore {
     ) -> Result<(), Error> {
         opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
         use db::schema::physical_disk::dsl;
+        let now = Utc::now();
         diesel::update(
             dsl::physical_disk.filter(dsl::id.eq(to_db_typed_uuid(id))),
         )
         .filter(dsl::time_deleted.is_null())
-        .set(dsl::disk_state.eq(state))
+        .set((dsl::disk_state.eq(state), dsl::time_modified.eq(now)))
         .execute_async(&*self.pool_connection_authorized(&opctx).await?)
         .await
         .map_err(|err| public_error_from_diesel(err, ErrorHandler::Server))?;
@@ -281,19 +283,27 @@ impl DataStore {
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
-    /// Decommissions all expunged disks.
- pub async fn physical_disk_decommission_all_expunged( + /// Decommissions a single expunged disk. + /// + /// This is a no-op if the disk is already decommissioned. + pub async fn physical_disk_decommission( &self, opctx: &OpContext, + id: PhysicalDiskUuid, ) -> Result<(), Error> { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; use db::schema::physical_disk::dsl; - + let now = Utc::now(); let conn = &*self.pool_connection_authorized(&opctx).await?; diesel::update(dsl::physical_disk) + .filter(dsl::id.eq(to_db_typed_uuid(id))) .filter(dsl::time_deleted.is_null()) - .physical_disk_filter(DiskFilter::ExpungedButActive) - .set(dsl::disk_state.eq(PhysicalDiskState::Decommissioned)) + .filter(dsl::disk_policy.eq(PhysicalDiskPolicy::Expunged)) + .filter(dsl::disk_state.ne(PhysicalDiskState::Decommissioned)) + .set(( + dsl::disk_state.eq(PhysicalDiskState::Decommissioned), + dsl::time_modified.eq(now), + )) .execute_async(conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index 58b7b315c1c..367e094b2e5 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -225,6 +225,8 @@ impl DataStore { generate_fn_to_ensure_none_in_project!(project_image, name, String); generate_fn_to_ensure_none_in_project!(snapshot, name, String); generate_fn_to_ensure_none_in_project!(vpc, name, String); + generate_fn_to_ensure_none_in_project!(affinity_group, name, String); + generate_fn_to_ensure_none_in_project!(anti_affinity_group, name, String); /// Delete a project pub async fn project_delete( @@ -242,6 +244,9 @@ impl DataStore { self.ensure_no_project_images_in_project(opctx, authz_project).await?; self.ensure_no_snapshots_in_project(opctx, authz_project).await?; self.ensure_no_vpcs_in_project(opctx, authz_project).await?; + self.ensure_no_affinity_groups_in_project(opctx, authz_project).await?; + self.ensure_no_anti_affinity_groups_in_project(opctx, authz_project) + .await?; use db::schema::project::dsl; diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index dbccde8c500..ba34f2d9ad5 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -3234,7 +3234,7 @@ mod tests { .expect("created blueprint builder"); for &sled_id in &sled_ids { builder - .sled_ensure_disks( + .sled_add_disks( sled_id, &planning_input .sled_lookup(SledFilter::InService, sled_id) diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 402e56c0ed7..1fa6ade9904 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -175,6 +175,16 @@ impl<'a> LookupPath<'a> { Instance::PrimaryKey(Root { lookup_root: self }, id) } + /// Select a resource of type AffinityGroup, identified by its id + pub fn affinity_group_id(self, id: Uuid) -> AffinityGroup<'a> { + AffinityGroup::PrimaryKey(Root { lookup_root: self }, id) + } + + /// Select a resource of type AntiAffinityGroup, identified by its id + pub fn anti_affinity_group_id(self, id: Uuid) -> AntiAffinityGroup<'a> { + AntiAffinityGroup::PrimaryKey(Root { lookup_root: self }, id) + } + /// Select a resource of type IpPool, identified by its name pub fn ip_pool_name<'b, 'c>(self, name: &'b Name) -> IpPool<'c> where @@ -682,6 +692,22 @@ lookup_resource! { primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] } +lookup_resource! 
{
+    name = "AffinityGroup",
+    ancestors = [ "Silo", "Project" ],
+    lookup_by_name = true,
+    soft_deletes = true,
+    primary_key_columns = [ { column_name = "id", rust_type = Uuid } ]
+}
+
+lookup_resource! {
+    name = "AntiAffinityGroup",
+    ancestors = [ "Silo", "Project" ],
+    lookup_by_name = true,
+    soft_deletes = true,
+    primary_key_columns = [ { column_name = "id", rust_type = Uuid } ]
+}
+
 lookup_resource! {
     name = "InstanceNetworkInterface",
     ancestors = [ "Silo", "Project", "Instance" ],
diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs
index 6f955edf735..57868f32809 100644
--- a/nexus/db-queries/src/db/queries/network_interface.rs
+++ b/nexus/db-queries/src/db/queries/network_interface.rs
@@ -31,9 +31,9 @@ use nexus_db_model::{NetworkInterfaceKind, MAX_NICS_PER_INSTANCE};
 use nexus_db_model::{NetworkInterfaceKindEnum, SqlU8};
 use omicron_common::api::external;
 use omicron_common::api::external::MacAddr;
-use once_cell::sync::Lazy;
 use slog_error_chain::SlogInlineError;
 use std::net::{IpAddr, Ipv6Addr};
+use std::sync::LazyLock;
 use uuid::Uuid;
 
 // These are sentinel values and other constants used to verify the state of the
@@ -62,11 +62,11 @@ const INSTANCE_DESTROYED: db::model::InstanceState =
 const INSTANCE_RUNNING: db::model::InstanceState =
     db::model::InstanceState::Vmm;
 
-static NO_INSTANCE_SENTINEL_STRING: Lazy<String> =
-    Lazy::new(|| String::from(NO_INSTANCE_SENTINEL));
+static NO_INSTANCE_SENTINEL_STRING: LazyLock<String> =
+    LazyLock::new(|| String::from(NO_INSTANCE_SENTINEL));
 
-static INSTANCE_BAD_STATE_SENTINEL_STRING: Lazy<String> =
-    Lazy::new(|| String::from(INSTANCE_BAD_STATE_SENTINEL));
+static INSTANCE_BAD_STATE_SENTINEL_STRING: LazyLock<String> =
+    LazyLock::new(|| String::from(INSTANCE_BAD_STATE_SENTINEL));
 
 // Uncastable sentinel used to detect when an instance exists, but is not
 // in the right state to have its network interfaces altered
diff --git a/nexus/db-queries/src/policy_test/resource_builder.rs b/nexus/db-queries/src/policy_test/resource_builder.rs
index b6d7d97553e..310c11adf3c 100644
--- a/nexus/db-queries/src/policy_test/resource_builder.rs
+++ b/nexus/db-queries/src/policy_test/resource_builder.rs
@@ -243,6 +243,8 @@ macro_rules!
impl_dyn_authorized_resource_for_resource { } impl_dyn_authorized_resource_for_resource!(authz::AddressLot); +impl_dyn_authorized_resource_for_resource!(authz::AffinityGroup); +impl_dyn_authorized_resource_for_resource!(authz::AntiAffinityGroup); impl_dyn_authorized_resource_for_resource!(authz::Blueprint); impl_dyn_authorized_resource_for_resource!(authz::Certificate); impl_dyn_authorized_resource_for_resource!(authz::DeviceAccessToken); diff --git a/nexus/db-queries/src/policy_test/resources.rs b/nexus/db-queries/src/policy_test/resources.rs index 6ee92e167cf..04655410533 100644 --- a/nexus/db-queries/src/policy_test/resources.rs +++ b/nexus/db-queries/src/policy_test/resources.rs @@ -300,6 +300,21 @@ async fn make_project( LookupType::ByName(vpc1_name.clone()), ); + let affinity_group_name = format!("{}-affinity-group1", project_name); + let affinity_group = authz::AffinityGroup::new( + project.clone(), + Uuid::new_v4(), + LookupType::ByName(affinity_group_name.clone()), + ); + + let anti_affinity_group_name = + format!("{}-anti-affinity-group1", project_name); + let anti_affinity_group = authz::AntiAffinityGroup::new( + project.clone(), + Uuid::new_v4(), + LookupType::ByName(anti_affinity_group_name.clone()), + ); + let instance_name = format!("{}-instance1", project_name); let instance = authz::Instance::new( project.clone(), @@ -313,6 +328,8 @@ async fn make_project( Uuid::new_v4(), LookupType::ByName(disk_name.clone()), )); + builder.new_resource(affinity_group.clone()); + builder.new_resource(anti_affinity_group.clone()); builder.new_resource(instance.clone()); builder.new_resource(authz::InstanceNetworkInterface::new( instance, diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index 4b24e649ccb..e0d43250d13 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -306,6 +306,34 @@ resource: Disk "silo1-proj1-disk1" silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: AffinityGroup "silo1-proj1-affinity-group1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + +resource: AntiAffinityGroup "silo1-proj1-anti-affinity-group1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + resource: Instance "silo1-proj1-instance1" USER Q R LC RP M MP CC D @@ -474,6 +502,34 @@ resource: Disk "silo1-proj2-disk1" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: AffinityGroup "silo1-proj2-affinity-group1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! 
+
+resource: AntiAffinityGroup "silo1-proj2-anti-affinity-group1"
+
+                                USER  Q  R LC RP  M MP CC  D
+                         fleet-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  fleet-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                        fleet-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                         silo1-admin  ✘  ✔  ✔  ✔  ✔  ✔  ✔  ✔
+                  silo1-collaborator  ✘  ✔  ✔  ✔  ✔  ✔  ✔  ✔
+                        silo1-viewer  ✘  ✔  ✔  ✔  ✘  ✘  ✘  ✘
+                   silo1-proj1-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+            silo1-proj1-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  silo1-proj1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                     unauthenticated  !  !  !  !  !  !  !  !
+
 resource: Instance "silo1-proj2-instance1"
 
                                 USER  Q  R LC RP  M MP CC  D
@@ -810,6 +866,34 @@ resource: Disk "silo2-proj1-disk1"
                   silo1-proj1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
                      unauthenticated  !  !  !  !  !  !  !  !
 
+resource: AffinityGroup "silo2-proj1-affinity-group1"
+
+                                USER  Q  R LC RP  M MP CC  D
+                         fleet-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  fleet-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                        fleet-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                         silo1-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  silo1-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                        silo1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                   silo1-proj1-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+            silo1-proj1-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  silo1-proj1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                     unauthenticated  !  !  !  !  !  !  !  !
+
+resource: AntiAffinityGroup "silo2-proj1-anti-affinity-group1"
+
+                                USER  Q  R LC RP  M MP CC  D
+                         fleet-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  fleet-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                        fleet-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                         silo1-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  silo1-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                        silo1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                   silo1-proj1-admin  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+            silo1-proj1-collaborator  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                  silo1-proj1-viewer  ✘  ✘  ✘  ✘  ✘  ✘  ✘  ✘
+                     unauthenticated  !  !  !  !  !  !  !  !
+
 resource: Instance "silo2-proj1-instance1"
 
                                 USER  Q  R LC RP  M MP CC  D
diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml
index 1d941deb8e0..96197d248a8 100644
--- a/nexus/defaults/Cargo.toml
+++ b/nexus/defaults/Cargo.toml
@@ -9,7 +9,6 @@ workspace = true
 
 [dependencies]
 ipnetwork.workspace = true
-once_cell.workspace = true
 oxnet.workspace = true
 rand.workspace = true
 serde_json.workspace = true
diff --git a/nexus/defaults/src/lib.rs b/nexus/defaults/src/lib.rs
index 32def47b9e8..73c090a4d1b 100644
--- a/nexus/defaults/src/lib.rs
+++ b/nexus/defaults/src/lib.rs
@@ -5,11 +5,11 @@
 //! Default values for data in the Nexus API, when not provided explicitly in a request.
 
 use omicron_common::api::external;
-use once_cell::sync::Lazy;
 use oxnet::Ipv4Net;
 use oxnet::Ipv6Net;
 use std::net::Ipv4Addr;
 use std::net::Ipv6Addr;
+use std::sync::LazyLock;
 
 /// The name provided for a default primary network interface for a guest
 /// instance.
@@ -18,12 +18,13 @@ pub const DEFAULT_PRIMARY_NIC_NAME: &str = "net0";
 /// The default IPv4 subnet range assigned to the default VPC Subnet, when
 /// the VPC is created, if one is not provided in the request. See
 /// for details.
-pub static DEFAULT_VPC_SUBNET_IPV4_BLOCK: Lazy<Ipv4Net> =
-    Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap());
+pub static DEFAULT_VPC_SUBNET_IPV4_BLOCK: LazyLock<Ipv4Net> =
+    LazyLock::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap());
 
-pub static DEFAULT_FIREWALL_RULES: Lazy<external::VpcFirewallRuleUpdateParams> =
-    Lazy::new(|| {
-        serde_json::from_str(r#"{
+pub static DEFAULT_FIREWALL_RULES: LazyLock<
+    external::VpcFirewallRuleUpdateParams,
+> = LazyLock::new(|| {
+    serde_json::from_str(r#"{
 "rules": [
     {
         "name": "allow-internal-inbound",
@@ -57,7 +58,7 @@
         }
     ]
 }"#).unwrap()
-    });
+});
 
 /// Generate a random VPC IPv6 prefix, in the range `fd00::/48`.
pub fn random_vpc_ipv6_prefix() -> Result { diff --git a/nexus/reconfigurator/blippy/src/checks.rs b/nexus/reconfigurator/blippy/src/checks.rs index 2c56fce1d25..e3211e8a5e4 100644 --- a/nexus/reconfigurator/blippy/src/checks.rs +++ b/nexus/reconfigurator/blippy/src/checks.rs @@ -10,6 +10,7 @@ use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::BlueprintDatasetConfig; use nexus_types::deployment::BlueprintDatasetFilter; +use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneType; @@ -461,11 +462,13 @@ fn check_datasets(blippy: &mut Blippy<'_>) { let mut expected_datasets = BTreeSet::new(); // All disks should have debug and zone root datasets. - // - // TODO-correctness We currently only include in-service disks in the - // blueprint; once we include expunged or decommissioned disks too, we - // should filter here to only in-service. - for (&sled_id, disk_config) in &blippy.blueprint().blueprint_disks { + for (sled_id, disk) in blippy + .blueprint() + .all_omicron_disks(BlueprintPhysicalDiskDisposition::is_in_service) + { + // Note: This may be called multiple times per `sled_id`, + // which is somewhat inefficient. However it will still only report + // one error note per `sled_id`. let Some(sled_datasets) = datasets.get_sled_or_note_missing( blippy, sled_id, @@ -474,41 +477,37 @@ fn check_datasets(blippy: &mut Blippy<'_>) { continue; }; - for disk in &disk_config.disks { - let sled_datasets = sled_datasets.get(&disk.pool_id); + let sled_datasets = sled_datasets.get(&disk.pool_id); - match sled_datasets - .and_then(|by_zpool| by_zpool.get(&DatasetKind::Debug)) - { - Some(dataset) => { - expected_datasets.insert(dataset.id); - } - None => { - blippy.push_sled_note( - sled_id, - Severity::Fatal, - SledKind::ZpoolMissingDebugDataset { - zpool: disk.pool_id, - }, - ); - } + match sled_datasets + .and_then(|by_zpool| by_zpool.get(&DatasetKind::Debug)) + { + Some(dataset) => { + expected_datasets.insert(dataset.id); + } + None => { + blippy.push_sled_note( + sled_id, + Severity::Fatal, + SledKind::ZpoolMissingDebugDataset { zpool: disk.pool_id }, + ); } + } - match sled_datasets.and_then(|by_zpool| { - by_zpool.get(&DatasetKind::TransientZoneRoot) - }) { - Some(dataset) => { - expected_datasets.insert(dataset.id); - } - None => { - blippy.push_sled_note( - sled_id, - Severity::Fatal, - SledKind::ZpoolMissingZoneRootDataset { - zpool: disk.pool_id, - }, - ); - } + match sled_datasets + .and_then(|by_zpool| by_zpool.get(&DatasetKind::TransientZoneRoot)) + { + Some(dataset) => { + expected_datasets.insert(dataset.id); + } + None => { + blippy.push_sled_note( + sled_id, + Severity::Fatal, + SledKind::ZpoolMissingZoneRootDataset { + zpool: disk.pool_id, + }, + ); } } } diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 79116b58f3c..73c517542e6 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -12,11 +12,11 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_types::deployment::execution::*; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::SledFilter; use 
nexus_types::external_api::views::SledState;
 use nexus_types::identity::Asset;
-use omicron_physical_disks::DeployDisksDone;
 use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
 use omicron_uuid_kinds::SledUuid;
@@ -27,6 +27,7 @@ use std::collections::BTreeMap;
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use update_engine::merge_anyhow_list;
+use update_engine::StepSuccess;
 use update_engine::StepWarning;

 mod clickhouse;
@@ -110,7 +111,7 @@ pub async fn realize_blueprint_with_overrides(
     )
     .into_shared();

-    let deploy_disks_done = register_deploy_disks_step(
+    register_deploy_disks_step(
         &engine.for_component(ExecutionComponent::PhysicalDisks),
         &opctx,
         blueprint,
@@ -163,11 +164,11 @@ pub async fn realize_blueprint_with_overrides(
         blueprint,
     );

-    register_decommission_expunged_disks_step(
+    register_decommission_disks_step(
         &engine.for_component(ExecutionComponent::PhysicalDisks),
         &opctx,
         datastore,
-        deploy_disks_done,
+        blueprint,
     );

     register_deploy_clickhouse_cluster_nodes_step(
@@ -219,6 +220,23 @@ pub async fn realize_blueprint_with_overrides(
     Ok(output.into_value(result.token()).await)
 }

+// Convert a `Result<(), anyhow::Error>` into a `StepResult` containing either
+// a `StepSuccess` or `StepWarning`, and wrap it in `Result::Ok`.
+//
+// This is necessary because we never want to return an error from execution.
+// Doing so stops execution at the errored step and prevents other independent
+// steps after the errored step from executing.
+fn result_to_step_result(
+    res: Result<(), anyhow::Error>,
+) -> Result<StepResult<(), ReconfiguratorExecutionSpec>, anyhow::Error> {
+    match res {
+        Ok(_) => Ok(StepSuccess::new(()).build()),
+        Err(e) => Ok(StepWarning::new((), e.to_string()).build()),
+    }
+}
+
+/// We explicitly don't continue with execution if this step fails. It only
+/// talks to CRDB, and if Nexus cannot talk to CRDB, we probably shouldn't
+/// continue.
fn register_zone_external_networking_step<'a>( registrar: &ComponentRegistrar<'_, 'a>, opctx: &'a OpContext, @@ -308,25 +326,24 @@ fn register_deploy_disks_step<'a>( opctx: &'a OpContext, blueprint: &'a Blueprint, sleds: SharedStepHandle>>, -) -> StepHandle { +) { registrar .new_step( ExecutionStepId::Ensure, "Deploy physical disks", move |cx| async move { let sleds_by_id = sleds.into_value(cx.token()).await; - let done = omicron_physical_disks::deploy_disks( + let res = omicron_physical_disks::deploy_disks( &opctx, &sleds_by_id, &blueprint.blueprint_disks, ) .await - .map_err(merge_anyhow_list)?; - - StepSuccess::new(done).into() + .map_err(merge_anyhow_list); + result_to_step_result(res) }, ) - .register() + .register(); } fn register_deploy_datasets_step<'a>( @@ -341,15 +358,14 @@ fn register_deploy_datasets_step<'a>( "Deploy datasets", move |cx| async move { let sleds_by_id = sleds.into_value(cx.token()).await; - datasets::deploy_datasets( + let res = datasets::deploy_datasets( &opctx, &sleds_by_id, &blueprint.blueprint_datasets, ) .await - .map_err(merge_anyhow_list)?; - - StepSuccess::new(()).into() + .map_err(merge_anyhow_list); + result_to_step_result(res) }, ) .register(); @@ -398,7 +414,7 @@ fn register_plumb_firewall_rules_step<'a>( ExecutionStepId::Ensure, "Plumb service firewall rules", move |_cx| async move { - nexus_networking::plumb_service_firewall_rules( + let res = nexus_networking::plumb_service_firewall_rules( datastore, &opctx, &[], @@ -406,9 +422,8 @@ fn register_plumb_firewall_rules_step<'a>( &opctx.log, ) .await - .context("failed to plumb service firewall rules to sleds")?; - - StepSuccess::new(()).into() + .context("failed to plumb service firewall rules to sleds"); + result_to_step_result(res) }, ) .register(); @@ -430,7 +445,7 @@ fn register_dns_records_step<'a>( move |cx| async move { let sleds_by_id = sleds.into_value(cx.token()).await; - dns::deploy_dns( + let res = dns::deploy_dns( &opctx, datastore, nexus_id.to_string(), @@ -439,9 +454,8 @@ fn register_dns_records_step<'a>( overrides, ) .await - .map_err(|e| anyhow!("{}", InlineErrorChain::new(&e)))?; - - StepSuccess::new(()).into() + .map_err(|e| anyhow!("{}", InlineErrorChain::new(&e))); + result_to_step_result(res) }, ) .register(); @@ -494,7 +508,7 @@ fn register_decommission_sleds_step<'a>( ExecutionStepId::Remove, "Decommission sleds", move |_cx| async move { - sled_state::decommission_sleds( + let res = sled_state::decommission_sleds( &opctx, datastore, blueprint @@ -506,36 +520,34 @@ fn register_decommission_sleds_step<'a>( .map(|(&sled_id, _)| sled_id), ) .await - .map_err(merge_anyhow_list)?; - - StepSuccess::new(()).into() + .map_err(merge_anyhow_list); + result_to_step_result(res) }, ) .register(); } -fn register_decommission_expunged_disks_step<'a>( +fn register_decommission_disks_step<'a>( registrar: &ComponentRegistrar<'_, 'a>, opctx: &'a OpContext, datastore: &'a DataStore, - deploy_disks_done: StepHandle, + blueprint: &'a Blueprint, ) { - // This depends on the "deploy_disks" call earlier -- disk expungement is a - // statement of policy, but we need to be assured that the Sled Agent has - // stopped using that disk before we can mark its state as decommissioned. 
registrar .new_step( ExecutionStepId::Remove, "Decommission expunged disks", - move |cx| async move { - let done = deploy_disks_done.into_value(cx.token()).await; - omicron_physical_disks::decommission_expunged_disks( - &opctx, datastore, done, + move |_cx| async move { + let res = omicron_physical_disks::decommission_expunged_disks( + &opctx, + datastore, + blueprint + .all_omicron_disks(BlueprintPhysicalDiskDisposition::is_ready_for_cleanup) + .map(|(sled_id, config)| (sled_id, config.id)), ) .await - .map_err(merge_anyhow_list)?; - - StepSuccess::new(()).into() + .map_err(merge_anyhow_list); + result_to_step_result(res) }, ) .register(); @@ -554,13 +566,14 @@ fn register_deploy_clickhouse_cluster_nodes_step<'a>( if let Some(clickhouse_cluster_config) = &blueprint.clickhouse_cluster_config { - clickhouse::deploy_nodes( + let res = clickhouse::deploy_nodes( &opctx, &blueprint.blueprint_zones, &clickhouse_cluster_config, ) .await - .map_err(merge_anyhow_list)?; + .map_err(merge_anyhow_list); + return result_to_step_result(res); } StepSuccess::new(()).into() @@ -579,16 +592,12 @@ fn register_deploy_clickhouse_single_node_step<'a>( ExecutionStepId::Ensure, "Deploy single-node clickhouse cluster", move |_cx| async move { - if let Err(e) = clickhouse::deploy_single_node( + let res = clickhouse::deploy_single_node( &opctx, &blueprint.blueprint_zones, ) - .await - { - StepWarning::new((), e.to_string()).into() - } else { - StepSuccess::new(()).into() - } + .await; + result_to_step_result(res) }, ) .register(); diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 6655eeb8b62..8bd065ad084 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -13,6 +13,7 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use slog::info; use slog::o; @@ -25,7 +26,7 @@ pub(crate) async fn deploy_disks( opctx: &OpContext, sleds_by_id: &BTreeMap, sled_configs: &BTreeMap, -) -> Result> { +) -> Result<(), Vec> { let errors: Vec<_> = stream::iter(sled_configs) .filter_map(|(sled_id, config)| async move { let log = opctx.log.new(o!( @@ -96,38 +97,55 @@ pub(crate) async fn deploy_disks( .await; if errors.is_empty() { - Ok(DeployDisksDone {}) + Ok(()) } else { Err(errors) } } -/// Typestate indicating that the deploy disks step was performed. -#[derive(Debug)] -#[must_use = "this should be passed into decommission_expunged_disks"] -pub(crate) struct DeployDisksDone {} - /// Decommissions all disks which are currently expunged. pub(crate) async fn decommission_expunged_disks( opctx: &OpContext, datastore: &DataStore, - // This is taken as a parameter to ensure that this depends on a - // "deploy_disks" call made earlier. Disk expungement is a statement of - // policy, but we need to be assured that the Sled Agent has stopped using - // that disk before we can mark its state as decommissioned. 
- _deploy_disks_done: DeployDisksDone, + expunged_disks: impl Iterator, ) -> Result<(), Vec> { - datastore - .physical_disk_decommission_all_expunged(&opctx) - .await - .map_err(|e| vec![anyhow!(e)])?; - Ok(()) + let errors: Vec = stream::iter(expunged_disks) + .filter_map(|(sled_id, disk_id)| async move { + let log = opctx.log.new(slog::o!( + "sled_id" => sled_id.to_string(), + "disk_id" => disk_id.to_string(), + )); + + match datastore.physical_disk_decommission(&opctx, disk_id).await { + Err(error) => { + warn!( + log, + "failed to decommission expunged disk"; + "error" => #%error + ); + Some(anyhow!(error).context(format!( + "failed to decommission: disk_id = {disk_id}", + ))) + } + Ok(()) => { + info!(log, "successfully decommissioned expunged disk"); + None + } + } + }) + .collect() + .await; + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } } #[cfg(test)] mod test { use super::deploy_disks; - use super::DeployDisksDone; use crate::DataStore; use crate::Sled; @@ -238,13 +256,9 @@ mod test { // Get a success result back when the blueprint has an empty set of // disks. let (_, blueprint) = create_blueprint(BTreeMap::new()); - // Use an explicit type here because not doing so can cause errors to - // be ignored (this behavior is genuinely terrible). Instead, ensure - // that the type has the right result. - let _: DeployDisksDone = - deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) - .await - .expect("failed to deploy no disks"); + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy no disks"); // Disks are updated in a particular order, but each request contains // the full set of disks that must be running. @@ -298,10 +312,9 @@ mod test { } // Execute it. - let _: DeployDisksDone = - deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) - .await - .expect("failed to deploy initial disks"); + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy initial disks"); s1.verify_and_clear(); s2.verify_and_clear(); @@ -318,10 +331,9 @@ mod test { )), ); } - let _: DeployDisksDone = - deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) - .await - .expect("failed to deploy same disks"); + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy same disks"); s1.verify_and_clear(); s2.verify_and_clear(); @@ -588,9 +600,7 @@ mod test { super::decommission_expunged_disks( &opctx, &datastore, - // This is an internal test, and we're testing decommissioning in - // isolation, so it's okay to create the typestate here. - DeployDisksDone {}, + [(sled_id, disk_to_decommission)].into_iter(), ) .await .unwrap(); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 0a1244f5198..860d0366ce0 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -144,7 +144,8 @@ pub enum EnsureMultiple { expunged: usize, /// An item was removed from the blueprint. /// - /// This usually happens after the work of expungment has completed. + /// This happens after expungement or decommissioning has completed + /// depending upon the resource type. removed: usize, }, @@ -174,7 +175,8 @@ pub struct EditCounts { pub expunged: usize, /// An item was removed from the blueprint. /// - /// This usually happens after the work of expungment has completed. 
+ /// This happens after expungement or decommissioning has completed + /// depending upon the resource type. pub removed: usize, } @@ -380,6 +382,9 @@ pub struct BlueprintBuilder<'a> { // adding zones, so this delay allows us to reuse resources that just came // free. (This is implicit and awkward; as we rework the builder we should // rework this to make it more explicit.) + // + // Note: this is currently still a `once_cell` `OnceCell` rather than a std + // `OnceCell`, because `get_or_try_init` isn't stable yet. resource_allocator: OnceCell, // These fields will become part of the final blueprint. See the @@ -632,6 +637,10 @@ impl<'a> BlueprintBuilder<'a> { }) } + pub fn parent_blueprint(&self) -> &Blueprint { + &self.parent_blueprint + } + fn resource_allocator( &mut self, ) -> Result<&mut BlueprintResourceAllocator, Error> { @@ -682,6 +691,20 @@ impl<'a> BlueprintBuilder<'a> { Either::Right(editor.zones(filter)) } + pub fn current_sled_disks( + &self, + sled_id: SledUuid, + filter: F, + ) -> impl Iterator + where + F: FnMut(BlueprintPhysicalDiskDisposition) -> bool, + { + let Some(editor) = self.sled_editors.get(&sled_id) else { + return Either::Left(iter::empty()); + }; + Either::Right(editor.disks(filter)) + } + /// Assemble a final [`Blueprint`] based on the contents of the builder pub fn build(mut self) -> Blueprint { let blueprint_id = self.rng.next_blueprint(); @@ -716,27 +739,12 @@ impl<'a> BlueprintBuilder<'a> { ); } } - // Preserving backwards compatibility, for now: disks should only - // have entries for in-service sleds, and expunged disks should be - // removed entirely. + // Preserving backwards compatibility, for now: datasets should only + // have entries for in-service sleds. let in_service_sled_ids = self .input .all_sled_ids(SledFilter::InService) .collect::>(); - blueprint_disks.retain(|sled_id, disks_config| { - if !in_service_sled_ids.contains(sled_id) { - return false; - } - - disks_config.disks.retain(|config| match config.disposition { - BlueprintPhysicalDiskDisposition::InService => true, - BlueprintPhysicalDiskDisposition::Expunged => false, - }); - - true - }); - // Preserving backwards compatibility, for now: datasets should only - // have entries for in-service sleds. blueprint_datasets .retain(|sled_id, _| in_service_sled_ids.contains(sled_id)); @@ -913,13 +921,10 @@ impl<'a> BlueprintBuilder<'a> { let mut num_zones_expunged = 0; let mut disks_to_expunge = Vec::new(); - for disk in editor.disks(DiskFilter::All) { - match disk.disposition { - BlueprintPhysicalDiskDisposition::InService => { - disks_to_expunge.push(disk.id); - } - BlueprintPhysicalDiskDisposition::Expunged => (), - } + for disk in + editor.disks(BlueprintPhysicalDiskDisposition::is_in_service) + { + disks_to_expunge.push(disk.id); } for disk_id in disks_to_expunge { let details = editor @@ -930,6 +935,13 @@ impl<'a> BlueprintBuilder<'a> { } num_datasets_expunged += details.num_datasets_expunged; num_zones_expunged += details.num_zones_expunged; + + // When we expunge a disk on an expunged sled, we can decommission + // it immediately because the sled is already gone. There is no sled + // agent to notify about the disk expungement. + editor + .decommission_disk(&disk_id) + .map_err(|err| Error::SledEditError { sled_id, err })?; } // Expunging a disk expunges any datasets and zones that depend on it, @@ -1088,35 +1100,23 @@ impl<'a> BlueprintBuilder<'a> { Ok(()) } - /// Ensures that the blueprint contains disks for a sled which already - /// exists in the database. 
- /// - /// This operation must perform the following: - /// - Ensure that any disks / zpools that exist in the database - /// are propagated into the blueprint. - /// - Ensure that any disks that are expunged from the database are - /// removed from the blueprint. - pub fn sled_ensure_disks( + /// Add any disks to the blueprint + /// Called by the planner in the `do_plan_add()` stage of planning + pub fn sled_add_disks( &mut self, sled_id: SledUuid, - resources: &SledResources, + sled_resources: &SledResources, ) -> Result { - // These are the disks known to our (last?) blueprint let editor = self.sled_editors.get_mut(&sled_id).ok_or_else(|| { Error::Planner(anyhow!( - "tried to ensure disks for unknown sled {sled_id}" + "tried to add disks for unknown sled {sled_id}" )) })?; let initial_counts = editor.edit_counts(); - let blueprint_disk_ids = editor - .disks(DiskFilter::InService) - .map(|config| config.id) - .collect::>(); - // These are the in-service disks as we observed them in the database, // during the planning phase - let database_disks = resources + let database_disks = sled_resources .all_disks(DiskFilter::InService) .map(|(zpool, disk)| (disk.disk_id, (zpool, disk))); let mut database_disk_ids = BTreeSet::new(); @@ -1139,19 +1139,38 @@ impl<'a> BlueprintBuilder<'a> { .map_err(|err| Error::SledEditError { sled_id, err })?; } - // Remove any disks that appear in the blueprint, but not the database - for disk_id in blueprint_disk_ids { - if !database_disk_ids.contains(&disk_id) { - editor - .expunge_disk(&disk_id) - .map_err(|err| Error::SledEditError { sled_id, err })?; - } - } let final_counts = editor.edit_counts(); - Ok(final_counts.difference_since(initial_counts)) } + /// Decommission any expunged disks. + /// + /// Note: This method is called by the planner only for `InService` sleds. + /// `Self::expunge_sled` expunges and decommissions disks immediately since + /// the sled is already gone by that point. + /// + /// Called by the planner in + /// `do_plan_decommission_expunged_disks_for_in_service_sled()`. + pub fn sled_decommission_disks( + &mut self, + sled_id: SledUuid, + disk_ids: Vec, + ) -> Result<(), Error> { + let editor = self.sled_editors.get_mut(&sled_id).ok_or_else(|| { + Error::Planner(anyhow!( + "tried to decommission disks for unknown sled {sled_id}" + )) + })?; + + for disk_id in disk_ids { + editor + .decommission_disk(&disk_id) + .map_err(|err| Error::SledEditError { sled_id, err })?; + } + + Ok(()) + } + /// Ensure that a sled in the blueprint has all the datasets it needs for /// its running zones. /// @@ -1866,7 +1885,7 @@ impl<'a> BlueprintBuilder<'a> { // blueprint and the list of all in-service zpools on this sled per our // planning input, and only pick zpools that are available in both. 
let current_sled_disks = editor - .disks(DiskFilter::InService) + .disks(BlueprintPhysicalDiskDisposition::is_in_service) .map(|disk_config| disk_config.pool_id) .collect::>(); @@ -2174,7 +2193,7 @@ pub mod test { for (sled_id, sled_resources) in example.input.all_sled_resources(SledFilter::Commissioned) { - builder.sled_ensure_disks(sled_id, sled_resources).unwrap(); + builder.sled_add_disks(sled_id, sled_resources).unwrap(); builder.sled_ensure_zone_ntp(sled_id).unwrap(); for pool_id in sled_resources.zpools.keys() { builder.sled_ensure_zone_crucible(sled_id, *pool_id).unwrap(); @@ -2211,7 +2230,7 @@ pub mod test { .sled_lookup(SledFilter::Commissioned, new_sled_id) .unwrap() .resources; - builder.sled_ensure_disks(new_sled_id, new_sled_resources).unwrap(); + builder.sled_add_disks(new_sled_id, &new_sled_resources).unwrap(); builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); for pool_id in new_sled_resources.zpools.keys() { builder.sled_ensure_zone_crucible(new_sled_id, *pool_id).unwrap(); @@ -2435,7 +2454,7 @@ pub mod test { let logctx = test_setup_log(TEST_NAME); // Start with an empty system (sleds with no zones). However, we leave - // the disks around so that `sled_ensure_disks` can add them. + // the disks around so that `sled_add_disks` can add them. let (example, parent) = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) .create_zones(false) @@ -2462,7 +2481,7 @@ pub mod test { .sled_editors .get(&sled_id) .unwrap() - .disks(DiskFilter::All) + .disks(BlueprintPhysicalDiskDisposition::any) .collect::>(); assert!( disks.is_empty(), @@ -2473,9 +2492,8 @@ pub mod test { for (sled_id, sled_resources) in input.all_sled_resources(SledFilter::InService) { - let edits = builder - .sled_ensure_disks(sled_id, &sled_resources) - .unwrap(); + let edits = + builder.sled_add_disks(sled_id, &sled_resources).unwrap(); assert_eq!( edits.disks, EditCounts { diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs index 6be1e48042e..fc132495f66 100644 --- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs +++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs @@ -14,12 +14,12 @@ use nexus_types::deployment::BlueprintDatasetConfig; use nexus_types::deployment::BlueprintDatasetFilter; use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; +use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::BlueprintZonesConfig; -use nexus_types::deployment::DiskFilter; use nexus_types::external_api::views::SledState; use nexus_types::inventory::Dataset; use nexus_types::inventory::Zpool; @@ -234,10 +234,13 @@ impl SledEditor { .ok_or(SledEditError::OutOfUnderlayIps) } - pub fn disks( + pub fn disks( &self, - filter: DiskFilter, - ) -> impl Iterator { + mut filter: F, + ) -> impl Iterator + where + F: FnMut(BlueprintPhysicalDiskDisposition) -> bool, + { match &self.0 { InnerSledEditor::Active(editor) => { Either::Left(editor.disks(filter)) @@ -247,7 +250,7 @@ impl SledEditor { .disks .disks .iter() - .filter(move |disk| disk.disposition.matches(filter)), + .filter(move |disk| filter(disk.disposition)), ), } } @@ -307,8 +310,7 @@ impl SledEditor { disk: BlueprintPhysicalDiskConfig, rng: &mut 
SledPlannerRng, ) -> Result<(), SledEditError> { - self.as_active_mut()?.ensure_disk(disk, rng); - Ok(()) + self.as_active_mut()?.ensure_disk(disk, rng) } pub fn expunge_disk( @@ -318,6 +320,14 @@ impl SledEditor { self.as_active_mut()?.expunge_disk(disk_id) } + pub fn decommission_disk( + &mut self, + disk_id: &PhysicalDiskUuid, + ) -> Result<(), SledEditError> { + self.as_active_mut()?.decommission_disk(disk_id)?; + Ok(()) + } + pub fn add_zone( &mut self, zone: BlueprintZoneConfig, @@ -471,10 +481,13 @@ impl ActiveSledEditor { self.underlay_ip_allocator.alloc() } - pub fn disks( + pub fn disks( &self, - filter: DiskFilter, - ) -> impl Iterator { + filter: F, + ) -> impl Iterator + where + F: FnMut(BlueprintPhysicalDiskDisposition) -> bool, + { self.disks.disks(filter) } @@ -499,10 +512,10 @@ impl ActiveSledEditor { &mut self, disk: BlueprintPhysicalDiskConfig, rng: &mut SledPlannerRng, - ) { + ) -> Result<(), SledEditError> { let zpool = ZpoolName::new_external(disk.pool_id); - self.disks.ensure(disk); + self.disks.ensure(disk)?; // Every disk also gets a Debug and Transient Zone Root dataset; ensure // both of those exist as well. @@ -511,6 +524,8 @@ impl ActiveSledEditor { self.datasets.ensure_in_service(debug, rng); self.datasets.ensure_in_service(zone_root, rng); + + Ok(()) } pub fn expunge_disk( @@ -540,6 +555,15 @@ impl ActiveSledEditor { }) } + pub fn decommission_disk( + &mut self, + disk_id: &PhysicalDiskUuid, + ) -> Result<(), SledEditError> { + // TODO: report decommissioning + let _ = self.disks.decommission(disk_id)?; + Ok(()) + } + pub fn add_zone( &mut self, zone: BlueprintZoneConfig, diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/disks.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/disks.rs index 84cce288a79..fb8e3726e81 100644 --- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/disks.rs +++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/disks.rs @@ -6,7 +6,6 @@ use crate::blueprint_builder::EditCounts; use nexus_types::deployment::BlueprintPhysicalDiskConfig; use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintPhysicalDisksConfig; -use nexus_types::deployment::DiskFilter; use omicron_common::api::external::Generation; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::ZpoolUuid; @@ -17,6 +16,12 @@ use std::collections::BTreeMap; pub enum DisksEditError { #[error("tried to expunge nonexistent disk {id}")] ExpungeNonexistentDisk { id: PhysicalDiskUuid }, + #[error("tried to decommission nonexistent disk {id}")] + DecommissionNonexistentDisk { id: PhysicalDiskUuid }, + #[error("tried to mark an expunged disk as in service {id}")] + AddExpungedDisk { id: PhysicalDiskUuid }, + #[error("tried to decommission an in service disk {id}")] + DecommissionInServiceDisk { id: PhysicalDiskUuid }, } #[derive(Debug, thiserror::Error)] @@ -65,32 +70,71 @@ impl DisksEditor { self.counts } - pub fn disks( + pub fn disks( &self, - filter: DiskFilter, - ) -> impl Iterator { - self.disks - .values() - .filter(move |config| config.disposition.matches(filter)) + mut filter: F, + ) -> impl Iterator + where + F: FnMut(BlueprintPhysicalDiskDisposition) -> bool, + { + self.disks.values().filter(move |config| filter(config.disposition)) } pub fn contains_zpool(&self, zpool_id: &ZpoolUuid) -> bool { self.disks.values().any(|disk| disk.pool_id == *zpool_id) } - pub fn ensure(&mut self, disk: BlueprintPhysicalDiskConfig) { + pub fn ensure( + &mut self, + 
disk: BlueprintPhysicalDiskConfig,
+    ) -> Result<(), DisksEditError> {
         match self.disks.entry(disk.id) {
             Entry::Vacant(slot) => {
                 slot.insert(disk);
                 self.counts.added += 1;
             }
             Entry::Occupied(mut slot) => {
-                if *slot.get() != disk {
+                let existing = slot.get();
+                if *existing != disk {
+                    match (existing.disposition, disk.disposition) {
+                        // An expunged disk cannot be brought back into
+                        // service.
+                        (
+                            BlueprintPhysicalDiskDisposition::Expunged {
+                                ..
+                            },
+                            BlueprintPhysicalDiskDisposition::InService,
+                        ) => {
+                            return Err(DisksEditError::AddExpungedDisk {
+                                id: disk.id,
+                            });
+                        }
+                        // All following combinations are valid
+                        (
+                            BlueprintPhysicalDiskDisposition::Expunged {
+                                ..
+                            },
+                            BlueprintPhysicalDiskDisposition::Expunged {
+                                ..
+                            },
+                        ) => (),
+                        (
+                            BlueprintPhysicalDiskDisposition::InService,
+                            BlueprintPhysicalDiskDisposition::InService,
+                        ) => (),
+                        (
+                            BlueprintPhysicalDiskDisposition::InService,
+                            BlueprintPhysicalDiskDisposition::Expunged {
+                                ..
+                            },
+                        ) => (),
+                    }
+
                     slot.insert(disk);
                     self.counts.updated += 1;
                 }
             }
         }
+        Ok(())
     }

     pub fn expunge(
@@ -104,11 +148,18 @@ impl DisksEditor {
         let did_expunge: bool;
         match config.disposition {
             BlueprintPhysicalDiskDisposition::InService => {
-                config.disposition = BlueprintPhysicalDiskDisposition::Expunged;
+                config.disposition =
+                    BlueprintPhysicalDiskDisposition::Expunged {
+                        // We don't update the editor generation until the call
+                        // to `finalize` which occurs later. We bump it here
+                        // to compensate.
+                        as_of_generation: self.generation.next(),
+                        ready_for_cleanup: false,
+                    };
                 self.counts.expunged += 1;
                 did_expunge = true;
             }
-            BlueprintPhysicalDiskDisposition::Expunged => {
+            BlueprintPhysicalDiskDisposition::Expunged { .. } => {
                 // expunge is idempotent; do nothing
                 did_expunge = false;
             }
@@ -116,6 +167,43 @@ impl DisksEditor {

         Ok((did_expunge, config.pool_id))
     }
+
+    pub fn decommission(
+        &mut self,
+        disk_id: &PhysicalDiskUuid,
+    ) -> Result<bool, DisksEditError> {
+        let config = self.disks.get_mut(disk_id).ok_or_else(|| {
+            DisksEditError::DecommissionNonexistentDisk { id: *disk_id }
+        })?;
+
+        match config.disposition {
+            BlueprintPhysicalDiskDisposition::InService => {
+                return Err(DisksEditError::DecommissionInServiceDisk {
+                    id: *disk_id,
+                });
+            }
+            BlueprintPhysicalDiskDisposition::Expunged {
+                ready_for_cleanup: false,
+                as_of_generation,
+            } => {
+                config.disposition =
+                    BlueprintPhysicalDiskDisposition::Expunged {
+                        ready_for_cleanup: true,
+                        as_of_generation,
+                    };
+                let did_decommission = true;
+                Ok(did_decommission)
+            }
+            // We've already decommissioned this disk
+            BlueprintPhysicalDiskDisposition::Expunged {
+                ready_for_cleanup: true,
+                ..
+            } => {
+                let did_decommission = false;
+                Ok(did_decommission)
+            }
+        }
+    }
 }

 impl TryFrom<BlueprintPhysicalDisksConfig> for DisksEditor {
diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs
index 535b3f7ee64..fdc1680fc1b 100644
--- a/nexus/reconfigurator/planning/src/example.rs
+++ b/nexus/reconfigurator/planning/src/example.rs
@@ -418,12 +418,13 @@ impl ExampleSystemBuilder {
             );
         }

-        for (i, (sled_id, sled_resources)) in
-            base_input.all_sled_resources(SledFilter::Commissioned).enumerate()
+        for (i, (sled_id, sled_details)) in
+            base_input.all_sleds(SledFilter::Commissioned).enumerate()
         {
             if self.create_disks_in_blueprint {
-                let _ =
-                    builder.sled_ensure_disks(sled_id, sled_resources).unwrap();
+                let _ = builder
+                    .sled_add_disks(sled_id, &sled_details.resources)
+                    .unwrap();
             }
             if self.create_zones {
                 let _ = builder.sled_ensure_zone_ntp(sled_id).unwrap();
@@ -446,7 +447,7 @@ impl ExampleSystemBuilder {
                 }
             }
             if self.create_zones {
-                for pool_name in sled_resources.zpools.keys() {
+                for pool_name in sled_details.resources.zpools.keys() {
                     let _ = builder
                         .sled_ensure_zone_crucible(sled_id, *pool_name)
                         .unwrap();
diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs
index f678e54f200..928b3c288bf 100644
--- a/nexus/reconfigurator/planning/src/planner.rs
+++ b/nexus/reconfigurator/planning/src/planner.rs
@@ -17,6 +17,7 @@ use crate::planner::omicron_zone_placement::PlacementError;
 use nexus_sled_agent_shared::inventory::OmicronZoneType;
 use nexus_sled_agent_shared::inventory::ZoneKind;
 use nexus_types::deployment::Blueprint;
+use nexus_types::deployment::BlueprintPhysicalDiskDisposition;
 use nexus_types::deployment::BlueprintZoneDisposition;
 use nexus_types::deployment::CockroachDbClusterVersion;
 use nexus_types::deployment::CockroachDbPreserveDowngrade;
@@ -30,6 +31,7 @@ use nexus_types::external_api::views::PhysicalDiskPolicy;
 use nexus_types::external_api::views::SledPolicy;
 use nexus_types::external_api::views::SledState;
 use nexus_types::inventory::Collection;
+use omicron_uuid_kinds::PhysicalDiskUuid;
 use omicron_uuid_kinds::SledUuid;
 use slog::error;
 use slog::{info, warn, Logger};
@@ -124,13 +126,25 @@ impl<'a> Planner<'a> {
         // 4. All disks associated with the sled have been marked expunged. This
         //    happens implicitly when a sled is expunged, so is covered by our
         //    first check.
+        //
+        // Note that we must check both the planning input and the parent
+        // blueprint to tell if a sled is decommissioned, because we carry
+        // decommissioned sleds forward and do not prune them from the
+        // blueprint right away.
         for (sled_id, sled_details) in
             self.input.all_sleds(SledFilter::Commissioned)
         {
             // Check 1: look for sleds that are expunged.
             match (sled_details.policy, sled_details.state) {
                 // If the sled is still in service, don't decommission it.
-                (SledPolicy::InService { .. }, _) => continue,
+                //
+                // We do still want to decommission any expunged disks if
+                // possible though. For example, we can expunge disks on active
+                // sleds if they are faulty.
+                (SledPolicy::InService { .. }, _) => {
+                    self.do_plan_decommission_expunged_disks_for_in_service_sled(sled_id)?;
+                    continue;
+                }
                 // If the sled is already decommissioned it... why is it showing
                 // up when we ask for commissioned sleds? Warn, but don't try to
                 // decommission it again.
@@ -148,6 +162,15 @@ impl<'a> Planner<'a> {
                 (SledPolicy::Expunged, SledState::Active) => (),
             }

+            // Check that the sled isn't already decommissioned in the parent
+            // blueprint, if it has an entry there.
+            if let Some(sled_state) =
+                self.blueprint.parent_blueprint().sled_state.get(&sled_id)
+            {
+                if *sled_state == SledState::Decommissioned {
+                    continue;
+                }
+            }
+
             // Check 2: have all this sled's zones been expunged? It's possible
             // we ourselves have made this change, which is fine.
             let all_zones_expunged = self
@@ -173,6 +196,51 @@ impl<'a> Planner<'a> {
         Ok(())
     }

+    fn do_plan_decommission_expunged_disks_for_in_service_sled(
+        &mut self,
+        sled_id: SledUuid,
+    ) -> Result<(), Error> {
+        // The sled is not expunged. We have to see if the inventory
+        // reflects the parent blueprint disk generation. If it does,
+        // then we mark any expunged disks decommissioned.
+        let Some(seen_generation) = self
+            .inventory
+            .sled_agents
+            .get(&sled_id)
+            .map(|sa| sa.omicron_physical_disks_generation)
+        else {
+            // There is no current inventory for the sled agent, so we cannot
+            // decommission any disks.
+            return Ok(());
+        };
+
+        let disks_to_decommission: Vec<PhysicalDiskUuid> = self
+            .blueprint
+            .current_sled_disks(sled_id, |disposition| match disposition {
+                BlueprintPhysicalDiskDisposition::Expunged {
+                    ready_for_cleanup,
+                    ..
+                } => !ready_for_cleanup,
+                BlueprintPhysicalDiskDisposition::InService => false,
+            })
+            .filter_map(|disk| {
+                // Has the sled agent seen this disk's expungement yet, as
+                // reflected in inventory?
+                //
+                // The `unwrap()` is safe because we filtered down to only
+                // expunged disks above.
+                if seen_generation
+                    >= disk.disposition.expunged_as_of_generation().unwrap()
+                {
+                    Some(disk.id)
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        self.blueprint.sled_decommission_disks(sled_id, disks_to_decommission)
+    }
+
     fn do_plan_expunge(&mut self) -> Result<(), Error> {
         let mut commissioned_sled_ids = BTreeSet::new();

@@ -386,7 +454,8 @@ impl<'a> Planner<'a> {
             // First, we need to ensure that sleds are using their expected
             // disks. This is necessary before we can allocate any zones.
             let sled_edits =
-                self.blueprint.sled_ensure_disks(sled_id, &sled_resources)?;
+                self.blueprint.sled_add_disks(sled_id, &sled_resources)?;
+
             if let EnsureMultiple::Changed {
                 added,
                 updated,
@@ -906,6 +975,8 @@ pub(crate) mod test {
     use nexus_types::deployment::blueprint_zone_type;
     use nexus_types::deployment::BlueprintDatasetDisposition;
     use nexus_types::deployment::BlueprintDiffSummary;
+    use nexus_types::deployment::BlueprintPhysicalDiskDisposition;
+    use nexus_types::deployment::BlueprintZoneDisposition;
     use nexus_types::deployment::BlueprintZoneType;
     use nexus_types::deployment::ClickhouseMode;
     use nexus_types::deployment::ClickhousePolicy;
@@ -1487,10 +1558,10 @@ pub(crate) mod test {
         // IP no longer being associated with a running zone, and a new Nexus
         // zone being added to one of the two remaining sleds.
         let mut builder = input.into_builder();
-        let (sled_id, details) =
+        let (sled_id, _) =
             builder.sleds_mut().iter_mut().next().expect("no sleds");
         let sled_id = *sled_id;
-        details.policy = SledPolicy::Expunged;
+        builder.expunge_sled(&sled_id).unwrap();
         let input = builder.build();
         let blueprint2 = Planner::new_based_on(
             logctx.log.clone(),
@@ -1690,11 +1761,7 @@ pub(crate) mod test {
         // external DNS zones; two external DNS zones should then be added to
         // the remaining sleds.
let mut input_builder = input.into_builder();
-        input_builder
-            .sleds_mut()
-            .get_mut(&sled_1)
-            .expect("found sled 1 again")
-            .policy = SledPolicy::Expunged;
+        input_builder.expunge_sled(&sled_1).expect("found sled 1 again");
         let input = input_builder.build();
         let blueprint3 = Planner::new_based_on(
             logctx.log.clone(),
@@ -1937,6 +2004,215 @@ pub(crate) mod test {
         logctx.cleanup_successful();
     }

+    #[test]
+    fn test_disk_add_expunge_decommission() {
+        static TEST_NAME: &str = "planner_disk_add_expunge_decommission";
+        let logctx = test_setup_log(TEST_NAME);
+
+        // Create an example system with a single sled.
+        let (example, blueprint1) =
+            ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(1).build();
+        let mut collection = example.collection;
+        let input = example.input;
+
+        // The initial collection configuration has generation 1 for disks;
+        // the initial blueprint configuration has generation 2.
+        let (sled_id, disks_config) =
+            blueprint1.blueprint_disks.first_key_value().unwrap();
+        assert_eq!(disks_config.generation, Generation::from_u32(2));
+        assert_eq!(
+            collection
+                .sled_agents
+                .get(&sled_id)
+                .unwrap()
+                .omicron_physical_disks_generation,
+            Generation::new()
+        );
+
+        // All disks should have an `InService` disposition and `Active` state.
+        for disk in &disks_config.disks {
+            assert_eq!(
+                disk.disposition,
+                BlueprintPhysicalDiskDisposition::InService
+            );
+        }
+
+        let mut builder = input.into_builder();
+
+        // Let's expunge a disk. Its disposition should change to `Expunged`,
+        // but its state should remain active.
+        let expunged_disk_id = {
+            let expunged_disk = &mut builder
+                .sleds_mut()
+                .get_mut(&sled_id)
+                .unwrap()
+                .resources
+                .zpools
+                .iter_mut()
+                .next()
+                .unwrap()
+                .1;
+            expunged_disk.policy = PhysicalDiskPolicy::Expunged;
+            expunged_disk.disk_id
+        };
+
+        let input = builder.build();
+
+        let blueprint2 = Planner::new_based_on(
+            logctx.log.clone(),
+            &blueprint1,
+            &input,
+            "test: expunge a disk",
+            &collection,
+        )
+        .expect("failed to create planner")
+        .with_rng(PlannerRng::from_seed((TEST_NAME, "bp2")))
+        .plan()
+        .expect("failed to plan");
+
+        let diff = blueprint2.diff_since_blueprint(&blueprint1);
+        println!("1 -> 2 (expunge a disk):\n{}", diff.display());
+
+        let (_, disks_config) =
+            blueprint2.blueprint_disks.first_key_value().unwrap();
+
+        // The disks generation goes from 2 -> 3.
+        assert_eq!(disks_config.generation, Generation::from_u32(3));
+        // One disk should have its disposition set to
+        // `Expunged{ready_for_cleanup: false, ..}`.
+        for disk in &disks_config.disks {
+            if disk.id == expunged_disk_id {
+                assert!(matches!(
+                    disk.disposition,
+                    BlueprintPhysicalDiskDisposition::Expunged {
+                        ready_for_cleanup: false,
+                        ..
+                    }
+                ));
+            } else {
+                assert_eq!(
+                    disk.disposition,
+                    BlueprintPhysicalDiskDisposition::InService
+                );
+            }
+            println!("{disk:?}");
+        }
+
+        // We haven't updated the inventory, so no changes should be made.
+        assert_planning_makes_no_changes(
+            &logctx.log,
+            &blueprint2,
+            &input,
+            &collection,
+            TEST_NAME,
+        );
+
+        // Let's update the inventory to reflect that the sled-agent
+        // has learned about the expungement.
+        collection
+            .sled_agents
+            .get_mut(&sled_id)
+            .unwrap()
+            .omicron_physical_disks_generation = Generation::from_u32(3);
+
+        let blueprint3 = Planner::new_based_on(
+            logctx.log.clone(),
+            &blueprint2,
+            &input,
+            "test: decommission a disk",
+            &collection,
+        )
+        .expect("failed to create planner")
+        .with_rng(PlannerRng::from_seed((TEST_NAME, "bp3")))
+        .plan()
+        .expect("failed to plan");
+
+        let diff = blueprint3.diff_since_blueprint(&blueprint2);
+        println!("2 -> 3 (decommission a disk):\n{}", diff.display());
+
+        let (_, disks_config) =
+            blueprint3.blueprint_disks.first_key_value().unwrap();
+
+        // The disks generation does not change, as decommissioning doesn't
+        // bump the generation.
+        //
+        // This is because the generation exists primarily to inform the
+        // sled-agent that it has work to do, and decommissioning doesn't
+        // trigger any sled-agent changes.
+        assert_eq!(disks_config.generation, Generation::from_u32(3));
+        // One disk should have its disposition set to
+        // `Expunged{ready_for_cleanup: true, ..}`.
+        for disk in &disks_config.disks {
+            if disk.id == expunged_disk_id {
+                assert!(matches!(
+                    disk.disposition,
+                    BlueprintPhysicalDiskDisposition::Expunged {
+                        ready_for_cleanup: true,
+                        ..
+                    }
+                ));
+            } else {
+                assert_eq!(
+                    disk.disposition,
+                    BlueprintPhysicalDiskDisposition::InService
+                );
+            }
+            println!("{disk:?}");
+        }
+
+        // Now let's expunge a sled via the planning input. All disks should
+        // get expunged and decommissioned in the same planning round. We also
+        // have to manually expunge all the disks via policy, which would
+        // happen in a database transaction when an operator expunges a sled.
+        //
+        // We don't rely on the sled-agents learning about expungement to
+        // decommission because by definition expunging a sled means it's
+        // already gone.
+        let mut builder = input.into_builder();
+        builder.expunge_sled(sled_id).unwrap();
+        let input = builder.build();
+
+        let blueprint4 = Planner::new_based_on(
+            logctx.log.clone(),
+            &blueprint3,
+            &input,
+            "test: expunge and decommission all disks",
+            &collection,
+        )
+        .expect("failed to create planner")
+        .with_rng(PlannerRng::from_seed((TEST_NAME, "bp4")))
+        .plan()
+        .expect("failed to plan");
+
+        let diff = blueprint4.diff_since_blueprint(&blueprint3);
+        println!(
+            "3 -> 4 (expunge and decommission all disks):\n{}",
+            diff.display()
+        );
+
+        let (_, disks_config) =
+            blueprint4.blueprint_disks.first_key_value().unwrap();
+
+        // The disks generation goes from 3 -> 4.
+        assert_eq!(disks_config.generation, Generation::from_u32(4));
+        // We should still have 10 disks.
+        assert_eq!(disks_config.disks.len(), 10);
+        // All disks should have their disposition set to
+        // `Expunged{ready_for_cleanup: true, ..}`.
+        for disk in &disks_config.disks {
+            assert!(matches!(
+                disk.disposition,
+                BlueprintPhysicalDiskDisposition::Expunged {
+                    ready_for_cleanup: true,
+                    ..
+                }
+            ));
+            println!("{disk:?}");
+        }
+
+        logctx.cleanup_successful();
+    }
+
     #[test]
     fn test_disk_expungement_removes_zones_durable_zpool() {
         static TEST_NAME: &str =
@@ -2011,7 +2287,7 @@ pub(crate) mod test {
         assert_eq!(summary.total_zones_removed(), 0);
         assert_eq!(summary.total_zones_modified(), 1);
         assert_eq!(summary.total_disks_added(), 0);
-        assert_eq!(summary.total_disks_removed(), 1);
+        assert_eq!(summary.total_disks_removed(), 0);
         assert_eq!(summary.total_datasets_added(), 0);
         // NOTE: Expunging a disk doesn't immediately delete datasets; see the
         // "decommissioned_disk_cleaner" background task for more context.
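The planner test above walks a disk through the full lifecycle this change introduces: `InService`, then `Expunged { ready_for_cleanup: false, .. }` once policy expunges it, then `ready_for_cleanup: true` once inventory shows the sled-agent has caught up to the generation in which the expungement landed. A minimal, self-contained sketch of that gating rule, using hypothetical stand-in types (`Disposition`, `ready_to_decommission`) rather than the real `BlueprintPhysicalDiskDisposition` and planner plumbing:

```rust
// Simplified stand-ins for illustration only; the real types live in
// nexus-types and carry more state (zpool, identity, etc.).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Disposition {
    InService,
    Expunged { as_of_generation: u64, ready_for_cleanup: bool },
}

/// A disk may be decommissioned only if it is expunged, not yet cleaned up,
/// and the sled-agent has reported (via inventory) a disk generation at least
/// as new as the generation the expungement landed in.
fn ready_to_decommission(d: Disposition, seen_generation: u64) -> bool {
    match d {
        Disposition::InService => false,
        // Already decommissioned; nothing more to do.
        Disposition::Expunged { ready_for_cleanup: true, .. } => false,
        Disposition::Expunged { as_of_generation, ready_for_cleanup: false } => {
            seen_generation >= as_of_generation
        }
    }
}

fn main() {
    let d = Disposition::Expunged { as_of_generation: 3, ready_for_cleanup: false };
    // Inventory still at generation 2: the sled-agent hasn't acknowledged
    // the expungement, so the planner must wait.
    assert!(!ready_to_decommission(d, 2));
    // Inventory at generation 3: safe to mark the disk ready for cleanup.
    assert!(ready_to_decommission(d, 3));
}
```

Under this rule, decommissioning is idempotent and purely inventory-driven, which is exactly what the bp2 -> bp3 transition in the test asserts: nothing changes until `omicron_physical_disks_generation` reaches the expungement generation.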
@@ -2270,8 +2546,10 @@ pub(crate) mod test {
         };
         println!("1 -> 2: marked non-provisionable {nonprovisionable_sled_id}");
         let expunged_sled_id = {
-            let (sled_id, details) = sleds_iter.next().expect("no sleds");
-            details.policy = SledPolicy::Expunged;
+            let (sled_id, _) = sleds_iter.next().expect("no sleds");
+            // We need to call builder.expunge_sled(), but can't while
+            // iterating; we defer that work until after we're done with
+            // `sleds_iter`.
             *sled_id
         };
         println!("1 -> 2: expunged {expunged_sled_id}");
         let decommissioned_sled_id = {
             let (sled_id, details) = sleds_iter.next().expect("no sleds");
             details.state = SledState::Decommissioned;

+            // Drop the mutable borrow on the builder so we can call
+            // `builder.expunge_sled()`.
+            let sled_id = *sled_id;
+            // Let's also properly expunge the sled and its disks: a sled
+            // can't be decommissioned without also being expunged.
+            builder.expunge_sled(&sled_id).unwrap();
+
             // Decommissioned sleds can only occur if their zones have been
             // expunged, so lie and pretend like that already happened
             // (otherwise the planner will rightfully fail to generate a new
             // blueprint, because we're feeding it invalid inputs).
             for mut zone in
-                &mut blueprint1.blueprint_zones.get_mut(sled_id).unwrap().zones
+                &mut blueprint1.blueprint_zones.get_mut(&sled_id).unwrap().zones
             {
                 zone.disposition = BlueprintZoneDisposition::Expunged {
                     as_of_generation: Generation::new(),
@@ -2298,12 +2583,14 @@ pub(crate) mod test {
             // that's an invalid state that the planner will reject.
             *blueprint1
                 .sled_state
-                .get_mut(sled_id)
+                .get_mut(&sled_id)
                 .expect("found state in parent blueprint") =
                 SledState::Decommissioned;
-            *sled_id
+            sled_id
         };
+        // Actually expunge the sled (the work deferred during iteration
+        // above).
+        builder.expunge_sled(&expunged_sled_id).unwrap();
         println!("1 -> 2: decommissioned {decommissioned_sled_id}");

         // Now run the planner with a high number of target Nexus zones. The
@@ -2558,15 +2845,18 @@ pub(crate) mod test {
         let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME);

         // Expunge one of the sleds.
+        //
+        // We expunge a sled via planning input using the builder so that
+        // disks are properly taken into account.
let mut builder = input.into_builder(); let expunged_sled_id = { let mut iter = builder.sleds_mut().iter_mut(); - let (sled_id, details) = iter.next().expect("at least one sled"); - details.policy = SledPolicy::Expunged; + let (sled_id, _) = iter.next().expect("at least one sled"); *sled_id }; - + builder.expunge_sled(&expunged_sled_id).expect("sled is expungable"); let input = builder.build(); + let mut blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, @@ -3433,8 +3723,7 @@ pub(crate) mod test { // Expunge a keeper zone let mut builder = input.into_builder(); - builder.sleds_mut().get_mut(&sled_id).unwrap().policy = - SledPolicy::Expunged; + builder.expunge_sled(&sled_id).unwrap(); let input = builder.build(); let blueprint4 = Planner::new_based_on( diff --git a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt index b607a92241f..d2495da0532 100644 --- a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt +++ b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt @@ -4,19 +4,19 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 0dbf1e39-e265-4071-a8df-6d1225b46694 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-2b9c7004-fa39-4cf0-ae41-c299d3191f26 - fake-vendor fake-model serial-3ad934d9-90ee-4805-881c-20108827773f - fake-vendor fake-model serial-69db5dd6-795e-4e04-bfb3-f51962c49853 - fake-vendor fake-model serial-8557a3fb-cc12-497f-86d2-9f1a463b3685 - fake-vendor fake-model serial-9bd3cc34-4891-4c28-a4de-c4fcf01b6215 - fake-vendor fake-model serial-9dafffa2-31b7-43c0-b673-0c946be799f0 - fake-vendor fake-model serial-9e626a52-b5f1-4776-9cb8-271848b9c651 - fake-vendor fake-model serial-a645a1ac-4c49-4c7e-ba53-3dc60b737f06 - fake-vendor fake-model serial-b5ae209c-9226-44a0-8a6b-03b44f93d456 - fake-vendor fake-model serial-cd783b74-e400-41e0-9bb7-1d1d2f8958ce + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-2b9c7004-fa39-4cf0-ae41-c299d3191f26 in service + fake-vendor fake-model serial-3ad934d9-90ee-4805-881c-20108827773f in service + fake-vendor fake-model serial-69db5dd6-795e-4e04-bfb3-f51962c49853 in service + fake-vendor fake-model serial-8557a3fb-cc12-497f-86d2-9f1a463b3685 in service + fake-vendor fake-model serial-9bd3cc34-4891-4c28-a4de-c4fcf01b6215 in service + fake-vendor fake-model serial-9dafffa2-31b7-43c0-b673-0c946be799f0 in service + fake-vendor fake-model serial-9e626a52-b5f1-4776-9cb8-271848b9c651 in service + fake-vendor fake-model serial-a645a1ac-4c49-4c7e-ba53-3dc60b737f06 in service + fake-vendor fake-model serial-b5ae209c-9226-44a0-8a6b-03b44f93d456 in service + fake-vendor fake-model serial-cd783b74-e400-41e0-9bb7-1d1d2f8958ce in service datasets at generation 2: @@ -105,19 +105,19 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 15cf73a6-445b-4d36-9232-5ed364019bc6 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - 
fake-vendor   fake-model   serial-3cba16f1-1a3d-44e5-ba1d-e68cd2188615
-    fake-vendor   fake-model   serial-3fc3ec87-1c39-4df8-99bf-30ca97ec5fac
-    fake-vendor   fake-model   serial-4c18c1af-dc1c-4de5-92a1-1b2923ea6a87
-    fake-vendor   fake-model   serial-6f3a85db-de97-40d9-bf66-e6643ac1c114
-    fake-vendor   fake-model   serial-96f39cf4-b2ac-413d-8f94-ba66b127cddd
-    fake-vendor   fake-model   serial-99c392f3-77b8-4f60-9efa-4efae0c92721
-    fake-vendor   fake-model   serial-bc3195df-61ca-4111-863b-08b5cc243eab
-    fake-vendor   fake-model   serial-c787b52c-2cb8-4da2-a17a-128feb5eea0c
-    fake-vendor   fake-model   serial-d59d419e-c4d3-41ef-ab6d-0df3620dc84b
-    fake-vendor   fake-model   serial-dfaae221-11a9-4db0-b861-41fe5648f185
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-3cba16f1-1a3d-44e5-ba1d-e68cd2188615   in service
+    fake-vendor   fake-model   serial-3fc3ec87-1c39-4df8-99bf-30ca97ec5fac   in service
+    fake-vendor   fake-model   serial-4c18c1af-dc1c-4de5-92a1-1b2923ea6a87   in service
+    fake-vendor   fake-model   serial-6f3a85db-de97-40d9-bf66-e6643ac1c114   in service
+    fake-vendor   fake-model   serial-96f39cf4-b2ac-413d-8f94-ba66b127cddd   in service
+    fake-vendor   fake-model   serial-99c392f3-77b8-4f60-9efa-4efae0c92721   in service
+    fake-vendor   fake-model   serial-bc3195df-61ca-4111-863b-08b5cc243eab   in service
+    fake-vendor   fake-model   serial-c787b52c-2cb8-4da2-a17a-128feb5eea0c   in service
+    fake-vendor   fake-model   serial-d59d419e-c4d3-41ef-ab6d-0df3620dc84b   in service
+    fake-vendor   fake-model   serial-dfaae221-11a9-4db0-b861-41fe5648f185   in service

   datasets at generation 2:
@@ -201,19 +201,19 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54

 sled: 50e6c1c0-43b2-4abc-9041-41165597f639 (active)

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-00fb9aa9-0bbf-49ab-a712-6e8feaf719e2
-    fake-vendor   fake-model   serial-37f466d7-510b-40e4-b9a4-c5c092a6a5f6
-    fake-vendor   fake-model   serial-734b7b3a-86af-48a7-bd00-8d79fa2690c3
-    fake-vendor   fake-model   serial-747d2504-36a4-4acc-ad73-22291b5bbedb
-    fake-vendor   fake-model   serial-7dd422ab-4839-4a7a-8109-ba1941357c70
-    fake-vendor   fake-model   serial-8b020037-bc77-48b2-9280-a622f571908b
-    fake-vendor   fake-model   serial-96753d7f-de6b-4ce6-a9dc-004f6a0ba0cf
-    fake-vendor   fake-model   serial-a0bd8e79-1113-4c40-8705-ed00e66f0c35
-    fake-vendor   fake-model   serial-b30e150e-c83e-4c1e-b3bf-91a330d42135
-    fake-vendor   fake-model   serial-ff911b9b-57a8-4318-a253-e2363b70083d
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-00fb9aa9-0bbf-49ab-a712-6e8feaf719e2   in service
+    fake-vendor   fake-model   serial-37f466d7-510b-40e4-b9a4-c5c092a6a5f6   in service
+    fake-vendor   fake-model   serial-734b7b3a-86af-48a7-bd00-8d79fa2690c3   in service
+    fake-vendor   fake-model   serial-747d2504-36a4-4acc-ad73-22291b5bbedb   in service
+    fake-vendor   fake-model   serial-7dd422ab-4839-4a7a-8109-ba1941357c70   in service
+    fake-vendor   fake-model   serial-8b020037-bc77-48b2-9280-a622f571908b   in service
+    fake-vendor   fake-model   serial-96753d7f-de6b-4ce6-a9dc-004f6a0ba0cf   in service
+    fake-vendor   fake-model   serial-a0bd8e79-1113-4c40-8705-ed00e66f0c35   in service
+    fake-vendor   fake-model   serial-b30e150e-c83e-4c1e-b3bf-91a330d42135   in service
+    fake-vendor   fake-model   serial-ff911b9b-57a8-4318-a253-e2363b70083d   in service

   datasets at generation 2:
@@ -294,19 +294,19 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54

 sled: 969ff976-df34-402c-a362-53db03a6b97f (active)

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-07444848-952b-4333-aa72-401c7bf5d724
-    fake-vendor   fake-model   serial-242f8f98-fdc2-4ea9-ab69-e57b993df0df
-    fake-vendor   fake-model   serial-26cc7ce1-dc59-4398-8083-a4e1db957a46
-    fake-vendor   fake-model   serial-3b757772-8c62-4543-a276-7c0051280687
-    fake-vendor   fake-model   serial-981430ec-a43e-4418-bd2c-28db344c8b06
-    fake-vendor   fake-model   serial-9dbfe441-887c-45d0-a3ed-7d8e1a63327f
-    fake-vendor   fake-model   serial-b37f5663-bedb-42a3-9b1a-5e417ee6c3d2
-    fake-vendor   fake-model   serial-b48a178d-f7fd-4b50-811d-f7d195752710
-    fake-vendor   fake-model   serial-be1784b0-017a-436f-8a6a-1884cddc5fa1
-    fake-vendor   fake-model   serial-f9415bcf-5757-442a-a400-5a9ccfb5d80a
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-07444848-952b-4333-aa72-401c7bf5d724   in service
+    fake-vendor   fake-model   serial-242f8f98-fdc2-4ea9-ab69-e57b993df0df   in service
+    fake-vendor   fake-model   serial-26cc7ce1-dc59-4398-8083-a4e1db957a46   in service
+    fake-vendor   fake-model   serial-3b757772-8c62-4543-a276-7c0051280687   in service
+    fake-vendor   fake-model   serial-981430ec-a43e-4418-bd2c-28db344c8b06   in service
+    fake-vendor   fake-model   serial-9dbfe441-887c-45d0-a3ed-7d8e1a63327f   in service
+    fake-vendor   fake-model   serial-b37f5663-bedb-42a3-9b1a-5e417ee6c3d2   in service
+    fake-vendor   fake-model   serial-b48a178d-f7fd-4b50-811d-f7d195752710   in service
+    fake-vendor   fake-model   serial-be1784b0-017a-436f-8a6a-1884cddc5fa1   in service
+    fake-vendor   fake-model   serial-f9415bcf-5757-442a-a400-5a9ccfb5d80a   in service

   datasets at generation 2:
@@ -387,19 +387,19 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54

 sled: ec5c0b37-b651-4c45-ac1c-24541ef9c44b (active)

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-148436fe-d3e9-4371-8d2e-ec950cc8a84c
-    fake-vendor   fake-model   serial-7dd076f1-9d62-49a8-bc0c-5ff5d045c917
-    fake-vendor   fake-model   serial-a50f4bb9-d19a-4be8-ad49-b9a552a21062
-    fake-vendor   fake-model   serial-b4ee33bb-03f1-4085-9830-9da92002a969
-    fake-vendor   fake-model   serial-b50bec8b-a8d3-4ba6-ba3d-12c2a0da911c
-    fake-vendor   fake-model   serial-b64f79f6-188f-4e98-9eac-d8111673a130
-    fake-vendor   fake-model   serial-c144a26e-f859-42a0-adca-00d9091d98e4
-    fake-vendor   fake-model   serial-c45c08e4-aade-4333-9dad-935ccf4e8352
-    fake-vendor   fake-model   serial-df62d5da-7da0-468b-b328-0fefbf57568b
-    fake-vendor   fake-model   serial-e6433ded-7c90-46a9-8bda-648bcc9fbf07
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-148436fe-d3e9-4371-8d2e-ec950cc8a84c   in service
+    fake-vendor   fake-model   serial-7dd076f1-9d62-49a8-bc0c-5ff5d045c917   in service
+    fake-vendor   fake-model   serial-a50f4bb9-d19a-4be8-ad49-b9a552a21062   in service
+    fake-vendor   fake-model   serial-b4ee33bb-03f1-4085-9830-9da92002a969   in service
+    fake-vendor   fake-model   serial-b50bec8b-a8d3-4ba6-ba3d-12c2a0da911c   in service
+    fake-vendor   fake-model   serial-b64f79f6-188f-4e98-9eac-d8111673a130   in service
+    fake-vendor   fake-model   serial-c144a26e-f859-42a0-adca-00d9091d98e4   in service
+    fake-vendor   fake-model   serial-c45c08e4-aade-4333-9dad-935ccf4e8352   in service
+    fake-vendor   fake-model   serial-df62d5da-7da0-468b-b328-0fefbf57568b   in service
+    fake-vendor   fake-model   serial-e6433ded-7c90-46a9-8bda-648bcc9fbf07   in service

   datasets at generation 2:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt
index 8a578be17e5..7ff32c12b4b 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt
@@ -6,19 +6,19 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366

 sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-014eb1e9-04fe-4f36-8339-0a090b053ada
-    fake-vendor   fake-model   serial-31a3bc64-7a3b-496d-b644-785dc44b6e37
-    fake-vendor   fake-model   serial-7bb40bd6-9c43-4b63-8337-18313c72aea2
-    fake-vendor   fake-model   serial-988aa8c2-cb5e-406b-9289-425dc2e5bc3a
-    fake-vendor   fake-model   serial-ad574c09-2ae0-4534-a2a4-f923ce20ae87
-    fake-vendor   fake-model   serial-ad91e238-4901-4ff4-a91b-75233c936426
-    fake-vendor   fake-model   serial-ce58d463-d442-4c97-a6b4-f7d98c3fd902
-    fake-vendor   fake-model   serial-f18f7689-0059-4b79-880e-34faf7a0fe0e
-    fake-vendor   fake-model   serial-f1d6cea4-640f-415e-89fe-2b1784ce3db8
-    fake-vendor   fake-model   serial-f4a96860-bdeb-4435-bdf5-2a10beb3d44a
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-014eb1e9-04fe-4f36-8339-0a090b053ada   in service
+    fake-vendor   fake-model   serial-31a3bc64-7a3b-496d-b644-785dc44b6e37   in service
+    fake-vendor   fake-model   serial-7bb40bd6-9c43-4b63-8337-18313c72aea2   in service
+    fake-vendor   fake-model   serial-988aa8c2-cb5e-406b-9289-425dc2e5bc3a   in service
+    fake-vendor   fake-model   serial-ad574c09-2ae0-4534-a2a4-f923ce20ae87   in service
+    fake-vendor   fake-model   serial-ad91e238-4901-4ff4-a91b-75233c936426   in service
+    fake-vendor   fake-model   serial-ce58d463-d442-4c97-a6b4-f7d98c3fd902   in service
+    fake-vendor   fake-model   serial-f18f7689-0059-4b79-880e-34faf7a0fe0e   in service
+    fake-vendor   fake-model   serial-f1d6cea4-640f-415e-89fe-2b1784ce3db8   in service
+    fake-vendor   fake-model   serial-f4a96860-bdeb-4435-bdf5-2a10beb3d44a   in service

   datasets at generation 2:
@@ -98,19 +98,19 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366

 sled 43677374-8d2f-4deb-8a41-eeea506db8e0 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-12057b4a-0b06-4f70-ba22-336de2385bfe
-    fake-vendor   fake-model   serial-29758363-6c77-40c3-8740-9c0c64f6e14a
-    fake-vendor   fake-model   serial-3f331c10-7882-48ab-85d9-05108490b55b
-    fake-vendor   fake-model   serial-5152d1aa-9045-4e06-9ef6-6eadac3696e4
-    fake-vendor   fake-model   serial-5c0dd424-d905-4fc5-a73c-36254fdd470c
-    fake-vendor   fake-model   serial-794df76f-bca0-4635-9eb6-773ad0108f7e
-    fake-vendor   fake-model   serial-9024d350-38a7-459b-8550-3b2c4a88b5c1
-    fake-vendor   fake-model   serial-95e86080-e162-4980-a589-db6bb1a95ca7
-    fake-vendor   fake-model   serial-d55d36d7-df92-4615-944d-440a1f8b5001
-    fake-vendor   fake-model   serial-db6686c8-2dd9-4032-8444-2a06b43baa68
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-12057b4a-0b06-4f70-ba22-336de2385bfe   in service
+    fake-vendor   fake-model   serial-29758363-6c77-40c3-8740-9c0c64f6e14a   in service
+    fake-vendor   fake-model   serial-3f331c10-7882-48ab-85d9-05108490b55b   in service
+    fake-vendor   fake-model   serial-5152d1aa-9045-4e06-9ef6-6eadac3696e4   in service
+    fake-vendor   fake-model   serial-5c0dd424-d905-4fc5-a73c-36254fdd470c   in service
+    fake-vendor   fake-model   serial-794df76f-bca0-4635-9eb6-773ad0108f7e   in service
+    fake-vendor   fake-model   serial-9024d350-38a7-459b-8550-3b2c4a88b5c1   in service
+    fake-vendor   fake-model   serial-95e86080-e162-4980-a589-db6bb1a95ca7   in service
+    fake-vendor   fake-model   serial-d55d36d7-df92-4615-944d-440a1f8b5001   in service
+    fake-vendor   fake-model   serial-db6686c8-2dd9-4032-8444-2a06b43baa68   in service

   datasets at generation 2:
@@ -187,19 +187,19 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366

 sled 590e3034-d946-4166-b0e5-2d0034197a07 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a94863d-16e2-4535-973b-e98dd47fd18d
-    fake-vendor   fake-model   serial-32456d15-f5b6-4efc-90c8-dbba979b69cb
-    fake-vendor   fake-model   serial-416fe9f9-5161-4b0f-9e11-c9d81563ded5
-    fake-vendor   fake-model   serial-4c68800e-23f8-485b-b251-628fd151e445
-    fake-vendor   fake-model   serial-9dd87c4d-5fb4-475a-86fa-c0da81a3e00a
-    fake-vendor   fake-model   serial-be93a517-445e-46c2-aa21-3dc526d4a413
-    fake-vendor   fake-model   serial-d9344e2b-84d2-4392-84ab-41b86ed02237
-    fake-vendor   fake-model   serial-eab188d0-b34a-4673-b254-12e705597654
-    fake-vendor   fake-model   serial-f1e0386f-11b6-4cdf-8250-826d256db6b5
-    fake-vendor   fake-model   serial-f8c9c9a9-d73e-4cdf-a9af-03cfbbbce12b
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a94863d-16e2-4535-973b-e98dd47fd18d   in service
+    fake-vendor   fake-model   serial-32456d15-f5b6-4efc-90c8-dbba979b69cb   in service
+    fake-vendor   fake-model   serial-416fe9f9-5161-4b0f-9e11-c9d81563ded5   in service
+    fake-vendor   fake-model   serial-4c68800e-23f8-485b-b251-628fd151e445   in service
+    fake-vendor   fake-model   serial-9dd87c4d-5fb4-475a-86fa-c0da81a3e00a   in service
+    fake-vendor   fake-model   serial-be93a517-445e-46c2-aa21-3dc526d4a413   in service
+    fake-vendor   fake-model   serial-d9344e2b-84d2-4392-84ab-41b86ed02237   in service
+    fake-vendor   fake-model   serial-eab188d0-b34a-4673-b254-12e705597654   in service
+    fake-vendor   fake-model   serial-f1e0386f-11b6-4cdf-8250-826d256db6b5   in service
+    fake-vendor   fake-model   serial-f8c9c9a9-d73e-4cdf-a9af-03cfbbbce12b   in service

   datasets at generation 2:
@@ -278,19 +278,19 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366

 sled ec61eded-c34f-443d-a580-dadf757529c4 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-+   fake-vendor   fake-model   serial-28699448-c5d9-49ea-bf7e-627800efe783
-+   fake-vendor   fake-model   serial-2c490e96-27f2-4a7f-b440-04d4bfd1e4f6
-+   fake-vendor   fake-model   serial-4c3bb1c7-55b6-49b8-b212-516b8f2c26c2
-+   fake-vendor   fake-model   serial-5db07562-31a8-43e3-b99e-7c7cb89754b7
-+   fake-vendor   fake-model   serial-9451a5d5-b358-4719-b6c1-a0d187da217c
-+   fake-vendor   fake-model   serial-bb2e2869-9481-483a-bc49-2bdd62f515f5
-+   fake-vendor   fake-model   serial-d5a36c66-4b2f-46e6-96f4-b82debee1a4a
-+   fake-vendor   fake-model   serial-f99ec996-ec08-4ccf-9a6e-6c5cab440fb4
-+   fake-vendor   fake-model   serial-faccbb39-d686-42a1-a50a-0eb59ba74a87
-+   fake-vendor   fake-model   serial-fdfd067b-1d86-444d-a21f-ed33709f3e4d
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
++   fake-vendor   fake-model   serial-28699448-c5d9-49ea-bf7e-627800efe783   in service
++   fake-vendor   fake-model   serial-2c490e96-27f2-4a7f-b440-04d4bfd1e4f6   in service
++   fake-vendor   fake-model   serial-4c3bb1c7-55b6-49b8-b212-516b8f2c26c2   in service
++   fake-vendor   fake-model   serial-5db07562-31a8-43e3-b99e-7c7cb89754b7   in service
++   fake-vendor   fake-model   serial-9451a5d5-b358-4719-b6c1-a0d187da217c   in service
++   fake-vendor   fake-model   serial-bb2e2869-9481-483a-bc49-2bdd62f515f5   in service
++   fake-vendor   fake-model   serial-d5a36c66-4b2f-46e6-96f4-b82debee1a4a   in service
++   fake-vendor   fake-model   serial-f99ec996-ec08-4ccf-9a6e-6c5cab440fb4   in service
++   fake-vendor   fake-model   serial-faccbb39-d686-42a1-a50a-0eb59ba74a87   in service
++   fake-vendor   fake-model   serial-fdfd067b-1d86-444d-a21f-ed33709f3e4d   in service

   datasets at generation 2:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt
index cafd440f054..70ae7a1c057 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt
@@ -6,19 +6,19 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163

 sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-014eb1e9-04fe-4f36-8339-0a090b053ada
-    fake-vendor   fake-model   serial-31a3bc64-7a3b-496d-b644-785dc44b6e37
-    fake-vendor   fake-model   serial-7bb40bd6-9c43-4b63-8337-18313c72aea2
-    fake-vendor   fake-model   serial-988aa8c2-cb5e-406b-9289-425dc2e5bc3a
-    fake-vendor   fake-model   serial-ad574c09-2ae0-4534-a2a4-f923ce20ae87
-    fake-vendor   fake-model   serial-ad91e238-4901-4ff4-a91b-75233c936426
-    fake-vendor   fake-model   serial-ce58d463-d442-4c97-a6b4-f7d98c3fd902
-    fake-vendor   fake-model   serial-f18f7689-0059-4b79-880e-34faf7a0fe0e
-    fake-vendor   fake-model   serial-f1d6cea4-640f-415e-89fe-2b1784ce3db8
-    fake-vendor   fake-model   serial-f4a96860-bdeb-4435-bdf5-2a10beb3d44a
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-014eb1e9-04fe-4f36-8339-0a090b053ada   in service
+    fake-vendor   fake-model   serial-31a3bc64-7a3b-496d-b644-785dc44b6e37   in service
+    fake-vendor   fake-model   serial-7bb40bd6-9c43-4b63-8337-18313c72aea2   in service
+    fake-vendor   fake-model   serial-988aa8c2-cb5e-406b-9289-425dc2e5bc3a   in service
+    fake-vendor   fake-model   serial-ad574c09-2ae0-4534-a2a4-f923ce20ae87   in service
+    fake-vendor   fake-model   serial-ad91e238-4901-4ff4-a91b-75233c936426   in service
+    fake-vendor   fake-model   serial-ce58d463-d442-4c97-a6b4-f7d98c3fd902   in service
+    fake-vendor   fake-model   serial-f18f7689-0059-4b79-880e-34faf7a0fe0e   in service
+    fake-vendor   fake-model   serial-f1d6cea4-640f-415e-89fe-2b1784ce3db8   in service
+    fake-vendor   fake-model   serial-f4a96860-bdeb-4435-bdf5-2a10beb3d44a   in service

   datasets at generation 2:
@@ -98,19 +98,19 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163

 sled 43677374-8d2f-4deb-8a41-eeea506db8e0 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-12057b4a-0b06-4f70-ba22-336de2385bfe
-    fake-vendor   fake-model   serial-29758363-6c77-40c3-8740-9c0c64f6e14a
-    fake-vendor   fake-model   serial-3f331c10-7882-48ab-85d9-05108490b55b
-    fake-vendor   fake-model   serial-5152d1aa-9045-4e06-9ef6-6eadac3696e4
-    fake-vendor   fake-model   serial-5c0dd424-d905-4fc5-a73c-36254fdd470c
-    fake-vendor   fake-model   serial-794df76f-bca0-4635-9eb6-773ad0108f7e
-    fake-vendor   fake-model   serial-9024d350-38a7-459b-8550-3b2c4a88b5c1
-    fake-vendor   fake-model   serial-95e86080-e162-4980-a589-db6bb1a95ca7
-    fake-vendor   fake-model   serial-d55d36d7-df92-4615-944d-440a1f8b5001
-    fake-vendor   fake-model   serial-db6686c8-2dd9-4032-8444-2a06b43baa68
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-12057b4a-0b06-4f70-ba22-336de2385bfe   in service
+    fake-vendor   fake-model   serial-29758363-6c77-40c3-8740-9c0c64f6e14a   in service
+    fake-vendor   fake-model   serial-3f331c10-7882-48ab-85d9-05108490b55b   in service
+    fake-vendor   fake-model   serial-5152d1aa-9045-4e06-9ef6-6eadac3696e4   in service
+    fake-vendor   fake-model   serial-5c0dd424-d905-4fc5-a73c-36254fdd470c   in service
+    fake-vendor   fake-model   serial-794df76f-bca0-4635-9eb6-773ad0108f7e   in service
+    fake-vendor   fake-model   serial-9024d350-38a7-459b-8550-3b2c4a88b5c1   in service
+    fake-vendor   fake-model   serial-95e86080-e162-4980-a589-db6bb1a95ca7   in service
+    fake-vendor   fake-model   serial-d55d36d7-df92-4615-944d-440a1f8b5001   in service
+    fake-vendor   fake-model   serial-db6686c8-2dd9-4032-8444-2a06b43baa68   in service

   datasets at generation 2:
@@ -187,19 +187,19 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163

 sled 590e3034-d946-4166-b0e5-2d0034197a07 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a94863d-16e2-4535-973b-e98dd47fd18d
-    fake-vendor   fake-model   serial-32456d15-f5b6-4efc-90c8-dbba979b69cb
-    fake-vendor   fake-model   serial-416fe9f9-5161-4b0f-9e11-c9d81563ded5
-    fake-vendor   fake-model   serial-4c68800e-23f8-485b-b251-628fd151e445
-    fake-vendor   fake-model   serial-9dd87c4d-5fb4-475a-86fa-c0da81a3e00a
-    fake-vendor   fake-model   serial-be93a517-445e-46c2-aa21-3dc526d4a413
-    fake-vendor   fake-model   serial-d9344e2b-84d2-4392-84ab-41b86ed02237
-    fake-vendor   fake-model   serial-eab188d0-b34a-4673-b254-12e705597654
-    fake-vendor   fake-model   serial-f1e0386f-11b6-4cdf-8250-826d256db6b5
-    fake-vendor   fake-model   serial-f8c9c9a9-d73e-4cdf-a9af-03cfbbbce12b
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a94863d-16e2-4535-973b-e98dd47fd18d   in service
+    fake-vendor   fake-model   serial-32456d15-f5b6-4efc-90c8-dbba979b69cb   in service
+    fake-vendor   fake-model   serial-416fe9f9-5161-4b0f-9e11-c9d81563ded5   in service
+    fake-vendor   fake-model   serial-4c68800e-23f8-485b-b251-628fd151e445   in service
+    fake-vendor   fake-model   serial-9dd87c4d-5fb4-475a-86fa-c0da81a3e00a   in service
+    fake-vendor   fake-model   serial-be93a517-445e-46c2-aa21-3dc526d4a413   in service
+    fake-vendor   fake-model   serial-d9344e2b-84d2-4392-84ab-41b86ed02237   in service
+    fake-vendor   fake-model   serial-eab188d0-b34a-4673-b254-12e705597654   in service
+    fake-vendor   fake-model   serial-f1e0386f-11b6-4cdf-8250-826d256db6b5   in service
+    fake-vendor   fake-model   serial-f8c9c9a9-d73e-4cdf-a9af-03cfbbbce12b   in service

   datasets at generation 2:
@@ -278,19 +278,19 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163

 sled ec61eded-c34f-443d-a580-dadf757529c4 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-28699448-c5d9-49ea-bf7e-627800efe783
-    fake-vendor   fake-model   serial-2c490e96-27f2-4a7f-b440-04d4bfd1e4f6
-    fake-vendor   fake-model   serial-4c3bb1c7-55b6-49b8-b212-516b8f2c26c2
-    fake-vendor   fake-model   serial-5db07562-31a8-43e3-b99e-7c7cb89754b7
-    fake-vendor   fake-model   serial-9451a5d5-b358-4719-b6c1-a0d187da217c
-    fake-vendor   fake-model   serial-bb2e2869-9481-483a-bc49-2bdd62f515f5
-    fake-vendor   fake-model   serial-d5a36c66-4b2f-46e6-96f4-b82debee1a4a
-    fake-vendor   fake-model   serial-f99ec996-ec08-4ccf-9a6e-6c5cab440fb4
-    fake-vendor   fake-model   serial-faccbb39-d686-42a1-a50a-0eb59ba74a87
-    fake-vendor   fake-model   serial-fdfd067b-1d86-444d-a21f-ed33709f3e4d
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-28699448-c5d9-49ea-bf7e-627800efe783   in service
+    fake-vendor   fake-model   serial-2c490e96-27f2-4a7f-b440-04d4bfd1e4f6   in service
+    fake-vendor   fake-model   serial-4c3bb1c7-55b6-49b8-b212-516b8f2c26c2   in service
+    fake-vendor   fake-model   serial-5db07562-31a8-43e3-b99e-7c7cb89754b7   in service
+    fake-vendor   fake-model   serial-9451a5d5-b358-4719-b6c1-a0d187da217c   in service
+    fake-vendor   fake-model   serial-bb2e2869-9481-483a-bc49-2bdd62f515f5   in service
+    fake-vendor   fake-model   serial-d5a36c66-4b2f-46e6-96f4-b82debee1a4a   in service
+    fake-vendor   fake-model   serial-f99ec996-ec08-4ccf-9a6e-6c5cab440fb4   in service
+    fake-vendor   fake-model   serial-faccbb39-d686-42a1-a50a-0eb59ba74a87   in service
+    fake-vendor   fake-model   serial-fdfd067b-1d86-444d-a21f-ed33709f3e4d   in service

   datasets generation 2 -> 3:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt
index 956abf486b8..4ba528af5c1 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt
@@ -6,19 +6,19 @@ to: blueprint fe13be30-94c2-4fa6-aad5-ae3c5028f6bb

 sled c52410de-5fea-4e77-b162-756d103523b3 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-3b6e2ade-57fc-4f9d-85c3-38fca27f1df6
-    fake-vendor   fake-model   serial-5192ef62-5a12-4a0c-829d-a409da87909c
-    fake-vendor   fake-model   serial-8778bcc5-dddf-4345-9fdf-5c46a36497b0
-    fake-vendor   fake-model   serial-9134de8d-9ba8-4ddc-9e84-eb00ec616b53
-    fake-vendor   fake-model   serial-96569b61-9e0c-4ee7-bd11-a5e0c541ca99
-    fake-vendor   fake-model   serial-ba90170e-7399-4260-910a-376254a8a9bf
-    fake-vendor   fake-model   serial-bc649720-926b-48f2-a62a-efdcff96b49e
-    fake-vendor   fake-model   serial-d55da288-4f35-4e92-97b0-29a5e6009109
-    fake-vendor   fake-model   serial-f83302fc-785c-4ab3-bcca-0d040b3c3062
-    fake-vendor   fake-model   serial-f843fb62-0f04-4c7d-a56f-62531104dc77
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-3b6e2ade-57fc-4f9d-85c3-38fca27f1df6   in service
+    fake-vendor   fake-model   serial-5192ef62-5a12-4a0c-829d-a409da87909c   in service
+    fake-vendor   fake-model   serial-8778bcc5-dddf-4345-9fdf-5c46a36497b0   in service
+    fake-vendor   fake-model   serial-9134de8d-9ba8-4ddc-9e84-eb00ec616b53   in service
+    fake-vendor   fake-model   serial-96569b61-9e0c-4ee7-bd11-a5e0c541ca99   in service
+    fake-vendor   fake-model   serial-ba90170e-7399-4260-910a-376254a8a9bf   in service
+    fake-vendor   fake-model   serial-bc649720-926b-48f2-a62a-efdcff96b49e   in service
+    fake-vendor   fake-model   serial-d55da288-4f35-4e92-97b0-29a5e6009109   in service
+    fake-vendor   fake-model   serial-f83302fc-785c-4ab3-bcca-0d040b3c3062   in service
+    fake-vendor   fake-model   serial-f843fb62-0f04-4c7d-a56f-62531104dc77   in service

   datasets generation 2 -> 3:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt
index 709aa372179..9a0da373d82 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt
@@ -5,20 +5,30 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e

 sled a1b477db-b629-48eb-911d-1ccdafca75b9 (active -> decommissioned):

-   physical disks from generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
--   fake-vendor   fake-model   serial-069446b4-7881-49dc-838a-63a782d4896d
--   fake-vendor   fake-model   serial-20eba316-dffe-4516-9703-af561da19b0b
--   fake-vendor   fake-model   serial-426f4b6d-4a82-4106-bf4b-64ee86a2a5a4
--   fake-vendor   fake-model   serial-82daeef2-8641-4bf5-ac66-f7b5f62c48b6
--   fake-vendor   fake-model   serial-8e5feeb2-14f1-440f-a909-3c34aa8e129b
--   fake-vendor   fake-model   serial-942e2123-7c4e-4f6b-9317-1341fe212647
--   fake-vendor   fake-model   serial-97a5ce17-df5b-47e7-baf8-80ae710ce18e
--   fake-vendor   fake-model   serial-debc9fb6-bd58-4e4f-b8b8-6a9a07fcf25d
--   fake-vendor   fake-model   serial-f63a32a9-0659-43cf-8efc-8f34e7af9d45
--   fake-vendor   fake-model   serial-ffea118f-7715-4e21-8fc5-bb23cd0f59e8
+   physical disks generation 2 -> 3:
+    ---------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ---------------------------------------------------------------------------------------
+*   fake-vendor   fake-model   serial-069446b4-7881-49dc-838a-63a782d4896d   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-20eba316-dffe-4516-9703-af561da19b0b   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-426f4b6d-4a82-4106-bf4b-64ee86a2a5a4   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-82daeef2-8641-4bf5-ac66-f7b5f62c48b6   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-8e5feeb2-14f1-440f-a909-3c34aa8e129b   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-942e2123-7c4e-4f6b-9317-1341fe212647   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-97a5ce17-df5b-47e7-baf8-80ae710ce18e   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-debc9fb6-bd58-4e4f-b8b8-6a9a07fcf25d   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-f63a32a9-0659-43cf-8efc-8f34e7af9d45   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-ffea118f-7715-4e21-8fc5-bb23cd0f59e8   - in service
+     └─ + expunged ✓

   datasets from generation 2:
@@ -113,19 +123,19 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e

 sled d67ce8f0-a691-4010-b414-420d82e80527 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-1e2ec79e-9c11-4133-ac77-e0b994a507d5
-    fake-vendor   fake-model   serial-440ae69d-5e2e-4539-91d0-e2930bdd7203
-    fake-vendor   fake-model   serial-4e91d4a3-bb6c-44bb-bd4e-bf8913c1ba2b
-    fake-vendor   fake-model   serial-67de3a80-29cb-4066-b743-e285a2ca1f4e
-    fake-vendor   fake-model   serial-9139b70f-c1d3-475d-8f02-7c9acba52b2b
-    fake-vendor   fake-model   serial-95fbb110-5272-4646-ab50-21b31b7cde23
-    fake-vendor   fake-model   serial-9bf35cd7-4938-4c34-8189-288b3195cb64
-    fake-vendor   fake-model   serial-9d833141-18a1-4f24-8a34-6076c026aa87
-    fake-vendor   fake-model   serial-a279461f-a7b9-413f-a79f-cb4dab4c3fce
-    fake-vendor   fake-model   serial-ff7e002b-3ad8-4d45-b03a-c46ef0ac8e59
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-1e2ec79e-9c11-4133-ac77-e0b994a507d5   in service
+    fake-vendor   fake-model   serial-440ae69d-5e2e-4539-91d0-e2930bdd7203   in service
+    fake-vendor   fake-model   serial-4e91d4a3-bb6c-44bb-bd4e-bf8913c1ba2b   in service
+    fake-vendor   fake-model   serial-67de3a80-29cb-4066-b743-e285a2ca1f4e   in service
+    fake-vendor   fake-model   serial-9139b70f-c1d3-475d-8f02-7c9acba52b2b   in service
+    fake-vendor   fake-model   serial-95fbb110-5272-4646-ab50-21b31b7cde23   in service
+    fake-vendor   fake-model   serial-9bf35cd7-4938-4c34-8189-288b3195cb64   in service
+    fake-vendor   fake-model   serial-9d833141-18a1-4f24-8a34-6076c026aa87   in service
+    fake-vendor   fake-model   serial-a279461f-a7b9-413f-a79f-cb4dab4c3fce   in service
+    fake-vendor   fake-model   serial-ff7e002b-3ad8-4d45-b03a-c46ef0ac8e59   in service

   datasets generation 2 -> 3:
@@ -206,19 +216,19 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e

 sled fefcf4cf-f7e7-46b3-b629-058526ce440e (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-07068f19-1ff2-48da-8e72-874780df2339
-    fake-vendor   fake-model   serial-0f12e6ee-41d2-4eb0-813f-ba5240900ded
-    fake-vendor   fake-model   serial-0fdb4a39-3cd5-47a0-9064-e7f3c285af61
-    fake-vendor   fake-model   serial-13572832-83ad-40d6-896a-751f7e53f4f6
-    fake-vendor   fake-model   serial-3602bdd9-f7bb-4490-87a6-8f061f7712f5
-    fake-vendor   fake-model   serial-65707837-95a4-45d7-84e6-8b9a4da215f1
-    fake-vendor   fake-model   serial-7a43b2b0-3846-401c-8317-d555715a00f7
-    fake-vendor   fake-model   serial-855e3ef1-6929-4e21-8451-0e62bd93c7c9
-    fake-vendor   fake-model   serial-8adcf329-4cee-4075-b798-28b5add1edf5
-    fake-vendor   fake-model   serial-99e926d6-bd42-4cde-9f63-5ecc7ea14322
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-07068f19-1ff2-48da-8e72-874780df2339   in service
+    fake-vendor   fake-model   serial-0f12e6ee-41d2-4eb0-813f-ba5240900ded   in service
+    fake-vendor   fake-model   serial-0fdb4a39-3cd5-47a0-9064-e7f3c285af61   in service
+    fake-vendor   fake-model   serial-13572832-83ad-40d6-896a-751f7e53f4f6   in service
+    fake-vendor   fake-model   serial-3602bdd9-f7bb-4490-87a6-8f061f7712f5   in service
+    fake-vendor   fake-model   serial-65707837-95a4-45d7-84e6-8b9a4da215f1   in service
+    fake-vendor   fake-model   serial-7a43b2b0-3846-401c-8317-d555715a00f7   in service
+    fake-vendor   fake-model   serial-855e3ef1-6929-4e21-8451-0e62bd93c7c9   in service
+    fake-vendor   fake-model   serial-8adcf329-4cee-4075-b798-28b5add1edf5   in service
+    fake-vendor   fake-model   serial-99e926d6-bd42-4cde-9f63-5ecc7ea14322   in service

   datasets generation 2 -> 3:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt
index 146a7f3b45c..cdd3f8cd126 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt
@@ -1,22 +1,62 @@
 blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e
 parent: 516e80a3-b362-4fac-bd3c-4559717120dd
+
+  sled: a1b477db-b629-48eb-911d-1ccdafca75b9 (decommissioned)
+
+    physical disks at generation 3:
+    ---------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ---------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-069446b4-7881-49dc-838a-63a782d4896d   expunged ✓
+    fake-vendor   fake-model   serial-20eba316-dffe-4516-9703-af561da19b0b   expunged ✓
+    fake-vendor   fake-model   serial-426f4b6d-4a82-4106-bf4b-64ee86a2a5a4   expunged ✓
+    fake-vendor   fake-model   serial-82daeef2-8641-4bf5-ac66-f7b5f62c48b6   expunged ✓
+    fake-vendor   fake-model   serial-8e5feeb2-14f1-440f-a909-3c34aa8e129b   expunged ✓
+    fake-vendor   fake-model   serial-942e2123-7c4e-4f6b-9317-1341fe212647   expunged ✓
+    fake-vendor   fake-model   serial-97a5ce17-df5b-47e7-baf8-80ae710ce18e   expunged ✓
+    fake-vendor   fake-model   serial-debc9fb6-bd58-4e4f-b8b8-6a9a07fcf25d   expunged ✓
+    fake-vendor   fake-model   serial-f63a32a9-0659-43cf-8efc-8f34e7af9d45   expunged ✓
+    fake-vendor   fake-model   serial-ffea118f-7715-4e21-8fc5-bb23cd0f59e8   expunged ✓
+
+
+    omicron zones at generation 3:
+    ----------------------------------------------------------------------------------------------
+    zone type         zone id                                disposition   underlay IP
+    ----------------------------------------------------------------------------------------------
+    clickhouse        3fd081ea-93f1-417e-bcb1-405854435f28   expunged ✓    fd00:1122:3344:103::23
+    crucible          290e7e97-c4b3-47da-9f40-8d909397fbae   expunged ✓    fd00:1122:3344:103::2e
+    crucible          29bbe4ad-e6e8-4e05-b188-a811a793ccbb   expunged ✓    fd00:1122:3344:103::2a
+    crucible          8500a060-a426-4324-ba40-a66dd4b89bc6   expunged ✓    fd00:1122:3344:103::29
+    crucible          92b7abd8-3e34-49dd-9c56-19a314e97d49   expunged ✓    fd00:1122:3344:103::25
+    crucible          b320954e-6c66-4540-9bf4-3d976f21ee1b   expunged ✓    fd00:1122:3344:103::26
+    crucible          bc3e4495-7e51-46b6-9f55-026ea1da39dd   expunged ✓    fd00:1122:3344:103::27
+    crucible          cfb7595b-280c-40f5-b1aa-6e154adf280b   expunged ✓    fd00:1122:3344:103::28
+    crucible          d6b6ea5a-3f29-4815-aa42-b1afeb11dfc5   expunged ✓    fd00:1122:3344:103::2c
+    crucible          d6b77c1f-8c9e-406d-944e-c97a57b3984d   expunged ✓    fd00:1122:3344:103::2b
+    crucible          ecc03801-b315-4495-9b2c-49e0eead1283   expunged ✓    fd00:1122:3344:103::2d
+    crucible_pantry   ecebab45-11e7-47ab-8bc2-ab9114c6e2bc   expunged ✓    fd00:1122:3344:103::24
+    internal_dns      96b7a45b-be74-44e8-b68a-e530cfa81830   expunged ✓    fd00:1122:3344:1::1
+    internal_ntp      b3dbc671-0e4d-49ff-9f4f-71b249d21f57   expunged ✓    fd00:1122:3344:103::21
+    nexus             bc0f4342-f88d-49cc-bb44-b555d9b8ca12   expunged ✓    fd00:1122:3344:103::22
+
+
 sled: d67ce8f0-a691-4010-b414-420d82e80527 (active)

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-1e2ec79e-9c11-4133-ac77-e0b994a507d5
-    fake-vendor   fake-model   serial-440ae69d-5e2e-4539-91d0-e2930bdd7203
-    fake-vendor   fake-model   serial-4e91d4a3-bb6c-44bb-bd4e-bf8913c1ba2b
-    fake-vendor   fake-model   serial-67de3a80-29cb-4066-b743-e285a2ca1f4e
-    fake-vendor   fake-model   serial-9139b70f-c1d3-475d-8f02-7c9acba52b2b
-    fake-vendor   fake-model   serial-95fbb110-5272-4646-ab50-21b31b7cde23
-    fake-vendor   fake-model   serial-9bf35cd7-4938-4c34-8189-288b3195cb64
-    fake-vendor   fake-model   serial-9d833141-18a1-4f24-8a34-6076c026aa87
-    fake-vendor   fake-model   serial-a279461f-a7b9-413f-a79f-cb4dab4c3fce
-    fake-vendor   fake-model   serial-ff7e002b-3ad8-4d45-b03a-c46ef0ac8e59
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-1e2ec79e-9c11-4133-ac77-e0b994a507d5   in service
+    fake-vendor   fake-model   serial-440ae69d-5e2e-4539-91d0-e2930bdd7203   in service
+    fake-vendor   fake-model   serial-4e91d4a3-bb6c-44bb-bd4e-bf8913c1ba2b   in service
+    fake-vendor   fake-model   serial-67de3a80-29cb-4066-b743-e285a2ca1f4e   in service
+    fake-vendor   fake-model   serial-9139b70f-c1d3-475d-8f02-7c9acba52b2b   in service
+    fake-vendor   fake-model   serial-95fbb110-5272-4646-ab50-21b31b7cde23   in service
+    fake-vendor   fake-model   serial-9bf35cd7-4938-4c34-8189-288b3195cb64   in service
+    fake-vendor   fake-model   serial-9d833141-18a1-4f24-8a34-6076c026aa87   in service
+    fake-vendor   fake-model   serial-a279461f-a7b9-413f-a79f-cb4dab4c3fce   in service
+    fake-vendor   fake-model   serial-ff7e002b-3ad8-4d45-b03a-c46ef0ac8e59   in service

   datasets at generation 3:
@@ -98,19 +138,19 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd

 sled: fefcf4cf-f7e7-46b3-b629-058526ce440e (active)

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-07068f19-1ff2-48da-8e72-874780df2339
-    fake-vendor   fake-model   serial-0f12e6ee-41d2-4eb0-813f-ba5240900ded
-    fake-vendor   fake-model   serial-0fdb4a39-3cd5-47a0-9064-e7f3c285af61
-    fake-vendor   fake-model   serial-13572832-83ad-40d6-896a-751f7e53f4f6
-    fake-vendor   fake-model   serial-3602bdd9-f7bb-4490-87a6-8f061f7712f5
-    fake-vendor   fake-model   serial-65707837-95a4-45d7-84e6-8b9a4da215f1
-    fake-vendor   fake-model   serial-7a43b2b0-3846-401c-8317-d555715a00f7
-    fake-vendor   fake-model   serial-855e3ef1-6929-4e21-8451-0e62bd93c7c9
-    fake-vendor   fake-model   serial-8adcf329-4cee-4075-b798-28b5add1edf5
-    fake-vendor   fake-model   serial-99e926d6-bd42-4cde-9f63-5ecc7ea14322
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-07068f19-1ff2-48da-8e72-874780df2339   in service
+    fake-vendor   fake-model   serial-0f12e6ee-41d2-4eb0-813f-ba5240900ded   in service
+    fake-vendor   fake-model   serial-0fdb4a39-3cd5-47a0-9064-e7f3c285af61   in service
+    fake-vendor   fake-model   serial-13572832-83ad-40d6-896a-751f7e53f4f6   in service
+    fake-vendor   fake-model   serial-3602bdd9-f7bb-4490-87a6-8f061f7712f5   in service
+    fake-vendor   fake-model   serial-65707837-95a4-45d7-84e6-8b9a4da215f1   in service
+    fake-vendor   fake-model   serial-7a43b2b0-3846-401c-8317-d555715a00f7   in service
+    fake-vendor   fake-model   serial-855e3ef1-6929-4e21-8451-0e62bd93c7c9   in service
+    fake-vendor   fake-model   serial-8adcf329-4cee-4075-b798-28b5add1edf5   in service
+    fake-vendor   fake-model   serial-99e926d6-bd42-4cde-9f63-5ecc7ea14322   in service

   datasets at generation 3:
@@ -190,30 +230,6 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd
    nexus             294379f6-502c-465b-b32d-771c415a38af   in service    fd00:1122:3344:102::22

-!a1b477db-b629-48eb-911d-1ccdafca75b9
-WARNING: Zones exist without physical disks!
-
-  omicron zones at generation 3:
-    ----------------------------------------------------------------------------------------------
-    zone type         zone id                                disposition   underlay IP
-    ----------------------------------------------------------------------------------------------
-    clickhouse        3fd081ea-93f1-417e-bcb1-405854435f28   expunged ✓    fd00:1122:3344:103::23
-    crucible          290e7e97-c4b3-47da-9f40-8d909397fbae   expunged ✓    fd00:1122:3344:103::2e
-    crucible          29bbe4ad-e6e8-4e05-b188-a811a793ccbb   expunged ✓    fd00:1122:3344:103::2a
-    crucible          8500a060-a426-4324-ba40-a66dd4b89bc6   expunged ✓    fd00:1122:3344:103::29
-    crucible          92b7abd8-3e34-49dd-9c56-19a314e97d49   expunged ✓    fd00:1122:3344:103::25
-    crucible          b320954e-6c66-4540-9bf4-3d976f21ee1b   expunged ✓    fd00:1122:3344:103::26
-    crucible          bc3e4495-7e51-46b6-9f55-026ea1da39dd   expunged ✓    fd00:1122:3344:103::27
-    crucible          cfb7595b-280c-40f5-b1aa-6e154adf280b   expunged ✓    fd00:1122:3344:103::28
-    crucible          d6b6ea5a-3f29-4815-aa42-b1afeb11dfc5   expunged ✓    fd00:1122:3344:103::2c
-    crucible          d6b77c1f-8c9e-406d-944e-c97a57b3984d   expunged ✓    fd00:1122:3344:103::2b
-    crucible          ecc03801-b315-4495-9b2c-49e0eead1283   expunged ✓    fd00:1122:3344:103::2d
-    crucible_pantry   ecebab45-11e7-47ab-8bc2-ab9114c6e2bc   expunged ✓    fd00:1122:3344:103::24
-    internal_dns      96b7a45b-be74-44e8-b68a-e530cfa81830   expunged ✓    fd00:1122:3344:1::1
-    internal_ntp      b3dbc671-0e4d-49ff-9f4f-71b249d21f57   expunged ✓    fd00:1122:3344:103::21
-    nexus             bc0f4342-f88d-49cc-bb44-b555d9b8ca12   expunged ✓    fd00:1122:3344:103::22
-
-
 COCKROACHDB SETTINGS:
   state fingerprint::::::::::::::::: (none)
   cluster.preserve_downgrade_option: (do not modify)
diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt
index 0bde24adbaf..59471714899 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt
@@ -6,19 +6,19 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d

 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558
-    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c
-    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855
-    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef
-    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3
-    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c
-    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f
-    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21
-    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012
-    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558   in service
+    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c   in service
+    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855   in service
+    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef   in service
+    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3   in service
+    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c   in service
+    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f   in service
+    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21   in service
+    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012   in service
+    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569   in service

   datasets generation 2 -> 3:
@@ -101,19 +101,19 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d

 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985
-    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3
-    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79
-    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10
-    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13
-    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5
-    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346
-    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279
-    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d
-    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985   in service
+    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3   in service
+    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79   in service
+    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10   in service
+    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13   in service
+    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5   in service
+    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346   in service
+    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279   in service
+    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d   in service
+    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e   in service

   datasets generation 2 -> 3:
@@ -196,19 +196,19 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d

 sled be531a62-9897-430d-acd2-ce14b4632627 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85
-    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc
-    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba
-    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af
-    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5
-    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171
-    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5
-    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b
-    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308
-    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85   in service
+    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc   in service
+    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba   in service
+    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af   in service
+    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5   in service
+    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171   in service
+    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5   in service
+    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b   in service
+    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308   in service
+    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54   in service

   datasets generation 2 -> 3:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt
index c94a43a79ef..003c3e2991a 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt
@@ -6,19 +6,19 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6

 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558
-    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c
-    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855
-    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef
-    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3
-    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c
-    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f
-    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21
-    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012
-    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558   in service
+    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c   in service
+    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855   in service
+    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef   in service
+    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3   in service
+    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c   in service
+    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f   in service
+    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21   in service
+    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012   in service
+    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569   in service

   datasets at generation 3:
@@ -101,19 +101,19 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6

 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985
-    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3
-    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79
-    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10
-    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13
-    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5
-    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346
-    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279
-    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d
-    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985   in service
+    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3   in service
+    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79   in service
+    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10   in service
+    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13   in service
+    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5   in service
+    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346   in service
+    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279   in service
+    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d   in service
+    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e   in service

   datasets at generation 3:
@@ -196,19 +196,19 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6

 sled be531a62-9897-430d-acd2-ce14b4632627 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85
-    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc
-    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba
-    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af
-    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5
-    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171
-    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5
-    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b
-    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308
-    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85   in service
+    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc   in service
+    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba   in service
+    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af   in service
+    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5   in service
+    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171   in service
+    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5   in service
+    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b   in service
+    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308   in service
+    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54   in service

   datasets at generation 3:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt
index e86629f97a1..2c4eb088e16 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt
@@ -6,19 +6,19 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb

 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558
-    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c
-    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855
-    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef
-    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3
-    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c
-    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f
-    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21
-    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012
-    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558   in service
+    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c   in service
+    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855   in service
+    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef   in service
+    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3   in service
+    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c   in service
+    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f   in service
+    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21   in service
+    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012   in service
+    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569   in service

   datasets at generation 3:
@@ -103,19 +103,19 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb

 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985
-    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3
-    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79
-    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10
-    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13
-    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5
-    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346
-    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279
-    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d
-    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985   in service
+    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3   in service
+    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79   in service
+    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10   in service
+    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13   in service
+    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5   in service
+    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346   in service
+    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279   in service
+    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d   in service
+    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e   in service

   datasets generation 3 -> 4:
@@ -201,19 +201,19 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb

 sled be531a62-9897-430d-acd2-ce14b4632627 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85
-    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc
-    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba
-    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af
-    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5
-    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171
-    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5
-    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b
-    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308
-    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85   in service
+    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc   in service
+    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba   in service
+    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af   in service
+    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5   in service
+    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171   in service
+    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5   in service
+    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b   in service
+    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308   in service
+    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54   in service

   datasets generation 3 -> 4:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt
index c7f2cf502bc..6594c146c5b 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt
@@ -6,19 +6,19 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef

 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558
-    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c
-    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855
-    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef
-    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3
-    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c
-    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f
-    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21
-    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012
-    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-60e6b021-79d6-4a6b-917c-6637e0769558   in service
+    fake-vendor   fake-model   serial-71a07ac0-d24e-446c-a202-1998e7f6ac8c   in service
+    fake-vendor   fake-model   serial-736d4cab-c262-485e-89c2-07e6543f0855   in service
+    fake-vendor   fake-model   serial-8ae56ca0-709d-4b8f-9869-51b62b542eef   in service
+    fake-vendor   fake-model   serial-8e0bcc4f-0799-450c-a0ad-80c1637f59e3   in service
+    fake-vendor   fake-model   serial-8eeebb8e-6db6-43e8-a429-d26dde99882c   in service
+    fake-vendor   fake-model   serial-a3e8de5b-c47a-49c0-b698-4d6955e1327f   in service
+    fake-vendor   fake-model   serial-b32fa3da-4dec-4237-9776-e1a57ba15a21   in service
+    fake-vendor   fake-model   serial-e9c238f6-87a3-4087-8ba4-aa594a82d012   in service
+    fake-vendor   fake-model   serial-fbf997ef-52d3-438a-b036-b9117322e569   in service

   datasets at generation 3:
@@ -101,19 +101,19 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef

 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985
-    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3
-    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79
-    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10
-    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13
-    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5
-    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346
-    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279
-    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d
-    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-0bf9d028-2b4a-4bff-82a1-6eb5fcefd985   in service
+    fake-vendor   fake-model   serial-19293a1d-fddc-40a7-88a4-ccafdb6f66d3   in service
+    fake-vendor   fake-model   serial-44484c44-477a-4676-8266-b98a00e80d79   in service
+    fake-vendor   fake-model   serial-79787cd4-92da-4de5-bfd8-30a635521e10   in service
+    fake-vendor   fake-model   serial-9ae94c94-baae-477e-912a-60f0c4f3bd13   in service
+    fake-vendor   fake-model   serial-af85eec8-36b3-4b88-966d-a717b9b58fe5   in service
+    fake-vendor   fake-model   serial-ddfaaba3-dafe-4103-b868-e9843d29d346   in service
+    fake-vendor   fake-model   serial-ec458c3e-91ca-40f1-a2a3-3f4292c1f279   in service
+    fake-vendor   fake-model   serial-f2fc7c4c-7966-449d-8ec3-5a70f460501d   in service
+    fake-vendor   fake-model   serial-f635c28a-e5ca-4d22-ac94-d8f278a6ea0e   in service

   datasets at generation 4:
@@ -199,19 +199,19 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef

 sled be531a62-9897-430d-acd2-ce14b4632627 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85
-    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc
-    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba
-    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af
-    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5
-    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171
-    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5
-    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b
-    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308
-    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model   serial-2a76ab1a-fb16-412d-93f9-b8cd9aa94e85   in service
+    fake-vendor   fake-model   serial-2de2bf7e-c679-4b2b-b373-908e9d3ffbfc   in service
+    fake-vendor   fake-model   serial-2f02a5c6-fcf5-4b5a-bc7d-7f65369918ba   in service
+    fake-vendor   fake-model   serial-32041dbf-e58d-4f32-840d-923d3d3b68af   in service
+    fake-vendor   fake-model   serial-5f88adff-bb50-4dc1-bbfb-5a410c753ed5   in service
+    fake-vendor   fake-model   serial-74d64eb9-bf69-4782-af16-2d3a761ca171   in service
+    fake-vendor   fake-model   serial-b3c231c9-b2a5-4267-b4bf-9651881b91a5   in service
+    fake-vendor   fake-model   serial-bca80b95-8dca-4a3b-b24d-d44b6a9ff71b   in service
+    fake-vendor   fake-model   serial-db16345e-427a-4c8e-9032-17270f729308   in service
+    fake-vendor   fake-model   serial-e3b45593-8f0c-47b2-a381-802a7dad1f54   in service

   datasets at generation 4:
diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt
index 68cf8f9f28b..d88d69ad8e0 100644
--- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt
+++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt
@@ -5,20 +5,30 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c

 sled b340c044-bd87-4a3b-aee3-e6ccd9d3ff02 (active -> decommissioned):

-   physical disks from generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
--   fake-vendor   fake-model   serial-03a84dd2-e0a4-435d-96de-67bfe2674f4e
--   fake-vendor   fake-model   serial-188c3b95-16fa-45ea-b9f7-e987560b4d62
--   fake-vendor   fake-model   serial-1f3bbc7c-888f-40fa-b705-fab3f148b147
--   fake-vendor   fake-model   serial-45a81e70-03bb-4c53-bf49-d598c9fb8d34
--   fake-vendor   fake-model   serial-56c3f0ef-fac1-473d-9317-0e3668aa7e88
--   fake-vendor   fake-model   serial-96f1615c-3dda-427f-8132-408b2fad24e0
--   fake-vendor   fake-model   serial-a9ef71b2-ec22-421c-adc9-bddc4c0641c4
--   fake-vendor   fake-model   serial-b9f9c626-3293-48eb-a475-1debaaccdf6c
--   fake-vendor   fake-model   serial-d563fd5f-9306-49b4-8511-78a2f64733ce
--   fake-vendor   fake-model   serial-fcca32b6-9629-468f-a282-63d7da992447
+   physical disks generation 2 -> 3:
+    ---------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ---------------------------------------------------------------------------------------
+*   fake-vendor   fake-model   serial-03a84dd2-e0a4-435d-96de-67bfe2674f4e   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-188c3b95-16fa-45ea-b9f7-e987560b4d62   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-1f3bbc7c-888f-40fa-b705-fab3f148b147   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-45a81e70-03bb-4c53-bf49-d598c9fb8d34   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-56c3f0ef-fac1-473d-9317-0e3668aa7e88   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-96f1615c-3dda-427f-8132-408b2fad24e0   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-a9ef71b2-ec22-421c-adc9-bddc4c0641c4   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-b9f9c626-3293-48eb-a475-1debaaccdf6c   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-d563fd5f-9306-49b4-8511-78a2f64733ce   - in service
+     └─ + expunged ✓
+*   fake-vendor   fake-model   serial-fcca32b6-9629-468f-a282-63d7da992447   - in service
+     └─ + expunged ✓

   datasets from generation 3:
@@ -117,19 +127,19 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c

 sled cdba3bea-3407-4b6e-a029-19bf4a02fca7 (active):

   physical disks at generation 2:
-    ----------------------------------------------------------------------
-    vendor        model        serial
-    ----------------------------------------------------------------------
-    fake-vendor   fake-model   serial-08616473-ded4-4785-9b53-b6ccc1efb67a
-    fake-vendor   fake-model   serial-2d44b756-94df-45ec-a644-50021248682d
-    fake-vendor   fake-model   serial-2dce7cf0-3097-485d-aaf6-9fc51f99eae5
-    fake-vendor   fake-model   serial-3b9d69e5-aa80-4fc8-9d2e-a2a24bd0f1d7
-    fake-vendor   fake-model   serial-44342c41-75a7-4708-8004-eb2ca5c5a3c2
-    fake-vendor   fake-model   serial-650b4eff-80a2-430a-97c8-f837248480a1
-    fake-vendor   fake-model   serial-6e418b8c-cadd-4fb8-8370-f351a07e1eed
-    fake-vendor   fake-model   serial-6e5772a5-8234-46d1-ba5a-503a83d9d2fb
-    fake-vendor   fake-model   serial-c1da692e-7713-43a0-b6bb-5c182084c09d
-    fake-vendor   fake-model   serial-e35766ef-789a-4b2f-9a6c-e6626d5ab195
+    ------------------------------------------------------------------------------------
+    vendor        model        serial                                        disposition
+    ------------------------------------------------------------------------------------
+    fake-vendor   fake-model
serial-08616473-ded4-4785-9b53-b6ccc1efb67a in service + fake-vendor fake-model serial-2d44b756-94df-45ec-a644-50021248682d in service + fake-vendor fake-model serial-2dce7cf0-3097-485d-aaf6-9fc51f99eae5 in service + fake-vendor fake-model serial-3b9d69e5-aa80-4fc8-9d2e-a2a24bd0f1d7 in service + fake-vendor fake-model serial-44342c41-75a7-4708-8004-eb2ca5c5a3c2 in service + fake-vendor fake-model serial-650b4eff-80a2-430a-97c8-f837248480a1 in service + fake-vendor fake-model serial-6e418b8c-cadd-4fb8-8370-f351a07e1eed in service + fake-vendor fake-model serial-6e5772a5-8234-46d1-ba5a-503a83d9d2fb in service + fake-vendor fake-model serial-c1da692e-7713-43a0-b6bb-5c182084c09d in service + fake-vendor fake-model serial-e35766ef-789a-4b2f-9a6c-e6626d5ab195 in service datasets generation 3 -> 4: @@ -218,19 +228,19 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c sled fe7b9b01-e803-41ea-9e39-db240dcd9029 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-21d60319-5fe1-4a3b-a4c0-6aa7465e7bde - fake-vendor fake-model serial-2acfbb84-5ce0-424e-8d73-44c5071d4430 - fake-vendor fake-model serial-2db7f3b4-ed19-4229-b42c-44f49eeb8a91 - fake-vendor fake-model serial-2fa34d8e-13d9-42d3-b8ba-ca9d74ac496a - fake-vendor fake-model serial-355e268c-c932-4f32-841c-f3ec88fe0495 - fake-vendor fake-model serial-427b2ccd-998f-4085-af21-e600604cf21e - fake-vendor fake-model serial-588058f2-f51b-4800-a211-1c5dbb32296b - fake-vendor fake-model serial-736f6f07-2aa2-4658-8b5c-3bf409ea747a - fake-vendor fake-model serial-bcfcdede-7084-4a31-97a8-ac4299c268f9 - fake-vendor fake-model serial-fe379ac6-1938-4cc2-93a9-43b1447229ae + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-21d60319-5fe1-4a3b-a4c0-6aa7465e7bde in service + fake-vendor fake-model serial-2acfbb84-5ce0-424e-8d73-44c5071d4430 in service + fake-vendor fake-model serial-2db7f3b4-ed19-4229-b42c-44f49eeb8a91 in service + fake-vendor fake-model serial-2fa34d8e-13d9-42d3-b8ba-ca9d74ac496a in service + fake-vendor fake-model serial-355e268c-c932-4f32-841c-f3ec88fe0495 in service + fake-vendor fake-model serial-427b2ccd-998f-4085-af21-e600604cf21e in service + fake-vendor fake-model serial-588058f2-f51b-4800-a211-1c5dbb32296b in service + fake-vendor fake-model serial-736f6f07-2aa2-4658-8b5c-3bf409ea747a in service + fake-vendor fake-model serial-bcfcdede-7084-4a31-97a8-ac4299c268f9 in service + fake-vendor fake-model serial-fe379ac6-1938-4cc2-93a9-43b1447229ae in service datasets generation 3 -> 4: diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt index 2120fb85c1a..53fbec49672 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt @@ -5,6 +5,22 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f sled b340c044-bd87-4a3b-aee3-e6ccd9d3ff02 (decommissioned): + physical disks at generation 3: + ------------------------------------------------------------------------------------- + vendor model serial disposition + 
------------------------------------------------------------------------------------- + fake-vendor fake-model serial-03a84dd2-e0a4-435d-96de-67bfe2674f4e expunged ✓ + fake-vendor fake-model serial-188c3b95-16fa-45ea-b9f7-e987560b4d62 expunged ✓ + fake-vendor fake-model serial-1f3bbc7c-888f-40fa-b705-fab3f148b147 expunged ✓ + fake-vendor fake-model serial-45a81e70-03bb-4c53-bf49-d598c9fb8d34 expunged ✓ + fake-vendor fake-model serial-56c3f0ef-fac1-473d-9317-0e3668aa7e88 expunged ✓ + fake-vendor fake-model serial-96f1615c-3dda-427f-8132-408b2fad24e0 expunged ✓ + fake-vendor fake-model serial-a9ef71b2-ec22-421c-adc9-bddc4c0641c4 expunged ✓ + fake-vendor fake-model serial-b9f9c626-3293-48eb-a475-1debaaccdf6c expunged ✓ + fake-vendor fake-model serial-d563fd5f-9306-49b4-8511-78a2f64733ce expunged ✓ + fake-vendor fake-model serial-fcca32b6-9629-468f-a282-63d7da992447 expunged ✓ + + omicron zones at generation 4: ------------------------------------------------------------------------------------------------ zone type zone id disposition underlay IP @@ -30,19 +46,19 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f sled cdba3bea-3407-4b6e-a029-19bf4a02fca7 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-08616473-ded4-4785-9b53-b6ccc1efb67a - fake-vendor fake-model serial-2d44b756-94df-45ec-a644-50021248682d - fake-vendor fake-model serial-2dce7cf0-3097-485d-aaf6-9fc51f99eae5 - fake-vendor fake-model serial-3b9d69e5-aa80-4fc8-9d2e-a2a24bd0f1d7 - fake-vendor fake-model serial-44342c41-75a7-4708-8004-eb2ca5c5a3c2 - fake-vendor fake-model serial-650b4eff-80a2-430a-97c8-f837248480a1 - fake-vendor fake-model serial-6e418b8c-cadd-4fb8-8370-f351a07e1eed - fake-vendor fake-model serial-6e5772a5-8234-46d1-ba5a-503a83d9d2fb - fake-vendor fake-model serial-c1da692e-7713-43a0-b6bb-5c182084c09d - fake-vendor fake-model serial-e35766ef-789a-4b2f-9a6c-e6626d5ab195 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-08616473-ded4-4785-9b53-b6ccc1efb67a in service + fake-vendor fake-model serial-2d44b756-94df-45ec-a644-50021248682d in service + fake-vendor fake-model serial-2dce7cf0-3097-485d-aaf6-9fc51f99eae5 in service + fake-vendor fake-model serial-3b9d69e5-aa80-4fc8-9d2e-a2a24bd0f1d7 in service + fake-vendor fake-model serial-44342c41-75a7-4708-8004-eb2ca5c5a3c2 in service + fake-vendor fake-model serial-650b4eff-80a2-430a-97c8-f837248480a1 in service + fake-vendor fake-model serial-6e418b8c-cadd-4fb8-8370-f351a07e1eed in service + fake-vendor fake-model serial-6e5772a5-8234-46d1-ba5a-503a83d9d2fb in service + fake-vendor fake-model serial-c1da692e-7713-43a0-b6bb-5c182084c09d in service + fake-vendor fake-model serial-e35766ef-789a-4b2f-9a6c-e6626d5ab195 in service datasets at generation 4: @@ -131,19 +147,19 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f sled fe7b9b01-e803-41ea-9e39-db240dcd9029 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-21d60319-5fe1-4a3b-a4c0-6aa7465e7bde - fake-vendor fake-model 
serial-2acfbb84-5ce0-424e-8d73-44c5071d4430 - fake-vendor fake-model serial-2db7f3b4-ed19-4229-b42c-44f49eeb8a91 - fake-vendor fake-model serial-2fa34d8e-13d9-42d3-b8ba-ca9d74ac496a - fake-vendor fake-model serial-355e268c-c932-4f32-841c-f3ec88fe0495 - fake-vendor fake-model serial-427b2ccd-998f-4085-af21-e600604cf21e - fake-vendor fake-model serial-588058f2-f51b-4800-a211-1c5dbb32296b - fake-vendor fake-model serial-736f6f07-2aa2-4658-8b5c-3bf409ea747a - fake-vendor fake-model serial-bcfcdede-7084-4a31-97a8-ac4299c268f9 - fake-vendor fake-model serial-fe379ac6-1938-4cc2-93a9-43b1447229ae + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-21d60319-5fe1-4a3b-a4c0-6aa7465e7bde in service + fake-vendor fake-model serial-2acfbb84-5ce0-424e-8d73-44c5071d4430 in service + fake-vendor fake-model serial-2db7f3b4-ed19-4229-b42c-44f49eeb8a91 in service + fake-vendor fake-model serial-2fa34d8e-13d9-42d3-b8ba-ca9d74ac496a in service + fake-vendor fake-model serial-355e268c-c932-4f32-841c-f3ec88fe0495 in service + fake-vendor fake-model serial-427b2ccd-998f-4085-af21-e600604cf21e in service + fake-vendor fake-model serial-588058f2-f51b-4800-a211-1c5dbb32296b in service + fake-vendor fake-model serial-736f6f07-2aa2-4658-8b5c-3bf409ea747a in service + fake-vendor fake-model serial-bcfcdede-7084-4a31-97a8-ac4299c268f9 in service + fake-vendor fake-model serial-fe379ac6-1938-4cc2-93a9-43b1447229ae in service datasets at generation 4: diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt index 23147060249..c4223a9415c 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt @@ -6,19 +6,19 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled 883d9767-021c-4836-81cf-fa02d73fead0 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-41eaf63b-4fa9-443e-8da1-78d1e79aac7d - fake-vendor fake-model serial-5b04e3d3-7a8b-466e-ab63-6ca89a93e100 - fake-vendor fake-model serial-6e8fdb9f-c47a-47b0-b7ee-9a2adc7e4af5 - fake-vendor fake-model serial-8e2c9e92-e35e-494c-8e14-dcf5f5009656 - fake-vendor fake-model serial-a4c575b4-934b-49b9-9c47-9c1241a33607 - fake-vendor fake-model serial-a9a8a692-d2d7-4b3e-a297-d648faf8c7cf - fake-vendor fake-model serial-b65c8376-0084-4d6f-9891-9d6a413d4e56 - fake-vendor fake-model serial-bc61cdae-c96f-4886-b8bd-f9fd69d51e3a - fake-vendor fake-model serial-e9f68306-460a-4b11-b904-f752633bf1fc - fake-vendor fake-model serial-fbc5bdf2-9644-4d0a-b349-f490486da25d + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-41eaf63b-4fa9-443e-8da1-78d1e79aac7d in service + fake-vendor fake-model serial-5b04e3d3-7a8b-466e-ab63-6ca89a93e100 in service + fake-vendor fake-model 
serial-6e8fdb9f-c47a-47b0-b7ee-9a2adc7e4af5 in service + fake-vendor fake-model serial-8e2c9e92-e35e-494c-8e14-dcf5f5009656 in service + fake-vendor fake-model serial-a4c575b4-934b-49b9-9c47-9c1241a33607 in service + fake-vendor fake-model serial-a9a8a692-d2d7-4b3e-a297-d648faf8c7cf in service + fake-vendor fake-model serial-b65c8376-0084-4d6f-9891-9d6a413d4e56 in service + fake-vendor fake-model serial-bc61cdae-c96f-4886-b8bd-f9fd69d51e3a in service + fake-vendor fake-model serial-e9f68306-460a-4b11-b904-f752633bf1fc in service + fake-vendor fake-model serial-fbc5bdf2-9644-4d0a-b349-f490486da25d in service datasets generation 4 -> 5: @@ -104,19 +104,19 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled aae6114d-956b-4980-9759-b00b1ed893ee (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-21b01477-48d0-4b65-9089-4a48277af033 - fake-vendor fake-model serial-2704e66b-3d5c-4b64-951c-a051fa15e4a8 - fake-vendor fake-model serial-27a22f7e-754a-43ea-8ec4-e9cbd9b62e08 - fake-vendor fake-model serial-51c788ff-de33-43f7-b9c5-f5f56bf80736 - fake-vendor fake-model serial-5fe54077-c016-49a9-becb-14993f133d43 - fake-vendor fake-model serial-7e2644a1-bec7-433c-8168-8898d7140aab - fake-vendor fake-model serial-9825ff38-f07d-44a1-9efc-55a25e72015b - fake-vendor fake-model serial-cc585a73-ec86-4f8e-a327-901b947a4c69 - fake-vendor fake-model serial-d2801671-bb69-408e-93f7-ac2b05d992f8 - fake-vendor fake-model serial-f52832ea-60d7-443b-9847-df5384bfc8e2 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-21b01477-48d0-4b65-9089-4a48277af033 in service + fake-vendor fake-model serial-2704e66b-3d5c-4b64-951c-a051fa15e4a8 in service + fake-vendor fake-model serial-27a22f7e-754a-43ea-8ec4-e9cbd9b62e08 in service + fake-vendor fake-model serial-51c788ff-de33-43f7-b9c5-f5f56bf80736 in service + fake-vendor fake-model serial-5fe54077-c016-49a9-becb-14993f133d43 in service + fake-vendor fake-model serial-7e2644a1-bec7-433c-8168-8898d7140aab in service + fake-vendor fake-model serial-9825ff38-f07d-44a1-9efc-55a25e72015b in service + fake-vendor fake-model serial-cc585a73-ec86-4f8e-a327-901b947a4c69 in service + fake-vendor fake-model serial-d2801671-bb69-408e-93f7-ac2b05d992f8 in service + fake-vendor fake-model serial-f52832ea-60d7-443b-9847-df5384bfc8e2 in service datasets generation 3 -> 4: @@ -205,19 +205,19 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled be4a3b25-dde1-40a4-b909-9fa4379a8510 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-288d864c-3a9e-4f21-8c6e-720702c82a29 - fake-vendor fake-model serial-5f877424-eca4-4c5d-af7f-41627382cfd8 - fake-vendor fake-model serial-65ebc532-cbb7-43e1-923b-37c5cb7236d7 - fake-vendor fake-model serial-8277a18f-3187-4893-8ef1-5cbfe2284616 - fake-vendor fake-model serial-83592889-d746-4c5d-98e8-582d9c34a15f - fake-vendor fake-model serial-88e423b5-98c9-4e78-992a-5d01e1c33272 - fake-vendor fake-model serial-a5553017-9991-4ffb-ae37-f9c0e3428562 - fake-vendor fake-model 
serial-b2c5a75b-9f72-405a-8134-691c0f45a1fd - fake-vendor fake-model serial-d4d665fc-df4d-4a13-b9c2-ad13549c0845 - fake-vendor fake-model serial-facb28ca-94fd-47a0-bf63-ee394d32c43b + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-288d864c-3a9e-4f21-8c6e-720702c82a29 in service + fake-vendor fake-model serial-5f877424-eca4-4c5d-af7f-41627382cfd8 in service + fake-vendor fake-model serial-65ebc532-cbb7-43e1-923b-37c5cb7236d7 in service + fake-vendor fake-model serial-8277a18f-3187-4893-8ef1-5cbfe2284616 in service + fake-vendor fake-model serial-83592889-d746-4c5d-98e8-582d9c34a15f in service + fake-vendor fake-model serial-88e423b5-98c9-4e78-992a-5d01e1c33272 in service + fake-vendor fake-model serial-a5553017-9991-4ffb-ae37-f9c0e3428562 in service + fake-vendor fake-model serial-b2c5a75b-9f72-405a-8134-691c0f45a1fd in service + fake-vendor fake-model serial-d4d665fc-df4d-4a13-b9c2-ad13549c0845 in service + fake-vendor fake-model serial-facb28ca-94fd-47a0-bf63-ee394d32c43b in service datasets generation 3 -> 4: diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index 6547f5cd592..5c0225bbc30 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -6,19 +6,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 - fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 - fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a - fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 - fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 - fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b - fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c - fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 - fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 - fake-vendor fake-model serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 in service + fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 in service + fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a in service + fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 in service + fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 in service + fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b in service + fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c in service + fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 in service + fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 in service + fake-vendor fake-model 
serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 in service datasets at generation 2: @@ -99,20 +99,30 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 48d95fef-bc9f-4f50-9a53-1e075836291d (active -> decommissioned): - physical disks from generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- -- fake-vendor fake-model serial-22930645-144a-415c-bceb-2dbfafb9c29e -- fake-vendor fake-model serial-24155070-8a43-4244-a3ba-853d8c71972d -- fake-vendor fake-model serial-494782c7-3821-4f49-918b-ce42cc4d18ad -- fake-vendor fake-model serial-6ea8a67f-d27d-472b-844c-6c8245b00e2b -- fake-vendor fake-model serial-77565d57-c235-4905-b3c7-32d1c2ca2c44 -- fake-vendor fake-model serial-8746874c-dc3b-4454-93cd-2a8fc13720fe -- fake-vendor fake-model serial-a42c5a67-6e10-4586-a56e-48bb8260e75f -- fake-vendor fake-model serial-ca89b120-7bcd-4eeb-baa7-71031fbd103b -- fake-vendor fake-model serial-ef61aa97-c862-428c-82f3-0a69a50d6155 -- fake-vendor fake-model serial-ef64ff6d-250d-47ac-8686-e696cfb46966 + physical disks generation 2 -> 3: + --------------------------------------------------------------------------------------- + vendor model serial disposition + --------------------------------------------------------------------------------------- +* fake-vendor fake-model serial-22930645-144a-415c-bceb-2dbfafb9c29e - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-24155070-8a43-4244-a3ba-853d8c71972d - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-494782c7-3821-4f49-918b-ce42cc4d18ad - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-6ea8a67f-d27d-472b-844c-6c8245b00e2b - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-77565d57-c235-4905-b3c7-32d1c2ca2c44 - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-8746874c-dc3b-4454-93cd-2a8fc13720fe - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-a42c5a67-6e10-4586-a56e-48bb8260e75f - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-ca89b120-7bcd-4eeb-baa7-71031fbd103b - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-ef61aa97-c862-428c-82f3-0a69a50d6155 - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-ef64ff6d-250d-47ac-8686-e696cfb46966 - in service + └─ + expunged ✓ datasets from generation 2: @@ -202,20 +212,20 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 68d24ac5-f341-49ea-a92a-0381b52ab387 (decommissioned): - physical disks from generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- -- fake-vendor fake-model serial-09a5de95-c15f-486e-b776-fca62bf5e179 -- fake-vendor fake-model serial-11b8eccf-7c78-4bde-8639-b35a83082a95 -- fake-vendor fake-model serial-1931c422-4c6a-4597-8ae7-ecb44718462c -- fake-vendor fake-model serial-21a8a87e-73a4-42d4-a426-f6eec94004e3 -- fake-vendor fake-model serial-222c0b55-2966-46b6-816c-9063a7587806 -- fake-vendor fake-model serial-3676f688-f41c-4f89-936a-6b04c3011b2a -- fake-vendor fake-model serial-5e9e14c4-d60d-4b5c-a11c-bba54eb24c9f -- fake-vendor fake-model serial-74f7b89e-88f5-4336-ba8b-22283a6966c5 -- fake-vendor fake-model serial-a787cac8-b5e3-49e3-aaab-20d8eadd8a63 -- fake-vendor fake-model serial-d56b0c9f-0e57-43d8-a1ac-8b4d2c303c29 + physical disks at generation 2: + 
------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-09a5de95-c15f-486e-b776-fca62bf5e179 in service + fake-vendor fake-model serial-11b8eccf-7c78-4bde-8639-b35a83082a95 in service + fake-vendor fake-model serial-1931c422-4c6a-4597-8ae7-ecb44718462c in service + fake-vendor fake-model serial-21a8a87e-73a4-42d4-a426-f6eec94004e3 in service + fake-vendor fake-model serial-222c0b55-2966-46b6-816c-9063a7587806 in service + fake-vendor fake-model serial-3676f688-f41c-4f89-936a-6b04c3011b2a in service + fake-vendor fake-model serial-5e9e14c4-d60d-4b5c-a11c-bba54eb24c9f in service + fake-vendor fake-model serial-74f7b89e-88f5-4336-ba8b-22283a6966c5 in service + fake-vendor fake-model serial-a787cac8-b5e3-49e3-aaab-20d8eadd8a63 in service + fake-vendor fake-model serial-d56b0c9f-0e57-43d8-a1ac-8b4d2c303c29 in service datasets from generation 2: @@ -292,19 +302,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 75bc286f-2b4b-482c-9431-59272af529da (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f - fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 - fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 - fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d - fake-vendor fake-model serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e - fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 - fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 - fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 - fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 - fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f in service + fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 in service + fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 in service + fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d in service + fake-vendor fake-model serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e in service + fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 in service + fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 in service + fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 in service + fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 in service + fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 in service datasets generation 2 -> 3: @@ -382,19 +392,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled affab35f-600a-4109-8ea0-34a067a4e0bc (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 - fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db - 
fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c - fake-vendor fake-model serial-60131a33-1f12-4dbb-9435-bdd368db1f51 - fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 - fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 - fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 - fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b - fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 - fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 in service + fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db in service + fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c in service + fake-vendor fake-model serial-60131a33-1f12-4dbb-9435-bdd368db1f51 in service + fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 in service + fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 in service + fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 in service + fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b in service + fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 in service + fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a in service datasets generation 2 -> 3: diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index fb7b74369e3..24f90a9044f 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -6,19 +6,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 75bc286f-2b4b-482c-9431-59272af529da (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f - fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 - fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 - fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d - fake-vendor fake-model serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e - fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 - fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 - fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 - fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 - fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f in service + fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 in service + fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 in service + fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d in service + fake-vendor fake-model 
serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e in service + fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 in service + fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 in service + fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 in service + fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 in service + fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 in service datasets at generation 3: @@ -96,19 +96,19 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled affab35f-600a-4109-8ea0-34a067a4e0bc (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 - fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db - fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c - fake-vendor fake-model serial-60131a33-1f12-4dbb-9435-bdd368db1f51 - fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 - fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 - fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 - fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b - fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 - fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 in service + fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db in service + fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c in service + fake-vendor fake-model serial-60131a33-1f12-4dbb-9435-bdd368db1f51 in service + fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 in service + fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 in service + fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 in service + fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b in service + fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 in service + fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a in service datasets at generation 3: @@ -183,48 +183,24 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 nexus cab211e1-3ab1-475f-81fa-984748044d8c in service fd00:1122:3344:101::2d - REMOVED SLEDS: - - sled 68d24ac5-f341-49ea-a92a-0381b52ab387 (was decommissioned): - - omicron zones from generation 2: - ---------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - ---------------------------------------------------------------------------------------------- -- crucible 258c5106-ebcd-4651-96e4-d5b0895691f6 expunged ⏳ fd00:1122:3344:102::25 -- crucible 2b046f65-00f5-46da-988c-90c1de32a1dd expunged ⏳ fd00:1122:3344:102::2a -- crucible 30c770a8-625e-4864-8977-d83a11c1c596 expunged ⏳ fd00:1122:3344:102::2d -- crucible 35e3587d-25d3-4234-822f-2d68713b8cbf expunged ⏳ fd00:1122:3344:102::27 -- crucible 46293b15-fd26-48f9-9ccb-122fa0ef41b4 expunged ⏳ fd00:1122:3344:102::28 -- crucible 462c6b8d-1872-4671-b84a-bdcbb69e3baf expunged ⏳ fd00:1122:3344:102::24 -- crucible 
a046c5f9-25e7-47c3-9c67-43d68fb39c5e expunged ⏳ fd00:1122:3344:102::26 -- crucible a49d4037-506e-4732-8e21-1f8c136a3c17 expunged ⏳ fd00:1122:3344:102::2c -- crucible df94dc9a-74d9-43a9-8879-199740665f29 expunged ⏳ fd00:1122:3344:102::2b -- crucible f1622981-7f0b-4a9f-9a70-6b46ab9d5e86 expunged ⏳ fd00:1122:3344:102::29 -- crucible_pantry b217d3a5-4ebb-45e3-b5be-2ebb2c57d8fa expunged ⏳ fd00:1122:3344:102::23 -- internal_dns 0efed95e-f052-4535-b45a-fac1148c0e6a expunged ⏳ fd00:1122:3344:3::1 -- internal_ntp 61a79cb4-7fcb-432d-bbe9-3f9882452db2 expunged ⏳ fd00:1122:3344:102::21 -- nexus ee146b15-bc59-43a3-9567-bb8596e6188d expunged ⏳ fd00:1122:3344:102::22 - - MODIFIED SLEDS: sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active): physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 - fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 - fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a - fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 - fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 - fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b - fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c - fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 - fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 - fake-vendor fake-model serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 in service + fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 in service + fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a in service + fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 in service + fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 in service + fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b in service + fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c in service + fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 in service + fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 in service + fake-vendor fake-model serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 in service datasets at generation 2: @@ -302,6 +278,22 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 48d95fef-bc9f-4f50-9a53-1e075836291d (decommissioned): + physical disks at generation 3: + ------------------------------------------------------------------------------------- + vendor model serial disposition + ------------------------------------------------------------------------------------- + fake-vendor fake-model serial-22930645-144a-415c-bceb-2dbfafb9c29e expunged ✓ + fake-vendor fake-model serial-24155070-8a43-4244-a3ba-853d8c71972d expunged ✓ + fake-vendor fake-model serial-494782c7-3821-4f49-918b-ce42cc4d18ad expunged ✓ + fake-vendor fake-model serial-6ea8a67f-d27d-472b-844c-6c8245b00e2b expunged ✓ + fake-vendor fake-model serial-77565d57-c235-4905-b3c7-32d1c2ca2c44 expunged ✓ + fake-vendor fake-model serial-8746874c-dc3b-4454-93cd-2a8fc13720fe expunged ✓ + fake-vendor fake-model 
serial-a42c5a67-6e10-4586-a56e-48bb8260e75f expunged ✓ + fake-vendor fake-model serial-ca89b120-7bcd-4eeb-baa7-71031fbd103b expunged ✓ + fake-vendor fake-model serial-ef61aa97-c862-428c-82f3-0a69a50d6155 expunged ✓ + fake-vendor fake-model serial-ef64ff6d-250d-47ac-8686-e696cfb46966 expunged ✓ + + omicron zones generation 3 -> 4: ---------------------------------------------------------------------------------------------- zone type zone id disposition underlay IP diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 430dab8371d..4ce0d0ffd66 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -4,19 +4,19 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b sled: 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 - fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 - fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a - fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 - fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 - fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b - fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c - fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 - fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 - fake-vendor fake-model serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-13e6503b-5300-4ccd-abc4-c1512b435929 in service + fake-vendor fake-model serial-44cdb6f2-fa6c-4b69-bab2-3ae4e1ec4b34 in service + fake-vendor fake-model serial-4de5fc8e-0e41-4ab9-ba12-2dc63882c96a in service + fake-vendor fake-model serial-51564e7a-d69f-4942-bcfe-330224633ca6 in service + fake-vendor fake-model serial-5ca23cb3-cc90-41c5-a474-01898cdd0796 in service + fake-vendor fake-model serial-6a23a532-0712-4a8d-be9b-e8c17e97aa4b in service + fake-vendor fake-model serial-6f1a330e-e8d4-4c09-97fc-8918b69b2a3c in service + fake-vendor fake-model serial-7113d104-fb55-4299-bf53-b3c59d258e44 in service + fake-vendor fake-model serial-8c10be49-3a66-40d4-a082-64d09d916f14 in service + fake-vendor fake-model serial-d1ebfd7b-3842-4ad7-be31-2b9c031209a9 in service datasets at generation 2: @@ -94,22 +94,100 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b + sled: 48d95fef-bc9f-4f50-9a53-1e075836291d (decommissioned) + + physical disks at generation 3: + ------------------------------------------------------------------------------------- + vendor model serial disposition + ------------------------------------------------------------------------------------- + fake-vendor fake-model serial-22930645-144a-415c-bceb-2dbfafb9c29e expunged ✓ + fake-vendor fake-model serial-24155070-8a43-4244-a3ba-853d8c71972d expunged ✓ + fake-vendor fake-model serial-494782c7-3821-4f49-918b-ce42cc4d18ad expunged ✓ + fake-vendor fake-model 
serial-6ea8a67f-d27d-472b-844c-6c8245b00e2b expunged ✓ + fake-vendor fake-model serial-77565d57-c235-4905-b3c7-32d1c2ca2c44 expunged ✓ + fake-vendor fake-model serial-8746874c-dc3b-4454-93cd-2a8fc13720fe expunged ✓ + fake-vendor fake-model serial-a42c5a67-6e10-4586-a56e-48bb8260e75f expunged ✓ + fake-vendor fake-model serial-ca89b120-7bcd-4eeb-baa7-71031fbd103b expunged ✓ + fake-vendor fake-model serial-ef61aa97-c862-428c-82f3-0a69a50d6155 expunged ✓ + fake-vendor fake-model serial-ef64ff6d-250d-47ac-8686-e696cfb46966 expunged ✓ + + + omicron zones at generation 3: + ---------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ---------------------------------------------------------------------------------------------- + crucible 085d10cb-a6f0-46de-86bc-ad2f7f1defcf expunged ✓ fd00:1122:3344:103::2d + crucible 2bf62dfa-537a-4616-aad5-64447faaec53 expunged ✓ fd00:1122:3344:103::2b + crucible 50d43a78-e9af-4051-9f5d-85410f44214b expunged ✓ fd00:1122:3344:103::27 + crucible 6e7b5239-0d2e-42a5-80aa-51a3bc859318 expunged ✓ fd00:1122:3344:103::2a + crucible 8d87b485-3fb4-480b-97ce-02d066b799d7 expunged ✓ fd00:1122:3344:103::26 + crucible b3d757b8-033f-4a68-82db-6ff5331b9739 expunged ✓ fd00:1122:3344:103::25 + crucible bcd98cf5-a798-4aa0-81cc-8972a376073c expunged ✓ fd00:1122:3344:103::28 + crucible bd12d9d5-bccf-433a-b078-794a69aeb89a expunged ✓ fd00:1122:3344:103::29 + crucible d283707c-1b8f-4cb9-946d-041b25a83967 expunged ✓ fd00:1122:3344:103::2c + crucible e362415d-2d54-4574-b823-3f01b9b751de expunged ✓ fd00:1122:3344:103::24 + crucible_pantry 208c987a-ab33-47a3-a103-6108dd6dc4cb expunged ✓ fd00:1122:3344:103::23 + internal_dns c428175e-6a1c-40bf-aa36-f608a57431f5 expunged ✓ fd00:1122:3344:2::1 + internal_ntp a8f1b53a-4231-4f04-9939-29e50a0f0e2c expunged ✓ fd00:1122:3344:103::21 + nexus 533416e6-d0bd-482d-b592-29346c8a3471 expunged ✓ fd00:1122:3344:103::22 + + + + sled: 68d24ac5-f341-49ea-a92a-0381b52ab387 (decommissioned) + + physical disks at generation 2: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-09a5de95-c15f-486e-b776-fca62bf5e179 in service + fake-vendor fake-model serial-11b8eccf-7c78-4bde-8639-b35a83082a95 in service + fake-vendor fake-model serial-1931c422-4c6a-4597-8ae7-ecb44718462c in service + fake-vendor fake-model serial-21a8a87e-73a4-42d4-a426-f6eec94004e3 in service + fake-vendor fake-model serial-222c0b55-2966-46b6-816c-9063a7587806 in service + fake-vendor fake-model serial-3676f688-f41c-4f89-936a-6b04c3011b2a in service + fake-vendor fake-model serial-5e9e14c4-d60d-4b5c-a11c-bba54eb24c9f in service + fake-vendor fake-model serial-74f7b89e-88f5-4336-ba8b-22283a6966c5 in service + fake-vendor fake-model serial-a787cac8-b5e3-49e3-aaab-20d8eadd8a63 in service + fake-vendor fake-model serial-d56b0c9f-0e57-43d8-a1ac-8b4d2c303c29 in service + + + omicron zones at generation 2: + ---------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ---------------------------------------------------------------------------------------------- + crucible 258c5106-ebcd-4651-96e4-d5b0895691f6 expunged ⏳ fd00:1122:3344:102::25 + crucible 2b046f65-00f5-46da-988c-90c1de32a1dd expunged ⏳ fd00:1122:3344:102::2a + crucible 
30c770a8-625e-4864-8977-d83a11c1c596 expunged ⏳ fd00:1122:3344:102::2d + crucible 35e3587d-25d3-4234-822f-2d68713b8cbf expunged ⏳ fd00:1122:3344:102::27 + crucible 46293b15-fd26-48f9-9ccb-122fa0ef41b4 expunged ⏳ fd00:1122:3344:102::28 + crucible 462c6b8d-1872-4671-b84a-bdcbb69e3baf expunged ⏳ fd00:1122:3344:102::24 + crucible a046c5f9-25e7-47c3-9c67-43d68fb39c5e expunged ⏳ fd00:1122:3344:102::26 + crucible a49d4037-506e-4732-8e21-1f8c136a3c17 expunged ⏳ fd00:1122:3344:102::2c + crucible df94dc9a-74d9-43a9-8879-199740665f29 expunged ⏳ fd00:1122:3344:102::2b + crucible f1622981-7f0b-4a9f-9a70-6b46ab9d5e86 expunged ⏳ fd00:1122:3344:102::29 + crucible_pantry b217d3a5-4ebb-45e3-b5be-2ebb2c57d8fa expunged ⏳ fd00:1122:3344:102::23 + internal_dns 0efed95e-f052-4535-b45a-fac1148c0e6a expunged ⏳ fd00:1122:3344:3::1 + internal_ntp 61a79cb4-7fcb-432d-bbe9-3f9882452db2 expunged ⏳ fd00:1122:3344:102::21 + nexus ee146b15-bc59-43a3-9567-bb8596e6188d expunged ⏳ fd00:1122:3344:102::22 + + + sled: 75bc286f-2b4b-482c-9431-59272af529da (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f - fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 - fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 - fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d - fake-vendor fake-model serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e - fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 - fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 - fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 - fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 - fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4069c804-c51a-4adc-8822-3cbbab56ed3f in service + fake-vendor fake-model serial-5248a306-4a03-449e-a8a3-6f86d26da755 in service + fake-vendor fake-model serial-55196665-ed61-4b23-9a74-0711bf2eaf90 in service + fake-vendor fake-model serial-6b2a719a-35eb-469f-aa54-114a1f21f37d in service + fake-vendor fake-model serial-7ed4296a-66d1-4fb2-bc56-9b23b8f27d7e in service + fake-vendor fake-model serial-984e2389-e7fd-4af9-ab02-e3caf77f95b5 in service + fake-vendor fake-model serial-a5f75431-3795-426c-8f80-176f658281a5 in service + fake-vendor fake-model serial-cf32a1ce-2c9e-49f5-b1cf-4af7f2a28901 in service + fake-vendor fake-model serial-e405da11-cb6b-4ebc-bac1-9bc997352e10 in service + fake-vendor fake-model serial-f4d7f914-ec73-4b65-8696-5068591d9065 in service datasets at generation 3: @@ -188,19 +266,19 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b sled: affab35f-600a-4109-8ea0-34a067a4e0bc (active) physical disks at generation 2: - ---------------------------------------------------------------------- - vendor model serial - ---------------------------------------------------------------------- - fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 - fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db - fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c - fake-vendor fake-model 
serial-60131a33-1f12-4dbb-9435-bdd368db1f51 - fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 - fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 - fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 - fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b - fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 - fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-33d48d85-751e-4982-b738-eae4d9a05f01 in service + fake-vendor fake-model serial-39ca2e23-4c38-4743-afe0-26b0380b27db in service + fake-vendor fake-model serial-4fbd2fe0-2eac-41b8-8e8d-4fa46c3e8b6c in service + fake-vendor fake-model serial-60131a33-1f12-4dbb-9435-bdd368db1f51 in service + fake-vendor fake-model serial-77e45b5b-869f-4e78-8ce3-28bbe8cf37e9 in service + fake-vendor fake-model serial-789d607d-d196-428e-a988-f7886a327859 in service + fake-vendor fake-model serial-b104b94c-2197-4e76-bfbd-6f966bd5af66 in service + fake-vendor fake-model serial-cd62306a-aedf-47e8-93d5-92a358d64c7b in service + fake-vendor fake-model serial-f1693454-aac1-4265-b8a0-4e9f3f41c7b3 in service + fake-vendor fake-model serial-fe4fdfba-3b6d-47d3-8612-1fb2390b650a in service datasets at generation 3: @@ -275,52 +353,6 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b nexus cab211e1-3ab1-475f-81fa-984748044d8c in service fd00:1122:3344:101::2d -!48d95fef-bc9f-4f50-9a53-1e075836291d -WARNING: Zones exist without physical disks! - omicron zones at generation 3: - ---------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - ---------------------------------------------------------------------------------------------- - crucible 085d10cb-a6f0-46de-86bc-ad2f7f1defcf expunged ✓ fd00:1122:3344:103::2d - crucible 2bf62dfa-537a-4616-aad5-64447faaec53 expunged ✓ fd00:1122:3344:103::2b - crucible 50d43a78-e9af-4051-9f5d-85410f44214b expunged ✓ fd00:1122:3344:103::27 - crucible 6e7b5239-0d2e-42a5-80aa-51a3bc859318 expunged ✓ fd00:1122:3344:103::2a - crucible 8d87b485-3fb4-480b-97ce-02d066b799d7 expunged ✓ fd00:1122:3344:103::26 - crucible b3d757b8-033f-4a68-82db-6ff5331b9739 expunged ✓ fd00:1122:3344:103::25 - crucible bcd98cf5-a798-4aa0-81cc-8972a376073c expunged ✓ fd00:1122:3344:103::28 - crucible bd12d9d5-bccf-433a-b078-794a69aeb89a expunged ✓ fd00:1122:3344:103::29 - crucible d283707c-1b8f-4cb9-946d-041b25a83967 expunged ✓ fd00:1122:3344:103::2c - crucible e362415d-2d54-4574-b823-3f01b9b751de expunged ✓ fd00:1122:3344:103::24 - crucible_pantry 208c987a-ab33-47a3-a103-6108dd6dc4cb expunged ✓ fd00:1122:3344:103::23 - internal_dns c428175e-6a1c-40bf-aa36-f608a57431f5 expunged ✓ fd00:1122:3344:2::1 - internal_ntp a8f1b53a-4231-4f04-9939-29e50a0f0e2c expunged ✓ fd00:1122:3344:103::21 - nexus 533416e6-d0bd-482d-b592-29346c8a3471 expunged ✓ fd00:1122:3344:103::22 - - - -!68d24ac5-f341-49ea-a92a-0381b52ab387 -WARNING: Zones exist without physical disks! 
- omicron zones at generation 2: - ---------------------------------------------------------------------------------------------- - zone type zone id disposition underlay IP - ---------------------------------------------------------------------------------------------- - crucible 258c5106-ebcd-4651-96e4-d5b0895691f6 expunged ⏳ fd00:1122:3344:102::25 - crucible 2b046f65-00f5-46da-988c-90c1de32a1dd expunged ⏳ fd00:1122:3344:102::2a - crucible 30c770a8-625e-4864-8977-d83a11c1c596 expunged ⏳ fd00:1122:3344:102::2d - crucible 35e3587d-25d3-4234-822f-2d68713b8cbf expunged ⏳ fd00:1122:3344:102::27 - crucible 46293b15-fd26-48f9-9ccb-122fa0ef41b4 expunged ⏳ fd00:1122:3344:102::28 - crucible 462c6b8d-1872-4671-b84a-bdcbb69e3baf expunged ⏳ fd00:1122:3344:102::24 - crucible a046c5f9-25e7-47c3-9c67-43d68fb39c5e expunged ⏳ fd00:1122:3344:102::26 - crucible a49d4037-506e-4732-8e21-1f8c136a3c17 expunged ⏳ fd00:1122:3344:102::2c - crucible df94dc9a-74d9-43a9-8879-199740665f29 expunged ⏳ fd00:1122:3344:102::2b - crucible f1622981-7f0b-4a9f-9a70-6b46ab9d5e86 expunged ⏳ fd00:1122:3344:102::29 - crucible_pantry b217d3a5-4ebb-45e3-b5be-2ebb2c57d8fa expunged ⏳ fd00:1122:3344:102::23 - internal_dns 0efed95e-f052-4535-b45a-fac1148c0e6a expunged ⏳ fd00:1122:3344:3::1 - internal_ntp 61a79cb4-7fcb-432d-bbe9-3f9882452db2 expunged ⏳ fd00:1122:3344:102::21 - nexus ee146b15-bc59-43a3-9567-bb8596e6188d expunged ⏳ fd00:1122:3344:102::22 - - - COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) cluster.preserve_downgrade_option: (do not modify) diff --git a/nexus/saga-recovery/Cargo.toml b/nexus/saga-recovery/Cargo.toml index 4356cc97891..c35a5c7b5fe 100644 --- a/nexus/saga-recovery/Cargo.toml +++ b/nexus/saga-recovery/Cargo.toml @@ -34,7 +34,6 @@ nexus-test-utils-macros.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-test-utils.workspace = true -once_cell.workspace = true pretty_assertions.workspace = true steno.workspace = true tokio.workspace = true diff --git a/nexus/src/app/background/driver.rs b/nexus/src/app/background/driver.rs index 4467a8b070e..1eabae8987a 100644 --- a/nexus/src/app/background/driver.rs +++ b/nexus/src/app/background/driver.rs @@ -562,8 +562,8 @@ mod test { }); // Wait for four activations of our task. (This is three periods.) That - // should take between 300ms and 400ms. Allow extra time for a busy - // system. + // would usually take between 300ms and 400ms, but allow plenty of time + // as a buffer. 
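The driver.rs hunk above trades a tight wall-clock ceiling (1.25s) for a generous one (30s) while keeping the exact lower bound: only the lower bound proves the timer actually fired three times, and the upper bound exists solely to catch a hung task, so it can be loose enough that a heavily loaded CI machine cannot trip it. A minimal self-contained sketch of that assertion pattern, with illustrative names and a sleep standing in for the real task activations:

    use std::time::{Duration, Instant};

    // Hypothetical helper mirroring the test's two-sided check; the 100ms
    // period and the 30s ceiling follow the hunk above, everything else is
    // illustrative.
    fn assert_activation_timing(start: Instant, periods: u32) {
        let elapsed = start.elapsed();
        // Exact lower bound: `periods` 100ms timer periods must have elapsed
        // for the observed activations to be possible at all.
        assert!(
            elapsed >= Duration::from_millis(100 * u64::from(periods)),
            "activations observed impossibly fast: {elapsed:?}"
        );
        // Deliberately loose upper bound: only guards against a hung task.
        assert!(
            elapsed < Duration::from_secs(30),
            "took longer than 30s for {periods} timer periods"
        );
    }

    fn main() {
        let start = Instant::now();
        std::thread::sleep(Duration::from_millis(350)); // stand-in for waiting
        assert_activation_timing(start, 3);
    }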
let start = Instant::now(); let wall_start = Utc::now(); wait_until_count(rx1.clone(), 4).await; @@ -571,8 +571,8 @@ mod test { let duration = start.elapsed(); println!("rx1 -> 3 took {:?}", duration); assert!( - duration.as_millis() < 1250, - "took longer than 1.25s to activate our \ + duration < Duration::from_secs(30), + "took longer than 30s to activate our \ every-100ms-task three times" ); assert!(duration.as_millis() >= 300); diff --git a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs index e5e21f5ea97..b940a759a7c 100644 --- a/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs +++ b/nexus/src/app/background/tasks/decommissioned_disk_cleaner.rs @@ -278,6 +278,7 @@ mod tests { zpool_id: ZpoolUuid, dataset_id: DatasetUuid, region_id: RegionUuid, + disk_id: PhysicalDiskUuid, } impl TestFixture { @@ -301,7 +302,7 @@ mod tests { .await .unwrap(); - Self { zpool_id, dataset_id, region_id } + Self { zpool_id, dataset_id, region_id, disk_id } } async fn delete_region(&self, datastore: &DataStore) { @@ -397,7 +398,7 @@ mod tests { let mut task = DecommissionedDiskCleaner::new(datastore.clone(), false); datastore - .physical_disk_decommission_all_expunged(&opctx) + .physical_disk_decommission(&opctx, fixture.disk_id) .await .unwrap(); @@ -431,9 +432,10 @@ mod tests { let mut task = DecommissionedDiskCleaner::new(datastore.clone(), false); datastore - .physical_disk_decommission_all_expunged(&opctx) + .physical_disk_decommission(&opctx, fixture.disk_id) .await .unwrap(); + fixture.delete_region(&datastore).await; // Setup: Disk is decommissioned and has no regions. diff --git a/nexus/src/app/background/tasks/saga_recovery.rs b/nexus/src/app/background/tasks/saga_recovery.rs index ad32f854af7..652e3f2276c 100644 --- a/nexus/src/app/background/tasks/saga_recovery.rs +++ b/nexus/src/app/background/tasks/saga_recovery.rs @@ -493,9 +493,11 @@ mod test { self, poll::{wait_for_condition, CondCheckError}, }; - use once_cell::sync::Lazy; use pretty_assertions::assert_eq; - use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; + use std::sync::{ + atomic::{AtomicBool, AtomicU32, Ordering}, + LazyLock, + }; use steno::{ new_action_noop_undo, Action, ActionContext, ActionError, ActionRegistry, DagBuilder, Node, SagaDag, SagaId, SagaName, @@ -561,10 +563,10 @@ mod test { } } - static ACTION_N1: Lazy>> = - Lazy::new(|| new_action_noop_undo("n1_action", node_one)); - static ACTION_N2: Lazy>> = - Lazy::new(|| new_action_noop_undo("n2_action", node_two)); + static ACTION_N1: LazyLock>> = + LazyLock::new(|| new_action_noop_undo("n1_action", node_one)); + static ACTION_N2: LazyLock>> = + LazyLock::new(|| new_action_noop_undo("n2_action", node_two)); fn registry_create() -> Arc> { let mut registry = ActionRegistry::new(); diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index f114dee432d..f8020346116 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -10,8 +10,8 @@ // easier it will be to test, version, and update in deployed systems. 
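Most of the churn in the remainder of this patch is the same mechanical substitution seen in the saga-recovery test above: once_cell::sync::Lazy becomes std::sync::LazyLock, stable in the standard library since Rust 1.80 and a drop-in replacement for this use case, which is what lets nexus-saga-recovery drop its once_cell dev-dependency. A minimal before-and-after sketch:

    // Before, via the once_cell crate:
    //     use once_cell::sync::Lazy;
    //     static URL: Lazy<String> =
    //         Lazy::new(|| format!("/v1/racks/{}", "some-id"));

    // After, standard library only (Rust 1.80+):
    use std::sync::LazyLock;

    static URL: LazyLock<String> =
        LazyLock::new(|| format!("/v1/racks/{}", "some-id"));

    fn main() {
        // The first dereference runs the closure exactly once, thread-safely;
        // later reads return the cached value.
        println!("{}", *URL);
    }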
use crate::saga_interface::SagaContext; -use once_cell::sync::Lazy; use std::sync::Arc; +use std::sync::LazyLock; use steno::new_action_noop_undo; use steno::ActionContext; use steno::ActionError; @@ -133,11 +133,12 @@ impl From for omicron_common::api::external::Error { } } -pub(super) static ACTION_GENERATE_ID: Lazy = Lazy::new(|| { - new_action_noop_undo("common.uuid_generate", saga_generate_uuid) -}); -pub(crate) static ACTION_REGISTRY: Lazy> = - Lazy::new(|| Arc::new(make_action_registry())); +pub(super) static ACTION_GENERATE_ID: LazyLock = + LazyLock::new(|| { + new_action_noop_undo("common.uuid_generate", saga_generate_uuid) + }); +pub(crate) static ACTION_REGISTRY: LazyLock> = + LazyLock::new(|| Arc::new(make_action_registry())); macro_rules! register_actions { ( $registry:ident, $( $saga: ty ),* ) => { @@ -289,8 +290,8 @@ macro_rules! declare_saga_actions { // Basically, everything to the left of "<>" is just us propagating state // through the macro, and everything to the right of it is user input. (S = $saga:ident $($nodes:ident),* <> $node:ident -> $out:literal { + $a:ident - $u:ident } $($tail:tt)*) => { - static $node: ::once_cell::sync::Lazy = - ::once_cell::sync::Lazy::new(|| { + static $node: ::std::sync::LazyLock = + ::std::sync::LazyLock::new(|| { ::steno::ActionFunc::new_action( crate::app::sagas::__action_name!($saga, $node), $a, $u, ) @@ -300,8 +301,8 @@ macro_rules! declare_saga_actions { }; // Same as the prior match, but without the undo action. (S = $saga:ident $($nodes:ident),* <> $node:ident -> $out:literal { + $a:ident } $($tail:tt)*) => { - static $node: ::once_cell::sync::Lazy = - ::once_cell::sync::Lazy::new(|| { + static $node: ::std::sync::LazyLock = + ::std::sync::LazyLock::new(|| { ::steno::new_action_noop_undo( crate::app::sagas::__action_name!($saga, $node), $a, ) diff --git a/nexus/src/app/update/mod.rs b/nexus/src/app/update/mod.rs index 01e7285478c..ce99903685e 100644 --- a/nexus/src/app/update/mod.rs +++ b/nexus/src/app/update/mod.rs @@ -11,8 +11,9 @@ use nexus_db_model::TufRepoDescription; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use omicron_common::api::external::{ - Error, SemverVersion, TufRepoInsertResponse, TufRepoInsertStatus, + Error, TufRepoInsertResponse, TufRepoInsertStatus, }; +use semver::Version; use update_common::artifacts::{ArtifactsWithPlan, ControlPlaneZonesMode}; mod common_sp_update; @@ -100,7 +101,7 @@ impl super::Nexus { pub(crate) async fn updates_get_repository( &self, opctx: &OpContext, - system_version: SemverVersion, + system_version: Version, ) -> Result { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; diff --git a/nexus/src/bin/schema-updater.rs b/nexus/src/bin/schema-updater.rs index 1b32fe26eee..52df3457d6f 100644 --- a/nexus/src/bin/schema-updater.rs +++ b/nexus/src/bin/schema-updater.rs @@ -14,7 +14,7 @@ use nexus_db_model::AllSchemaVersions; use nexus_db_model::SCHEMA_VERSION; use nexus_db_queries::db; use nexus_db_queries::db::DataStore; -use omicron_common::api::external::SemverVersion; +use semver::Version; use slog::Drain; use slog::Level; use slog::LevelFilter; @@ -56,7 +56,7 @@ enum Cmd { #[clap(visible_alias = "up")] Upgrade { #[arg(default_value_t = SCHEMA_VERSION)] - version: SemverVersion, + version: Version, }, } diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index a07453627b4..535ba0f95f2 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -19,10 +19,10 @@ use 
nexus_types::external_api::params::{self, RelativeUri};
 use nexus_types::identity::Resource;
 use omicron_common::api::external::http_pagination::PaginatedBy;
 use omicron_common::api::external::{DataPageParams, Error, NameOrId};
-use once_cell::sync::Lazy;
 use serde_urlencoded;
 use std::collections::HashMap;
 use std::num::NonZeroU32;
+use std::sync::LazyLock;
 use tokio::fs::File;
 use tokio_util::codec::{BytesCodec, FramedRead};
@@ -296,7 +296,7 @@ fn with_gz_ext(path: &Utf8Path) -> Utf8PathBuf {
 // Define header values as const so that `HeaderValue::from_static` is given the
 // opportunity to panic at compile time
-static ALLOWED_EXTENSIONS: Lazy<HashMap<&str, HeaderValue>> = {
+static ALLOWED_EXTENSIONS: LazyLock<HashMap<&str, HeaderValue>> = {
     const CONTENT_TYPES: [(&str, HeaderValue); 10] = [
         ("css", HeaderValue::from_static("text/css")),
         ("html", HeaderValue::from_static("text/html; charset=utf-8")),
@@ -310,7 +310,7 @@ static ALLOWED_EXTENSIONS: Lazy<HashMap<&str, HeaderValue>> = {
         ("woff2", HeaderValue::from_static("font/woff2")),
     ];
-    Lazy::new(|| HashMap::from(CONTENT_TYPES))
+    LazyLock::new(|| HashMap::from(CONTENT_TYPES))
 };
 const CONTENT_ENCODING_GZIP: HeaderValue = HeaderValue::from_static("gzip");
 // Web application security headers; these should stay in sync with the headers
diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs
index 656b4ba8266..70104cac900 100644
--- a/nexus/tests/integration_tests/endpoints.rs
+++ b/nexus/tests/integration_tests/endpoints.rs
@@ -36,67 +36,73 @@ use omicron_common::api::external::RouteTarget;
 use omicron_common::api::external::UserId;
 use omicron_common::api::external::VpcFirewallRuleUpdateParams;
 use omicron_test_utils::certificates::CertificateChain;
-use once_cell::sync::Lazy;
 use std::net::IpAddr;
 use std::net::Ipv4Addr;
 use std::str::FromStr;
+use std::sync::LazyLock;
 type DiskTest<'a> =
     nexus_test_utils::resource_helpers::DiskTest<'a, omicron_nexus::Server>;
-pub static HARDWARE_RACK_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/hardware/racks/{}", RACK_UUID));
+pub static HARDWARE_RACK_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/hardware/racks/{}", RACK_UUID));
 pub const HARDWARE_UNINITIALIZED_SLEDS: &'static str =
     "/v1/system/hardware/sleds-uninitialized";
-pub static HARDWARE_SLED_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID));
-pub static HARDWARE_SLED_PROVISION_POLICY_URL: Lazy<String> = Lazy::new(|| {
-    format!("/v1/system/hardware/sleds/{}/provision-policy", SLED_AGENT_UUID)
-});
-pub static DEMO_SLED_PROVISION_POLICY: Lazy<params::SledProvisionPolicyParams> =
-    Lazy::new(|| params::SledProvisionPolicyParams {
-        state: SledProvisionPolicy::NonProvisionable,
+pub static HARDWARE_SLED_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID));
+pub static HARDWARE_SLED_PROVISION_POLICY_URL: LazyLock<String> =
+    LazyLock::new(|| {
+        format!(
+            "/v1/system/hardware/sleds/{}/provision-policy",
+            SLED_AGENT_UUID
+        )
     });
+pub static DEMO_SLED_PROVISION_POLICY: LazyLock<
+    params::SledProvisionPolicyParams,
+> = LazyLock::new(|| params::SledProvisionPolicyParams {
+    state: SledProvisionPolicy::NonProvisionable,
+});
-pub static HARDWARE_SWITCH_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/hardware/switches/{}", SWITCH_UUID));
+pub static HARDWARE_SWITCH_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/hardware/switches/{}", SWITCH_UUID));
 pub const HARDWARE_DISKS_URL: &'static str = "/v1/system/hardware/disks";
-pub static HARDWARE_DISK_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/hardware/disks/{}", PHYSICAL_DISK_UUID));
-pub static HARDWARE_SLED_DISK_URL: Lazy<String> = Lazy::new(|| {
+pub static HARDWARE_DISK_URL: LazyLock<String> = LazyLock::new(|| {
+    format!("/v1/system/hardware/disks/{}", PHYSICAL_DISK_UUID)
+});
+pub static HARDWARE_SLED_DISK_URL: LazyLock<String> = LazyLock::new(|| {
     format!("/v1/system/hardware/sleds/{}/disks", SLED_AGENT_UUID)
 });
-pub static SLED_INSTANCES_URL: Lazy<String> = Lazy::new(|| {
+pub static SLED_INSTANCES_URL: LazyLock<String> = LazyLock::new(|| {
     format!("/v1/system/hardware/sleds/{}/instances", SLED_AGENT_UUID)
 });
-pub static DEMO_UNINITIALIZED_SLED: Lazy<params::UninitializedSledId> =
-    Lazy::new(|| params::UninitializedSledId {
+pub static DEMO_UNINITIALIZED_SLED: LazyLock<params::UninitializedSledId> =
+    LazyLock::new(|| params::UninitializedSledId {
         serial: "demo-serial".to_string(),
         part: "demo-part".to_string(),
     });
 pub const SUPPORT_BUNDLES_URL: &'static str =
     "/experimental/v1/system/support-bundles";
-pub static SUPPORT_BUNDLE_URL: Lazy<String> =
-    Lazy::new(|| format!("{SUPPORT_BUNDLES_URL}/{{id}}"));
+pub static SUPPORT_BUNDLE_URL: LazyLock<String> =
+    LazyLock::new(|| format!("{SUPPORT_BUNDLES_URL}/{{id}}"));
 // Global policy
 pub const SYSTEM_POLICY_URL: &'static str = "/v1/system/policy";
 // Silo used for testing
-pub static DEMO_SILO_NAME: Lazy<Name> =
-    Lazy::new(|| "demo-silo".parse().unwrap());
-pub static DEMO_SILO_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/silos/{}", *DEMO_SILO_NAME));
-pub static DEMO_SILO_IP_POOLS_URL: Lazy<String> =
-    Lazy::new(|| format!("{}/ip-pools", *DEMO_SILO_URL));
-pub static DEMO_SILO_POLICY_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/silos/{}/policy", *DEMO_SILO_NAME));
-pub static DEMO_SILO_QUOTAS_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/silos/{}/quotas", *DEMO_SILO_NAME));
-pub static DEMO_SILO_CREATE: Lazy<params::SiloCreate> =
-    Lazy::new(|| params::SiloCreate {
+pub static DEMO_SILO_NAME: LazyLock<Name> =
+    LazyLock::new(|| "demo-silo".parse().unwrap());
+pub static DEMO_SILO_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/silos/{}", *DEMO_SILO_NAME));
+pub static DEMO_SILO_IP_POOLS_URL: LazyLock<String> =
+    LazyLock::new(|| format!("{}/ip-pools", *DEMO_SILO_URL));
+pub static DEMO_SILO_POLICY_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/silos/{}/policy", *DEMO_SILO_NAME));
+pub static DEMO_SILO_QUOTAS_URL: LazyLock<String> =
+    LazyLock::new(|| format!("/v1/system/silos/{}/quotas", *DEMO_SILO_NAME));
+pub static DEMO_SILO_CREATE: LazyLock<params::SiloCreate> =
+    LazyLock::new(|| params::SiloCreate {
         identity: IdentityMetadataCreateParams {
             name: DEMO_SILO_NAME.clone(),
             description: String::from(""),
@@ -109,56 +115,60 @@ pub static DEMO_SILO_CREATE: Lazy<params::SiloCreate> =
         mapped_fleet_roles: Default::default(),
     });
-pub static DEMO_SILO_UTIL_URL: Lazy<String> =
-    Lazy::new(|| format!("/v1/system/utilization/silos/{}", *DEMO_SILO_NAME));
+pub static DEMO_SILO_UTIL_URL: LazyLock<String> = LazyLock::new(|| {
+    format!("/v1/system/utilization/silos/{}", *DEMO_SILO_NAME)
+});
 // Use the default Silo for testing the local IdP
-pub static DEMO_SILO_USERS_CREATE_URL: Lazy<String> = Lazy::new(|| {
+pub static DEMO_SILO_USERS_CREATE_URL: LazyLock<String> = LazyLock::new(|| {
     format!(
         "/v1/system/identity-providers/local/users?silo={}",
         DEFAULT_SILO.identity().name,
     )
 });
-pub static DEMO_SILO_USERS_LIST_URL: Lazy<String> = Lazy::new(|| {
+pub static DEMO_SILO_USERS_LIST_URL: LazyLock<String> = LazyLock::new(|| {
     format!("/v1/system/users?silo={}", DEFAULT_SILO.identity().name,)
 });
-pub static DEMO_SILO_USER_ID_GET_URL: Lazy<String> = Lazy::new(|| {
+pub static DEMO_SILO_USER_ID_GET_URL: LazyLock<String> = LazyLock::new(|| {
     format!("/v1/system/users/{{id}}?silo={}", DEFAULT_SILO.identity().name,)
 });
-pub static DEMO_SILO_USER_ID_DELETE_URL: Lazy<String> = Lazy::new(||
{ - format!( - "/v1/system/identity-providers/local/users/{{id}}?silo={}", - DEFAULT_SILO.identity().name, - ) -}); -pub static DEMO_SILO_USER_ID_SET_PASSWORD_URL: Lazy = Lazy::new(|| { - format!( +pub static DEMO_SILO_USER_ID_DELETE_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/system/identity-providers/local/users/{{id}}?silo={}", + DEFAULT_SILO.identity().name, + ) + }); +pub static DEMO_SILO_USER_ID_SET_PASSWORD_URL: LazyLock = + LazyLock::new(|| { + format!( "/v1/system/identity-providers/local/users/{{id}}/set-password?silo={}", DEFAULT_SILO.identity().name, ) -}); + }); // Project used for testing -pub static DEMO_PROJECT_NAME: Lazy = - Lazy::new(|| "demo-project".parse().unwrap()); -pub static DEMO_PROJECT_URL: Lazy = - Lazy::new(|| format!("/v1/projects/{}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_SELECTOR: Lazy = - Lazy::new(|| format!("project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_POLICY_URL: Lazy = - Lazy::new(|| format!("/v1/projects/{}/policy", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_URL_IMAGES: Lazy = - Lazy::new(|| format!("/v1/images?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_URL_INSTANCES: Lazy = - Lazy::new(|| format!("/v1/instances?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_URL_SNAPSHOTS: Lazy = - Lazy::new(|| format!("/v1/snapshots?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_URL_VPCS: Lazy = - Lazy::new(|| format!("/v1/vpcs?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_URL_FIPS: Lazy = - Lazy::new(|| format!("/v1/floating-ips?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_CREATE: Lazy = - Lazy::new(|| params::ProjectCreate { +pub static DEMO_PROJECT_NAME: LazyLock = + LazyLock::new(|| "demo-project".parse().unwrap()); +pub static DEMO_PROJECT_URL: LazyLock = + LazyLock::new(|| format!("/v1/projects/{}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_SELECTOR: LazyLock = + LazyLock::new(|| format!("project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_POLICY_URL: LazyLock = + LazyLock::new(|| format!("/v1/projects/{}/policy", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_URL_IMAGES: LazyLock = + LazyLock::new(|| format!("/v1/images?project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_URL_INSTANCES: LazyLock = + LazyLock::new(|| format!("/v1/instances?project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_URL_SNAPSHOTS: LazyLock = + LazyLock::new(|| format!("/v1/snapshots?project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_URL_VPCS: LazyLock = + LazyLock::new(|| format!("/v1/vpcs?project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_URL_FIPS: LazyLock = LazyLock::new(|| { + format!("/v1/floating-ips?project={}", *DEMO_PROJECT_NAME) +}); +pub static DEMO_PROJECT_CREATE: LazyLock = + LazyLock::new(|| params::ProjectCreate { identity: IdentityMetadataCreateParams { name: DEMO_PROJECT_NAME.clone(), description: String::from(""), @@ -166,22 +176,22 @@ pub static DEMO_PROJECT_CREATE: Lazy = }); // VPC used for testing -pub static DEMO_VPC_NAME: Lazy = - Lazy::new(|| "demo-vpc".parse().unwrap()); -pub static DEMO_VPC_URL: Lazy = Lazy::new(|| { +pub static DEMO_VPC_NAME: LazyLock = + LazyLock::new(|| "demo-vpc".parse().unwrap()); +pub static DEMO_VPC_URL: LazyLock = LazyLock::new(|| { format!("/v1/vpcs/{}?{}", *DEMO_VPC_NAME, *DEMO_PROJECT_SELECTOR) }); -pub static DEMO_VPC_SELECTOR: Lazy = Lazy::new(|| { +pub static DEMO_VPC_SELECTOR: LazyLock = LazyLock::new(|| { format!("project={}&vpc={}", *DEMO_PROJECT_NAME, *DEMO_VPC_NAME) }); 
-pub static DEMO_VPC_URL_FIREWALL_RULES: Lazy = - Lazy::new(|| format!("/v1/vpc-firewall-rules?{}", *DEMO_VPC_SELECTOR)); -pub static DEMO_VPC_URL_ROUTERS: Lazy = - Lazy::new(|| format!("/v1/vpc-routers?{}", *DEMO_VPC_SELECTOR)); -pub static DEMO_VPC_URL_SUBNETS: Lazy = - Lazy::new(|| format!("/v1/vpc-subnets?{}", *DEMO_VPC_SELECTOR)); -pub static DEMO_VPC_CREATE: Lazy = - Lazy::new(|| params::VpcCreate { +pub static DEMO_VPC_URL_FIREWALL_RULES: LazyLock = + LazyLock::new(|| format!("/v1/vpc-firewall-rules?{}", *DEMO_VPC_SELECTOR)); +pub static DEMO_VPC_URL_ROUTERS: LazyLock = + LazyLock::new(|| format!("/v1/vpc-routers?{}", *DEMO_VPC_SELECTOR)); +pub static DEMO_VPC_URL_SUBNETS: LazyLock = + LazyLock::new(|| format!("/v1/vpc-subnets?{}", *DEMO_VPC_SELECTOR)); +pub static DEMO_VPC_CREATE: LazyLock = + LazyLock::new(|| params::VpcCreate { identity: IdentityMetadataCreateParams { name: DEMO_VPC_NAME.clone(), description: String::from(""), @@ -191,19 +201,20 @@ pub static DEMO_VPC_CREATE: Lazy = }); // VPC Subnet used for testing -pub static DEMO_VPC_SUBNET_NAME: Lazy = - Lazy::new(|| "demo-vpc-subnet".parse().unwrap()); -pub static DEMO_VPC_SUBNET_URL: Lazy = Lazy::new(|| { +pub static DEMO_VPC_SUBNET_NAME: LazyLock = + LazyLock::new(|| "demo-vpc-subnet".parse().unwrap()); +pub static DEMO_VPC_SUBNET_URL: LazyLock = LazyLock::new(|| { format!("/v1/vpc-subnets/{}?{}", *DEMO_VPC_SUBNET_NAME, *DEMO_VPC_SELECTOR) }); -pub static DEMO_VPC_SUBNET_INTERFACES_URL: Lazy = Lazy::new(|| { - format!( - "/v1/vpc-subnets/{}/network-interfaces?{}", - *DEMO_VPC_SUBNET_NAME, *DEMO_VPC_SELECTOR - ) -}); -pub static DEMO_VPC_SUBNET_CREATE: Lazy = - Lazy::new(|| params::VpcSubnetCreate { +pub static DEMO_VPC_SUBNET_INTERFACES_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/vpc-subnets/{}/network-interfaces?{}", + *DEMO_VPC_SUBNET_NAME, *DEMO_VPC_SELECTOR + ) + }); +pub static DEMO_VPC_SUBNET_CREATE: LazyLock = + LazyLock::new(|| params::VpcSubnetCreate { identity: IdentityMetadataCreateParams { name: DEMO_VPC_SUBNET_NAME.clone(), description: String::from(""), @@ -214,22 +225,22 @@ pub static DEMO_VPC_SUBNET_CREATE: Lazy = }); // VPC Router used for testing -pub static DEMO_VPC_ROUTER_NAME: Lazy = - Lazy::new(|| "demo-vpc-router".parse().unwrap()); -pub static DEMO_VPC_ROUTER_URL: Lazy = Lazy::new(|| { +pub static DEMO_VPC_ROUTER_NAME: LazyLock = + LazyLock::new(|| "demo-vpc-router".parse().unwrap()); +pub static DEMO_VPC_ROUTER_URL: LazyLock = LazyLock::new(|| { format!( "/v1/vpc-routers/{}?project={}&vpc={}", *DEMO_VPC_ROUTER_NAME, *DEMO_PROJECT_NAME, *DEMO_VPC_NAME ) }); -pub static DEMO_VPC_ROUTER_URL_ROUTES: Lazy = Lazy::new(|| { +pub static DEMO_VPC_ROUTER_URL_ROUTES: LazyLock = LazyLock::new(|| { format!( "/v1/vpc-router-routes?project={}&vpc={}&router={}", *DEMO_PROJECT_NAME, *DEMO_VPC_NAME, *DEMO_VPC_ROUTER_NAME ) }); -pub static DEMO_VPC_ROUTER_CREATE: Lazy = - Lazy::new(|| params::VpcRouterCreate { +pub static DEMO_VPC_ROUTER_CREATE: LazyLock = + LazyLock::new(|| params::VpcRouterCreate { identity: IdentityMetadataCreateParams { name: DEMO_VPC_ROUTER_NAME.clone(), description: String::from(""), @@ -237,9 +248,9 @@ pub static DEMO_VPC_ROUTER_CREATE: Lazy = }); // Router Route used for testing -pub static DEMO_ROUTER_ROUTE_NAME: Lazy = - Lazy::new(|| "demo-router-route".parse().unwrap()); -pub static DEMO_ROUTER_ROUTE_URL: Lazy = Lazy::new(|| { +pub static DEMO_ROUTER_ROUTE_NAME: LazyLock = + LazyLock::new(|| "demo-router-route".parse().unwrap()); +pub static DEMO_ROUTER_ROUTE_URL: LazyLock = 
LazyLock::new(|| { format!( "/v1/vpc-router-routes/{}?project={}&vpc={}&router={}", *DEMO_ROUTER_ROUTE_NAME, @@ -248,8 +259,8 @@ pub static DEMO_ROUTER_ROUTE_URL: Lazy = Lazy::new(|| { *DEMO_VPC_ROUTER_NAME ) }); -pub static DEMO_ROUTER_ROUTE_CREATE: Lazy = - Lazy::new(|| params::RouterRouteCreate { +pub static DEMO_ROUTER_ROUTE_CREATE: LazyLock = + LazyLock::new(|| params::RouterRouteCreate { identity: IdentityMetadataCreateParams { name: DEMO_ROUTER_ROUTE_NAME.clone(), description: String::from(""), @@ -259,107 +270,113 @@ pub static DEMO_ROUTER_ROUTE_CREATE: Lazy = }); // Internet Gateway used for testing -pub static DEMO_INTERNET_GATEWAY_NAME: Lazy = - Lazy::new(|| "demo-internet-gateway".parse().unwrap()); -pub static DEMO_INTERNET_GATEWAYS_URL: Lazy = Lazy::new(|| { +pub static DEMO_INTERNET_GATEWAY_NAME: LazyLock = + LazyLock::new(|| "demo-internet-gateway".parse().unwrap()); +pub static DEMO_INTERNET_GATEWAYS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/internet-gateways?project={}&vpc={}", *DEMO_PROJECT_NAME, *DEMO_VPC_NAME ) }); -pub static DEMO_INTERNET_GATEWAY_URL: Lazy = Lazy::new(|| { +pub static DEMO_INTERNET_GATEWAY_URL: LazyLock = LazyLock::new(|| { format!( "/v1/internet-gateways/{}?project={}&vpc={}", *DEMO_INTERNET_GATEWAY_NAME, *DEMO_PROJECT_NAME, *DEMO_VPC_NAME ) }); -pub static DEMO_INTERNET_GATEWAY_CREATE: Lazy = - Lazy::new(|| params::InternetGatewayCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_INTERNET_GATEWAY_NAME.clone(), - description: String::from(""), - }, - }); -pub static DEMO_INTERNET_GATEWAY_IP_POOL_CREATE: Lazy< +pub static DEMO_INTERNET_GATEWAY_CREATE: LazyLock< + params::InternetGatewayCreate, +> = LazyLock::new(|| params::InternetGatewayCreate { + identity: IdentityMetadataCreateParams { + name: DEMO_INTERNET_GATEWAY_NAME.clone(), + description: String::from(""), + }, +}); +pub static DEMO_INTERNET_GATEWAY_IP_POOL_CREATE: LazyLock< params::InternetGatewayIpPoolCreate, -> = Lazy::new(|| params::InternetGatewayIpPoolCreate { +> = LazyLock::new(|| params::InternetGatewayIpPoolCreate { identity: IdentityMetadataCreateParams { name: DEMO_INTERNET_GATEWAY_NAME.clone(), description: String::from(""), }, ip_pool: NameOrId::Id(uuid::Uuid::new_v4()), }); -pub static DEMO_INTERNET_GATEWAY_IP_ADDRESS_CREATE: Lazy< +pub static DEMO_INTERNET_GATEWAY_IP_ADDRESS_CREATE: LazyLock< params::InternetGatewayIpAddressCreate, -> = Lazy::new(|| params::InternetGatewayIpAddressCreate { +> = LazyLock::new(|| params::InternetGatewayIpAddressCreate { identity: IdentityMetadataCreateParams { name: DEMO_INTERNET_GATEWAY_NAME.clone(), description: String::from(""), }, address: IpAddr::V4(Ipv4Addr::UNSPECIFIED), }); -pub static DEMO_INTERNET_GATEWAY_IP_POOLS_URL: Lazy = Lazy::new(|| { - format!( - "/v1/internet-gateway-ip-pools?project={}&vpc={}&gateway={}", - *DEMO_PROJECT_NAME, *DEMO_VPC_NAME, *DEMO_INTERNET_GATEWAY_NAME, - ) -}); -pub static DEMO_INTERNET_GATEWAY_IP_ADDRS_URL: Lazy = Lazy::new(|| { - format!( - "/v1/internet-gateway-ip-addresses?project={}&vpc={}&gateway={}", - *DEMO_PROJECT_NAME, *DEMO_VPC_NAME, *DEMO_INTERNET_GATEWAY_NAME, - ) -}); -pub static DEMO_INTERNET_GATEWAY_IP_POOL_NAME: Lazy = - Lazy::new(|| "demo-igw-pool".parse().unwrap()); -pub static DEMO_INTERNET_GATEWAY_IP_ADDRESS_NAME: Lazy = - Lazy::new(|| "demo-igw-address".parse().unwrap()); -pub static DEMO_INTERNET_GATEWAY_IP_POOL_URL: Lazy = Lazy::new(|| { - format!( - "/v1/internet-gateway-ip-pools/{}?project={}&vpc={}&gateway={}", - *DEMO_INTERNET_GATEWAY_IP_POOL_NAME, - 
*DEMO_PROJECT_NAME, - *DEMO_VPC_NAME, - *DEMO_INTERNET_GATEWAY_NAME, - ) -}); -pub static DEMO_INTERNET_GATEWAY_IP_ADDR_URL: Lazy = Lazy::new(|| { - format!( - "/v1/internet-gateway-ip-addresses/{}?project={}&vpc={}&gateway={}", - *DEMO_INTERNET_GATEWAY_IP_ADDRESS_NAME, - *DEMO_PROJECT_NAME, - *DEMO_VPC_NAME, - *DEMO_INTERNET_GATEWAY_NAME, - ) -}); +pub static DEMO_INTERNET_GATEWAY_IP_POOLS_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/internet-gateway-ip-pools?project={}&vpc={}&gateway={}", + *DEMO_PROJECT_NAME, *DEMO_VPC_NAME, *DEMO_INTERNET_GATEWAY_NAME, + ) + }); +pub static DEMO_INTERNET_GATEWAY_IP_ADDRS_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/internet-gateway-ip-addresses?project={}&vpc={}&gateway={}", + *DEMO_PROJECT_NAME, *DEMO_VPC_NAME, *DEMO_INTERNET_GATEWAY_NAME, + ) + }); +pub static DEMO_INTERNET_GATEWAY_IP_POOL_NAME: LazyLock = + LazyLock::new(|| "demo-igw-pool".parse().unwrap()); +pub static DEMO_INTERNET_GATEWAY_IP_ADDRESS_NAME: LazyLock = + LazyLock::new(|| "demo-igw-address".parse().unwrap()); +pub static DEMO_INTERNET_GATEWAY_IP_POOL_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/internet-gateway-ip-pools/{}?project={}&vpc={}&gateway={}", + *DEMO_INTERNET_GATEWAY_IP_POOL_NAME, + *DEMO_PROJECT_NAME, + *DEMO_VPC_NAME, + *DEMO_INTERNET_GATEWAY_NAME, + ) + }); +pub static DEMO_INTERNET_GATEWAY_IP_ADDR_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/internet-gateway-ip-addresses/{}?project={}&vpc={}&gateway={}", + *DEMO_INTERNET_GATEWAY_IP_ADDRESS_NAME, + *DEMO_PROJECT_NAME, + *DEMO_VPC_NAME, + *DEMO_INTERNET_GATEWAY_NAME, + ) + }); // Disk used for testing -pub static DEMO_DISK_NAME: Lazy = - Lazy::new(|| "demo-disk".parse().unwrap()); +pub static DEMO_DISK_NAME: LazyLock = + LazyLock::new(|| "demo-disk".parse().unwrap()); // TODO: Once we can test a URL multiple times we should also a case to exercise // authz for disks filtered by instances -pub static DEMO_DISKS_URL: Lazy = - Lazy::new(|| format!("/v1/disks?{}", *DEMO_PROJECT_SELECTOR)); -pub static DEMO_DISK_URL: Lazy = Lazy::new(|| { +pub static DEMO_DISKS_URL: LazyLock = + LazyLock::new(|| format!("/v1/disks?{}", *DEMO_PROJECT_SELECTOR)); +pub static DEMO_DISK_URL: LazyLock = LazyLock::new(|| { format!("/v1/disks/{}?{}", *DEMO_DISK_NAME, *DEMO_PROJECT_SELECTOR) }); -pub static DEMO_DISK_CREATE: Lazy = Lazy::new(|| { - params::DiskCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_DISK_NAME.clone(), - description: "".parse().unwrap(), - }, - disk_source: params::DiskSource::Blank { - block_size: params::BlockSize::try_from(4096).unwrap(), - }, - size: ByteCount::from_gibibytes_u32( - // divide by at least two to leave space for snapshot blocks - DiskTest::DEFAULT_ZPOOL_SIZE_GIB / 5, - ), - } -}); -pub static DEMO_DISK_METRICS_URL: Lazy = Lazy::new(|| { +pub static DEMO_DISK_CREATE: LazyLock = + LazyLock::new(|| { + params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: DEMO_DISK_NAME.clone(), + description: "".parse().unwrap(), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }, + size: ByteCount::from_gibibytes_u32( + // divide by at least two to leave space for snapshot blocks + DiskTest::DEFAULT_ZPOOL_SIZE_GIB / 5, + ), + } + }); +pub static DEMO_DISK_METRICS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/disks/{}/metrics/activated?start_time={:?}&end_time={:?}&{}", *DEMO_DISK_NAME, @@ -370,10 +387,10 @@ pub static DEMO_DISK_METRICS_URL: Lazy = Lazy::new(|| { }); // Related to importing 
blocks from an external source -pub static DEMO_IMPORT_DISK_NAME: Lazy = - Lazy::new(|| "demo-import-disk".parse().unwrap()); -pub static DEMO_IMPORT_DISK_CREATE: Lazy = - Lazy::new(|| { +pub static DEMO_IMPORT_DISK_NAME: LazyLock = + LazyLock::new(|| "demo-import-disk".parse().unwrap()); +pub static DEMO_IMPORT_DISK_CREATE: LazyLock = + LazyLock::new(|| { params::DiskCreate { identity: IdentityMetadataCreateParams { name: DEMO_IMPORT_DISK_NAME.clone(), @@ -388,113 +405,120 @@ pub static DEMO_IMPORT_DISK_CREATE: Lazy = ), } }); -pub static DEMO_IMPORT_DISK_BULK_WRITE_START_URL: Lazy = - Lazy::new(|| { +pub static DEMO_IMPORT_DISK_BULK_WRITE_START_URL: LazyLock = + LazyLock::new(|| { format!( "/v1/disks/{}/bulk-write-start?{}", *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_IMPORT_DISK_BULK_WRITE_URL: Lazy = Lazy::new(|| { - format!( - "/v1/disks/{}/bulk-write?{}", - *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_IMPORT_DISK_BULK_WRITE_STOP_URL: Lazy = - Lazy::new(|| { +pub static DEMO_IMPORT_DISK_BULK_WRITE_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/disks/{}/bulk-write?{}", + *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_IMPORT_DISK_BULK_WRITE_STOP_URL: LazyLock = + LazyLock::new(|| { format!( "/v1/disks/{}/bulk-write-stop?{}", *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_IMPORT_DISK_FINALIZE_URL: Lazy = Lazy::new(|| { - format!( - "/v1/disks/{}/finalize?{}", - *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR - ) -}); +pub static DEMO_IMPORT_DISK_FINALIZE_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/disks/{}/finalize?{}", + *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR + ) + }); // Instance used for testing -pub static DEMO_INSTANCE_NAME: Lazy = - Lazy::new(|| "demo-instance".parse().unwrap()); -pub static DEMO_INSTANCE_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_NAME: LazyLock = + LazyLock::new(|| "demo-instance".parse().unwrap()); +pub static DEMO_INSTANCE_URL: LazyLock = LazyLock::new(|| { format!("/v1/instances/{}?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR) }); -pub static DEMO_INSTANCE_START_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_START_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/start?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_STOP_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_STOP_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/stop?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_REBOOT_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_REBOOT_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/reboot?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_SERIAL_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_SERIAL_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/serial-console?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_SERIAL_STREAM_URL: Lazy = Lazy::new(|| { - format!( - "/v1/instances/{}/serial-console/stream?{}", - *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_INSTANCE_DISKS_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_SERIAL_STREAM_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/instances/{}/serial-console/stream?{}", + *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_INSTANCE_DISKS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/disks?{}", 
*DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_DISKS_ATTACH_URL: Lazy = Lazy::new(|| { - format!( - "/v1/instances/{}/disks/attach?{}", - *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_INSTANCE_DISKS_DETACH_URL: Lazy = Lazy::new(|| { - format!( - "/v1/instances/{}/disks/detach?{}", - *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_INSTANCE_EPHEMERAL_IP_URL: Lazy = Lazy::new(|| { - format!( - "/v1/instances/{}/external-ips/ephemeral?{}", - *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_INSTANCE_SSH_KEYS_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_DISKS_ATTACH_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/instances/{}/disks/attach?{}", + *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_INSTANCE_DISKS_DETACH_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/instances/{}/disks/detach?{}", + *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_INSTANCE_EPHEMERAL_IP_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/instances/{}/external-ips/ephemeral?{}", + *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_INSTANCE_SSH_KEYS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/instances/{}/ssh-public-keys?{}", *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR ) }); -pub static DEMO_INSTANCE_NICS_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_NICS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/network-interfaces?project={}&instance={}", *DEMO_PROJECT_NAME, *DEMO_INSTANCE_NAME ) }); -pub static DEMO_INSTANCE_EXTERNAL_IPS_URL: Lazy = Lazy::new(|| { - format!( - "/v1/instances/{}/external-ips?{}", - *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_INSTANCE_CREATE: Lazy = - Lazy::new(|| params::InstanceCreate { +pub static DEMO_INSTANCE_EXTERNAL_IPS_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/instances/{}/external-ips?{}", + *DEMO_INSTANCE_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_INSTANCE_CREATE: LazyLock = + LazyLock::new(|| params::InstanceCreate { identity: IdentityMetadataCreateParams { name: DEMO_INSTANCE_NAME.clone(), description: String::from(""), @@ -513,8 +537,8 @@ pub static DEMO_INSTANCE_CREATE: Lazy = start: true, auto_restart_policy: Default::default(), }); -pub static DEMO_INSTANCE_UPDATE: Lazy = - Lazy::new(|| params::InstanceUpdate { +pub static DEMO_INSTANCE_UPDATE: LazyLock = + LazyLock::new(|| params::InstanceUpdate { boot_disk: None, auto_restart_policy: None, ncpus: InstanceCpuCount(1), @@ -522,17 +546,17 @@ pub static DEMO_INSTANCE_UPDATE: Lazy = }); // The instance needs a network interface, too. 
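One property worth noting in the endpoint constants above: they freely dereference one another (*DEMO_PROJECT_SELECTOR inside the instance URLs, and so on). LazyLock, like Lazy before it, initializes on first use, so the dependency chain resolves in the correct order regardless of declaration order. A self-contained sketch of the pattern (names here are illustrative):

    use std::sync::LazyLock;

    static PROJECT_NAME: LazyLock<String> =
        LazyLock::new(|| "demo-project".to_string());
    static PROJECT_SELECTOR: LazyLock<String> =
        LazyLock::new(|| format!("project={}", *PROJECT_NAME));
    static INSTANCE_URL: LazyLock<String> = LazyLock::new(|| {
        format!("/v1/instances/demo-instance?{}", *PROJECT_SELECTOR)
    });

    fn main() {
        // Forcing INSTANCE_URL transitively initializes the other two statics.
        assert_eq!(
            INSTANCE_URL.as_str(),
            "/v1/instances/demo-instance?project=demo-project"
        );
    }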
-pub static DEMO_INSTANCE_NIC_NAME: Lazy = - Lazy::new(|| nexus_defaults::DEFAULT_PRIMARY_NIC_NAME.parse().unwrap()); -pub static DEMO_INSTANCE_NIC_URL: Lazy = Lazy::new(|| { +pub static DEMO_INSTANCE_NIC_NAME: LazyLock = + LazyLock::new(|| nexus_defaults::DEFAULT_PRIMARY_NIC_NAME.parse().unwrap()); +pub static DEMO_INSTANCE_NIC_URL: LazyLock = LazyLock::new(|| { format!( "/v1/network-interfaces/{}?project={}&instance={}", *DEMO_INSTANCE_NIC_NAME, *DEMO_PROJECT_NAME, *DEMO_INSTANCE_NAME ) }); -pub static DEMO_INSTANCE_NIC_CREATE: Lazy< +pub static DEMO_INSTANCE_NIC_CREATE: LazyLock< params::InstanceNetworkInterfaceCreate, -> = Lazy::new(|| params::InstanceNetworkInterfaceCreate { +> = LazyLock::new(|| params::InstanceNetworkInterfaceCreate { identity: IdentityMetadataCreateParams { name: DEMO_INSTANCE_NIC_NAME.clone(), description: String::from(""), @@ -541,26 +565,27 @@ pub static DEMO_INSTANCE_NIC_CREATE: Lazy< subnet_name: DEMO_VPC_SUBNET_NAME.clone(), ip: None, }); -pub static DEMO_INSTANCE_NIC_PUT: Lazy = - Lazy::new(|| params::InstanceNetworkInterfaceUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some(String::from("an updated description")), - }, - primary: false, - transit_ips: vec![], - }); +pub static DEMO_INSTANCE_NIC_PUT: LazyLock< + params::InstanceNetworkInterfaceUpdate, +> = LazyLock::new(|| params::InstanceNetworkInterfaceUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some(String::from("an updated description")), + }, + primary: false, + transit_ips: vec![], +}); -pub static DEMO_CERTIFICATE_NAME: Lazy = - Lazy::new(|| "demo-certificate".parse().unwrap()); +pub static DEMO_CERTIFICATE_NAME: LazyLock = + LazyLock::new(|| "demo-certificate".parse().unwrap()); pub const DEMO_CERTIFICATES_URL: &'static str = "/v1/certificates"; pub const DEMO_CERTIFICATE_URL: &'static str = "/v1/certificates/demo-certificate"; -pub static DEMO_CERTIFICATE: Lazy = Lazy::new(|| { +pub static DEMO_CERTIFICATE: LazyLock = LazyLock::new(|| { CertificateChain::new(format!("*.sys.{DNS_ZONE_EXTERNAL_TESTING}")) }); -pub static DEMO_CERTIFICATE_CREATE: Lazy = - Lazy::new(|| params::CertificateCreate { +pub static DEMO_CERTIFICATE_CREATE: LazyLock = + LazyLock::new(|| params::CertificateCreate { identity: IdentityMetadataCreateParams { name: DEMO_CERTIFICATE_NAME.clone(), description: String::from(""), @@ -572,21 +597,21 @@ pub static DEMO_CERTIFICATE_CREATE: Lazy = pub const DEMO_SWITCH_PORT_URL: &'static str = "/v1/system/hardware/switch-port"; -pub static DEMO_SWITCH_PORT_SETTINGS_APPLY_URL: Lazy = Lazy::new( - || { +pub static DEMO_SWITCH_PORT_SETTINGS_APPLY_URL: LazyLock = + LazyLock::new(|| { format!( "/v1/system/hardware/switch-port/qsfp7/settings?rack_id={}&switch_location={}", uuid::Uuid::new_v4(), "switch0", ) - }, -); -pub static DEMO_SWITCH_PORT_SETTINGS: Lazy = - Lazy::new(|| params::SwitchPortApplySettings { - port_settings: NameOrId::Name("portofino".parse().unwrap()), }); +pub static DEMO_SWITCH_PORT_SETTINGS: LazyLock< + params::SwitchPortApplySettings, +> = LazyLock::new(|| params::SwitchPortApplySettings { + port_settings: NameOrId::Name("portofino".parse().unwrap()), +}); /* TODO requires dpd access -pub static DEMO_SWITCH_PORT_STATUS_URL: Lazy = Lazy::new(|| { +pub static DEMO_SWITCH_PORT_STATUS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/system/hardware/switch-port/qsfp7/status?rack_id={}&switch_location={}", uuid::Uuid::new_v4(), @@ -595,9 +620,9 @@ pub static DEMO_SWITCH_PORT_STATUS_URL: Lazy = Lazy::new(|| { 
}); */ -pub static DEMO_LOOPBACK_CREATE_URL: Lazy = - Lazy::new(|| "/v1/system/networking/loopback-address".into()); -pub static DEMO_LOOPBACK_URL: Lazy = Lazy::new(|| { +pub static DEMO_LOOPBACK_CREATE_URL: LazyLock = + LazyLock::new(|| "/v1/system/networking/loopback-address".into()); +pub static DEMO_LOOPBACK_URL: LazyLock = LazyLock::new(|| { format!( "/v1/system/networking/loopback-address/{}/{}/{}", uuid::Uuid::new_v4(), @@ -605,8 +630,8 @@ pub static DEMO_LOOPBACK_URL: Lazy = Lazy::new(|| { "203.0.113.99/24", ) }); -pub static DEMO_LOOPBACK_CREATE: Lazy = - Lazy::new(|| params::LoopbackAddressCreate { +pub static DEMO_LOOPBACK_CREATE: LazyLock = + LazyLock::new(|| params::LoopbackAddressCreate { address_lot: NameOrId::Name("parkinglot".parse().unwrap()), rack_id: uuid::Uuid::new_v4(), switch_location: "switch0".parse().unwrap(), @@ -619,9 +644,9 @@ pub const DEMO_SWITCH_PORT_SETTINGS_URL: &'static str = "/v1/system/networking/switch-port-settings?port_settings=portofino"; pub const DEMO_SWITCH_PORT_SETTINGS_INFO_URL: &'static str = "/v1/system/networking/switch-port-settings/protofino"; -pub static DEMO_SWITCH_PORT_SETTINGS_CREATE: Lazy< +pub static DEMO_SWITCH_PORT_SETTINGS_CREATE: LazyLock< params::SwitchPortSettingsCreate, -> = Lazy::new(|| { +> = LazyLock::new(|| { params::SwitchPortSettingsCreate::new(IdentityMetadataCreateParams { name: "portofino".parse().unwrap(), description: "just a port".into(), @@ -634,8 +659,8 @@ pub const DEMO_ADDRESS_LOT_URL: &'static str = "/v1/system/networking/address-lot/parkinglot"; pub const DEMO_ADDRESS_LOT_BLOCKS_URL: &'static str = "/v1/system/networking/address-lot/parkinglot/blocks"; -pub static DEMO_ADDRESS_LOT_CREATE: Lazy = - Lazy::new(|| params::AddressLotCreate { +pub static DEMO_ADDRESS_LOT_CREATE: LazyLock = + LazyLock::new(|| params::AddressLotCreate { identity: IdentityMetadataCreateParams { name: "parkinglot".parse().unwrap(), description: "an address parking lot".into(), @@ -649,8 +674,8 @@ pub static DEMO_ADDRESS_LOT_CREATE: Lazy = pub const DEMO_BGP_CONFIG_CREATE_URL: &'static str = "/v1/system/networking/bgp?name_or_id=as47"; -pub static DEMO_BGP_CONFIG: Lazy = - Lazy::new(|| params::BgpConfigCreate { +pub static DEMO_BGP_CONFIG: LazyLock = + LazyLock::new(|| params::BgpConfigCreate { identity: IdentityMetadataCreateParams { name: "as47".parse().unwrap(), description: "BGP config for AS47".into(), @@ -663,8 +688,8 @@ pub static DEMO_BGP_CONFIG: Lazy = }); pub const DEMO_BGP_ANNOUNCE_SET_URL: &'static str = "/v1/system/networking/bgp-announce-set"; -pub static DEMO_BGP_ANNOUNCE: Lazy = - Lazy::new(|| params::BgpAnnounceSetCreate { +pub static DEMO_BGP_ANNOUNCE: LazyLock = + LazyLock::new(|| params::BgpAnnounceSetCreate { identity: IdentityMetadataCreateParams { name: "a-bag-of-addrs".parse().unwrap(), description: "a bag of addrs".into(), @@ -696,8 +721,8 @@ pub const DEMO_BFD_ENABLE_URL: &'static str = pub const DEMO_BFD_DISABLE_URL: &'static str = "/v1/system/networking/bfd-disable"; -pub static DEMO_BFD_ENABLE: Lazy = - Lazy::new(|| params::BfdSessionEnable { +pub static DEMO_BFD_ENABLE: LazyLock = + LazyLock::new(|| params::BfdSessionEnable { local: None, remote: "10.0.0.1".parse().unwrap(), detection_threshold: 3, @@ -706,36 +731,37 @@ pub static DEMO_BFD_ENABLE: Lazy = mode: omicron_common::api::external::BfdMode::MultiHop, }); -pub static DEMO_BFD_DISABLE: Lazy = - Lazy::new(|| params::BfdSessionDisable { +pub static DEMO_BFD_DISABLE: LazyLock = + LazyLock::new(|| params::BfdSessionDisable { remote: 
"10.0.0.1".parse().unwrap(), switch: "switch0".parse().unwrap(), }); // Project Images -pub static DEMO_IMAGE_NAME: Lazy = - Lazy::new(|| "demo-image".parse().unwrap()); -pub static DEMO_PROJECT_IMAGES_URL: Lazy = - Lazy::new(|| format!("/v1/images?project={}", *DEMO_PROJECT_NAME)); -pub static DEMO_PROJECT_IMAGE_URL: Lazy = Lazy::new(|| { +pub static DEMO_IMAGE_NAME: LazyLock = + LazyLock::new(|| "demo-image".parse().unwrap()); +pub static DEMO_PROJECT_IMAGES_URL: LazyLock = + LazyLock::new(|| format!("/v1/images?project={}", *DEMO_PROJECT_NAME)); +pub static DEMO_PROJECT_IMAGE_URL: LazyLock = LazyLock::new(|| { format!("/v1/images/{}?project={}", *DEMO_IMAGE_NAME, *DEMO_PROJECT_NAME) }); -pub static DEMO_PROJECT_PROMOTE_IMAGE_URL: Lazy = Lazy::new(|| { - format!( - "/v1/images/{}/promote?project={}", - *DEMO_IMAGE_NAME, *DEMO_PROJECT_NAME - ) -}); +pub static DEMO_PROJECT_PROMOTE_IMAGE_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/images/{}/promote?project={}", + *DEMO_IMAGE_NAME, *DEMO_PROJECT_NAME + ) + }); -pub static DEMO_SILO_DEMOTE_IMAGE_URL: Lazy = Lazy::new(|| { +pub static DEMO_SILO_DEMOTE_IMAGE_URL: LazyLock = LazyLock::new(|| { format!( "/v1/images/{}/demote?project={}", *DEMO_IMAGE_NAME, *DEMO_PROJECT_NAME ) }); -pub static DEMO_IMAGE_CREATE: Lazy = - Lazy::new(|| params::ImageCreate { +pub static DEMO_IMAGE_CREATE: LazyLock = + LazyLock::new(|| params::ImageCreate { identity: IdentityMetadataCreateParams { name: DEMO_IMAGE_NAME.clone(), description: String::from(""), @@ -746,49 +772,50 @@ pub static DEMO_IMAGE_CREATE: Lazy = }); // IP Pools -pub static DEMO_IP_POOLS_PROJ_URL: Lazy = - Lazy::new(|| "/v1/ip-pools".to_string()); +pub static DEMO_IP_POOLS_PROJ_URL: LazyLock = + LazyLock::new(|| "/v1/ip-pools".to_string()); pub const DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools"; -pub static DEMO_IP_POOL_NAME: Lazy = - Lazy::new(|| "default".parse().unwrap()); -pub static DEMO_IP_POOL_CREATE: Lazy = - Lazy::new(|| params::IpPoolCreate { +pub static DEMO_IP_POOL_NAME: LazyLock = + LazyLock::new(|| "default".parse().unwrap()); +pub static DEMO_IP_POOL_CREATE: LazyLock = + LazyLock::new(|| params::IpPoolCreate { identity: IdentityMetadataCreateParams { name: DEMO_IP_POOL_NAME.clone(), description: String::from("an IP pool"), }, }); -pub static DEMO_IP_POOL_PROJ_URL: Lazy = Lazy::new(|| { +pub static DEMO_IP_POOL_PROJ_URL: LazyLock = LazyLock::new(|| { format!( "/v1/ip-pools/{}?project={}", *DEMO_IP_POOL_NAME, *DEMO_PROJECT_NAME ) }); -pub static DEMO_IP_POOL_URL: Lazy = - Lazy::new(|| format!("/v1/system/ip-pools/{}", *DEMO_IP_POOL_NAME)); -pub static DEMO_IP_POOL_UTILIZATION_URL: Lazy = - Lazy::new(|| format!("{}/utilization", *DEMO_IP_POOL_URL)); -pub static DEMO_IP_POOL_UPDATE: Lazy = - Lazy::new(|| params::IpPoolUpdate { +pub static DEMO_IP_POOL_URL: LazyLock = + LazyLock::new(|| format!("/v1/system/ip-pools/{}", *DEMO_IP_POOL_NAME)); +pub static DEMO_IP_POOL_UTILIZATION_URL: LazyLock = + LazyLock::new(|| format!("{}/utilization", *DEMO_IP_POOL_URL)); +pub static DEMO_IP_POOL_UPDATE: LazyLock = + LazyLock::new(|| params::IpPoolUpdate { identity: IdentityMetadataUpdateParams { name: None, description: Some(String::from("a new IP pool")), }, }); -pub static DEMO_IP_POOL_SILOS_URL: Lazy = - Lazy::new(|| format!("{}/silos", *DEMO_IP_POOL_URL)); -pub static DEMO_IP_POOL_SILOS_BODY: Lazy = - Lazy::new(|| params::IpPoolLinkSilo { +pub static DEMO_IP_POOL_SILOS_URL: LazyLock = + LazyLock::new(|| format!("{}/silos", *DEMO_IP_POOL_URL)); +pub static 
DEMO_IP_POOL_SILOS_BODY: LazyLock = + LazyLock::new(|| params::IpPoolLinkSilo { silo: NameOrId::Id(DEFAULT_SILO.identity().id), is_default: true, // necessary for demo instance create to go through }); -pub static DEMO_IP_POOL_SILO_URL: Lazy = - Lazy::new(|| format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME)); -pub static DEMO_IP_POOL_SILO_UPDATE_BODY: Lazy = - Lazy::new(|| params::IpPoolSiloUpdate { is_default: false }); +pub static DEMO_IP_POOL_SILO_URL: LazyLock = LazyLock::new(|| { + format!("{}/silos/{}", *DEMO_IP_POOL_URL, *DEMO_SILO_NAME) +}); +pub static DEMO_IP_POOL_SILO_UPDATE_BODY: LazyLock = + LazyLock::new(|| params::IpPoolSiloUpdate { is_default: false }); -pub static DEMO_IP_POOL_RANGE: Lazy = Lazy::new(|| { +pub static DEMO_IP_POOL_RANGE: LazyLock = LazyLock::new(|| { IpRange::V4( Ipv4Range::new( std::net::Ipv4Addr::new(10, 0, 0, 0), @@ -797,34 +824,34 @@ pub static DEMO_IP_POOL_RANGE: Lazy = Lazy::new(|| { .unwrap(), ) }); -pub static DEMO_IP_POOL_RANGES_URL: Lazy = - Lazy::new(|| format!("{}/ranges", *DEMO_IP_POOL_URL)); -pub static DEMO_IP_POOL_RANGES_ADD_URL: Lazy = - Lazy::new(|| format!("{}/add", *DEMO_IP_POOL_RANGES_URL)); -pub static DEMO_IP_POOL_RANGES_DEL_URL: Lazy = - Lazy::new(|| format!("{}/remove", *DEMO_IP_POOL_RANGES_URL)); +pub static DEMO_IP_POOL_RANGES_URL: LazyLock = + LazyLock::new(|| format!("{}/ranges", *DEMO_IP_POOL_URL)); +pub static DEMO_IP_POOL_RANGES_ADD_URL: LazyLock = + LazyLock::new(|| format!("{}/add", *DEMO_IP_POOL_RANGES_URL)); +pub static DEMO_IP_POOL_RANGES_DEL_URL: LazyLock = + LazyLock::new(|| format!("{}/remove", *DEMO_IP_POOL_RANGES_URL)); // IP Pools (Services) pub const DEMO_IP_POOL_SERVICE_URL: &'static str = "/v1/system/ip-pools-service"; -pub static DEMO_IP_POOL_SERVICE_RANGES_URL: Lazy = - Lazy::new(|| format!("{}/ranges", DEMO_IP_POOL_SERVICE_URL)); -pub static DEMO_IP_POOL_SERVICE_RANGES_ADD_URL: Lazy = - Lazy::new(|| format!("{}/add", *DEMO_IP_POOL_SERVICE_RANGES_URL)); -pub static DEMO_IP_POOL_SERVICE_RANGES_DEL_URL: Lazy = - Lazy::new(|| format!("{}/remove", *DEMO_IP_POOL_SERVICE_RANGES_URL)); +pub static DEMO_IP_POOL_SERVICE_RANGES_URL: LazyLock = + LazyLock::new(|| format!("{}/ranges", DEMO_IP_POOL_SERVICE_URL)); +pub static DEMO_IP_POOL_SERVICE_RANGES_ADD_URL: LazyLock = + LazyLock::new(|| format!("{}/add", *DEMO_IP_POOL_SERVICE_RANGES_URL)); +pub static DEMO_IP_POOL_SERVICE_RANGES_DEL_URL: LazyLock = + LazyLock::new(|| format!("{}/remove", *DEMO_IP_POOL_SERVICE_RANGES_URL)); // Snapshots -pub static DEMO_SNAPSHOT_NAME: Lazy = - Lazy::new(|| "demo-snapshot".parse().unwrap()); -pub static DEMO_SNAPSHOT_URL: Lazy = Lazy::new(|| { +pub static DEMO_SNAPSHOT_NAME: LazyLock = + LazyLock::new(|| "demo-snapshot".parse().unwrap()); +pub static DEMO_SNAPSHOT_URL: LazyLock = LazyLock::new(|| { format!( "/v1/snapshots/{}?project={}", *DEMO_SNAPSHOT_NAME, *DEMO_PROJECT_NAME ) }); -pub static DEMO_SNAPSHOT_CREATE: Lazy = - Lazy::new(|| params::SnapshotCreate { +pub static DEMO_SNAPSHOT_CREATE: LazyLock = + LazyLock::new(|| params::SnapshotCreate { identity: IdentityMetadataCreateParams { name: DEMO_SNAPSHOT_NAME.clone(), description: String::from(""), @@ -834,11 +861,11 @@ pub static DEMO_SNAPSHOT_CREATE: Lazy = // SSH keys pub const DEMO_SSHKEYS_URL: &'static str = "/v1/me/ssh-keys"; -pub static DEMO_SSHKEY_NAME: Lazy = - Lazy::new(|| "aaaaa-ssh-key".parse().unwrap()); +pub static DEMO_SSHKEY_NAME: LazyLock = + LazyLock::new(|| "aaaaa-ssh-key".parse().unwrap()); -pub static DEMO_SSHKEY_CREATE: Lazy = - Lazy::new(|| 
params::SshKeyCreate { +pub static DEMO_SSHKEY_CREATE: LazyLock = + LazyLock::new(|| params::SshKeyCreate { identity: IdentityMetadataCreateParams { name: DEMO_SSHKEY_NAME.clone(), description: "a demo key".to_string(), @@ -847,35 +874,37 @@ pub static DEMO_SSHKEY_CREATE: Lazy = public_key: "AAAAAAAAAAAAAAA".to_string(), }); -pub static DEMO_SPECIFIC_SSHKEY_URL: Lazy = - Lazy::new(|| format!("{}/{}", DEMO_SSHKEYS_URL, *DEMO_SSHKEY_NAME)); +pub static DEMO_SPECIFIC_SSHKEY_URL: LazyLock = + LazyLock::new(|| format!("{}/{}", DEMO_SSHKEYS_URL, *DEMO_SSHKEY_NAME)); // Project Floating IPs -pub static DEMO_FLOAT_IP_NAME: Lazy = - Lazy::new(|| "float-ip".parse().unwrap()); +pub static DEMO_FLOAT_IP_NAME: LazyLock = + LazyLock::new(|| "float-ip".parse().unwrap()); -pub static DEMO_FLOAT_IP_URL: Lazy = Lazy::new(|| { +pub static DEMO_FLOAT_IP_URL: LazyLock = LazyLock::new(|| { format!( "/v1/floating-ips/{}?project={}", *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_NAME ) }); -pub static DEMO_FLOATING_IP_ATTACH_URL: Lazy = Lazy::new(|| { - format!( - "/v1/floating-ips/{}/attach?{}", - *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_SELECTOR - ) -}); -pub static DEMO_FLOATING_IP_DETACH_URL: Lazy = Lazy::new(|| { - format!( - "/v1/floating-ips/{}/detach?{}", - *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_SELECTOR - ) -}); +pub static DEMO_FLOATING_IP_ATTACH_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/floating-ips/{}/attach?{}", + *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_SELECTOR + ) + }); +pub static DEMO_FLOATING_IP_DETACH_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/floating-ips/{}/detach?{}", + *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_SELECTOR + ) + }); -pub static DEMO_FLOAT_IP_CREATE: Lazy = - Lazy::new(|| params::FloatingIpCreate { +pub static DEMO_FLOAT_IP_CREATE: LazyLock = + LazyLock::new(|| params::FloatingIpCreate { identity: IdentityMetadataCreateParams { name: DEMO_FLOAT_IP_NAME.clone(), description: String::from("a new IP pool"), @@ -884,60 +913,61 @@ pub static DEMO_FLOAT_IP_CREATE: Lazy = pool: None, }); -pub static DEMO_FLOAT_IP_UPDATE: Lazy = - Lazy::new(|| params::FloatingIpUpdate { +pub static DEMO_FLOAT_IP_UPDATE: LazyLock = + LazyLock::new(|| params::FloatingIpUpdate { identity: IdentityMetadataUpdateParams { name: None, description: Some(String::from("an updated Floating IP")), }, }); -pub static DEMO_FLOAT_IP_ATTACH: Lazy = - Lazy::new(|| params::FloatingIpAttach { +pub static DEMO_FLOAT_IP_ATTACH: LazyLock = + LazyLock::new(|| params::FloatingIpAttach { kind: params::FloatingIpParentKind::Instance, parent: DEMO_FLOAT_IP_NAME.clone().into(), }); -pub static DEMO_EPHEMERAL_IP_ATTACH: Lazy = - Lazy::new(|| params::EphemeralIpCreate { pool: None }); +pub static DEMO_EPHEMERAL_IP_ATTACH: LazyLock = + LazyLock::new(|| params::EphemeralIpCreate { pool: None }); // Identity providers pub const IDENTITY_PROVIDERS_URL: &'static str = "/v1/system/identity-providers?silo=demo-silo"; pub const SAML_IDENTITY_PROVIDERS_URL: &'static str = "/v1/system/identity-providers/saml?silo=demo-silo"; -pub static DEMO_SAML_IDENTITY_PROVIDER_NAME: Lazy = - Lazy::new(|| "demo-saml-provider".parse().unwrap()); +pub static DEMO_SAML_IDENTITY_PROVIDER_NAME: LazyLock = + LazyLock::new(|| "demo-saml-provider".parse().unwrap()); -pub static SPECIFIC_SAML_IDENTITY_PROVIDER_URL: Lazy = - Lazy::new(|| { +pub static SPECIFIC_SAML_IDENTITY_PROVIDER_URL: LazyLock = + LazyLock::new(|| { format!( "/v1/system/identity-providers/saml/{}?silo=demo-silo", *DEMO_SAML_IDENTITY_PROVIDER_NAME ) }); -pub static SAML_IDENTITY_PROVIDER: Lazy = - 
Lazy::new(|| params::SamlIdentityProviderCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_SAML_IDENTITY_PROVIDER_NAME.clone(), - description: "a demo provider".to_string(), - }, +pub static SAML_IDENTITY_PROVIDER: LazyLock< + params::SamlIdentityProviderCreate, +> = LazyLock::new(|| params::SamlIdentityProviderCreate { + identity: IdentityMetadataCreateParams { + name: DEMO_SAML_IDENTITY_PROVIDER_NAME.clone(), + description: "a demo provider".to_string(), + }, - idp_metadata_source: params::IdpMetadataSource::Url { - url: HTTP_SERVER.url("/descriptor").to_string(), - }, + idp_metadata_source: params::IdpMetadataSource::Url { + url: HTTP_SERVER.url("/descriptor").to_string(), + }, - idp_entity_id: "entity_id".to_string(), - sp_client_id: "client_id".to_string(), - acs_url: "http://acs".to_string(), - slo_url: "http://slo".to_string(), - technical_contact_email: "technical@fake".to_string(), + idp_entity_id: "entity_id".to_string(), + sp_client_id: "client_id".to_string(), + acs_url: "http://acs".to_string(), + slo_url: "http://slo".to_string(), + technical_contact_email: "technical@fake".to_string(), - signing_keypair: None, + signing_keypair: None, - group_attribute_name: None, - }); + group_attribute_name: None, +}); -pub static DEMO_SYSTEM_METRICS_URL: Lazy = Lazy::new(|| { +pub static DEMO_SYSTEM_METRICS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}", Utc::now(), @@ -945,7 +975,7 @@ pub static DEMO_SYSTEM_METRICS_URL: Lazy = Lazy::new(|| { ) }); -pub static DEMO_SILO_METRICS_URL: Lazy = Lazy::new(|| { +pub static DEMO_SILO_METRICS_URL: LazyLock = LazyLock::new(|| { format!( "/v1/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}", Utc::now(), @@ -953,34 +983,35 @@ pub static DEMO_SILO_METRICS_URL: Lazy = Lazy::new(|| { ) }); -pub static TIMESERIES_QUERY_URL: Lazy = Lazy::new(|| { +pub static TIMESERIES_QUERY_URL: LazyLock = LazyLock::new(|| { format!("/v1/timeseries/query?project={}", *DEMO_PROJECT_NAME) }); -pub static SYSTEM_TIMESERIES_LIST_URL: Lazy = - Lazy::new(|| String::from("/v1/system/timeseries/schemas")); +pub static SYSTEM_TIMESERIES_LIST_URL: LazyLock = + LazyLock::new(|| String::from("/v1/system/timeseries/schemas")); -pub static SYSTEM_TIMESERIES_QUERY_URL: Lazy = - Lazy::new(|| String::from("/v1/system/timeseries/query")); +pub static SYSTEM_TIMESERIES_QUERY_URL: LazyLock = + LazyLock::new(|| String::from("/v1/system/timeseries/query")); -pub static DEMO_TIMESERIES_QUERY: Lazy = - Lazy::new(|| params::TimeseriesQuery { +pub static DEMO_TIMESERIES_QUERY: LazyLock = + LazyLock::new(|| params::TimeseriesQuery { query: String::from("get http_service:request_latency_histogram"), }); // Users -pub static DEMO_USER_CREATE: Lazy = - Lazy::new(|| params::UserCreate { +pub static DEMO_USER_CREATE: LazyLock = + LazyLock::new(|| params::UserCreate { external_id: UserId::from_str("dummy-user").unwrap(), password: params::UserPassword::LoginDisallowed, }); // Allowlist for user-facing services. 
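The metrics URL constants above format chrono timestamps with {:?}: the Debug representation of DateTime<Utc> is an RFC 3339 string (for example 2025-01-01T00:00:00Z), which is why Debug rather than Display formatting is used to build a parseable query string. A sketch of the construction (path copied from the constant above; whether ':' must be percent-encoded depends on how strictly the server parses the query string):

    use chrono::Utc;

    fn system_metrics_url() -> String {
        format!(
            "/v1/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}",
            Utc::now(),
            Utc::now(),
        )
    }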
-pub static ALLOW_LIST_URL: Lazy<String> = - Lazy::new(|| String::from("/v1/system/networking/allow-list")); -pub static ALLOW_LIST_UPDATE: Lazy<params::AllowListUpdate> = Lazy::new(|| { - params::AllowListUpdate { allowed_ips: AllowedSourceIps::Any } -}); +pub static ALLOW_LIST_URL: LazyLock<String> = + LazyLock::new(|| String::from("/v1/system/networking/allow-list")); +pub static ALLOW_LIST_UPDATE: LazyLock<params::AllowListUpdate> = + LazyLock::new(|| params::AllowListUpdate { + allowed_ips: AllowedSourceIps::Any, + }); /// Describes an API endpoint to be verified by the "unauthorized" test /// @@ -1126,1523 +1157,1327 @@ impl AllowedMethod { } } -pub static URL_USERS_DB_INIT: Lazy<String> = Lazy::new(|| { +pub static URL_USERS_DB_INIT: LazyLock<String> = LazyLock::new(|| { format!("/v1/system/users-builtin/{}", authn::USER_DB_INIT.name) }); /// List of endpoints to be verified -pub static VERIFY_ENDPOINTS: Lazy<Vec<VerifyEndpoint>> = Lazy::new(|| { - vec![ - // Global IAM policy - VerifyEndpoint { - url: &SYSTEM_POLICY_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value( - &shared::Policy::<shared::FleetRole> { - role_assignments: vec![] - } - ).unwrap() - ), - ], - }, - // IP Pools top-level endpoint - VerifyEndpoint { - url: &DEMO_IP_POOLS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap() - ), - ], - }, - VerifyEndpoint { - url: &DEMO_IP_POOLS_PROJ_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - - // Single IP Pool endpoint - VerifyEndpoint { - url: &DEMO_IP_POOL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&*DEMO_IP_POOL_UPDATE).unwrap() - ), - AllowedMethod::Delete, - ], - }, - VerifyEndpoint { - url: &DEMO_IP_POOL_PROJ_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - - // IP pool silos endpoint - VerifyEndpoint { - url: &DEMO_IP_POOL_SILOS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post(serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY).unwrap()), - ], - }, - VerifyEndpoint { - url: &DEMO_IP_POOL_SILO_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete, - AllowedMethod::Put(serde_json::to_value(&*DEMO_IP_POOL_SILO_UPDATE_BODY).unwrap()), - ], - }, - - // IP Pool ranges endpoint - VerifyEndpoint { - url: &DEMO_IP_POOL_RANGES_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - - // IP Pool ranges/add endpoint - VerifyEndpoint { - url: &DEMO_IP_POOL_RANGES_ADD_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap() - ), - ], - }, - - // IP Pool ranges/delete endpoint - VerifyEndpoint { - url: &DEMO_IP_POOL_RANGES_DEL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( -
serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap() - ), - ], - }, - - // IP pool utilization - VerifyEndpoint { - url: &DEMO_IP_POOL_UTILIZATION_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - // IP Pool endpoint (Oxide services) - VerifyEndpoint { - url: &DEMO_IP_POOL_SERVICE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - - // IP Pool ranges endpoint (Oxide services) - VerifyEndpoint { - url: &DEMO_IP_POOL_SERVICE_RANGES_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - - // IP Pool ranges/add endpoint (Oxide services) - VerifyEndpoint { - url: &DEMO_IP_POOL_SERVICE_RANGES_ADD_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap() - ), - ], - }, - - // IP Pool ranges/delete endpoint (Oxide services) - VerifyEndpoint { - url: &DEMO_IP_POOL_SERVICE_RANGES_DEL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap() - ), - ], - }, - - /* Silos */ - VerifyEndpoint { - url: "/v1/system/silos", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_SILO_CREATE).unwrap() - ) - ], - }, - VerifyEndpoint { - url: &DEMO_SILO_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - VerifyEndpoint { - url: &DEMO_SILO_IP_POOLS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - VerifyEndpoint { - url: &DEMO_SILO_POLICY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value( - &shared::Policy::<shared::SiloRole> { - role_assignments: vec![] - } - ).unwrap() - ), - ], - }, - VerifyEndpoint { - url: &DEMO_SILO_QUOTAS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value( - params::SiloQuotasCreate::empty() - ).unwrap() - ) - ], - }, - VerifyEndpoint { - url: "/v1/system/silo-quotas", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ], - }, - VerifyEndpoint { - url: "/v1/system/utilization/silos", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ] - }, - VerifyEndpoint { - url: &DEMO_SILO_UTIL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get - ] - }, - VerifyEndpoint { - url: "/v1/utilization", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get - ] - }, - VerifyEndpoint { - url: "/v1/policy", - visibility: Visibility::Public, -
unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value( - &shared::Policy::<shared::SiloRole> { - role_assignments: vec![] - } - ).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: "/v1/users", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: "/v1/groups", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - // non-existent UUID that will 404 - url: "/v1/groups/8d90b9a5-1cea-4a2b-9af4-71467dd33a04", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_USERS_LIST_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ AllowedMethod::Get ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_USERS_CREATE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value( - &*DEMO_USER_CREATE - ).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_USER_ID_GET_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_USER_ID_DELETE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_USER_ID_SET_PASSWORD_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::to_value( - params::UserPassword::LoginDisallowed - ).unwrap()), - ], - }, - - /* Projects */ - - // TODO-security TODO-correctness One thing that's a little strange - // here: we currently return a 404 if you attempt to create a Project - // inside an Organization and you're not authorized to do that. In an - // ideal world, we'd return a 403 if you can _see_ the Organization and - // a 404 if not. But we don't really know if you should be able to see - // the Organization. Right now, the only real way to tell that is if - // you have permissions on anything _inside_ the Organization, which is - // incredibly expensive to determine in general.
- // TODO: reevaluate the above comment and the change to unprivileged_access below - VerifyEndpoint { - url: "/v1/projects", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_PROJECT_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_PROJECT_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - AllowedMethod::Put( - serde_json::to_value(params::ProjectUpdate{ - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("different".to_string()) - }, - }).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_PROJECT_POLICY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value( - &shared::Policy::<shared::ProjectRole> { - role_assignments: vec![] - } - ).unwrap() - ), - ], - }, - - /* VPCs */ - VerifyEndpoint { - url: &DEMO_PROJECT_URL_VPCS, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_VPC_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_VPC_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&params::VpcUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("different".to_string()) - }, - dns_name: None, - }).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - /* Firewall rules */ - VerifyEndpoint { - url: &DEMO_VPC_URL_FIREWALL_RULES, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(VpcFirewallRuleUpdateParams { - rules: vec![], - }).unwrap() - ), - ], - }, - - /* VPC Subnets */ - VerifyEndpoint { - url: &DEMO_VPC_URL_SUBNETS, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_VPC_SUBNET_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_VPC_SUBNET_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&params::VpcSubnetUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("different".to_string()) - }, - custom_router: None, - }).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_VPC_SUBNET_INTERFACES_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - /* VPC Routers */ - - VerifyEndpoint { - url: &DEMO_VPC_URL_ROUTERS, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_VPC_ROUTER_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_VPC_ROUTER_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, -
AllowedMethod::Put( - serde_json::to_value(&params::VpcRouterUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("different".to_string()) - }, - }).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - /* Router Routes */ - - VerifyEndpoint { - url: &DEMO_VPC_ROUTER_URL_ROUTES, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_ROUTER_ROUTE_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_ROUTER_ROUTE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&params::RouterRouteUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("different".to_string()) - }, - target: RouteTarget::Ip( - IpAddr::from(Ipv4Addr::new(127, 0, 0, 1))), - destination: RouteDestination::Subnet( - "loopback".parse().unwrap()), - }).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - /* Internet Gateways */ - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAYS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_INTERNET_GATEWAY_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAY_IP_POOLS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_INTERNET_GATEWAY_IP_POOL_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAY_IP_POOL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAY_IP_ADDRS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_INTERNET_GATEWAY_IP_ADDRESS_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_INTERNET_GATEWAY_IP_ADDR_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete, - ], - }, - - /* Disks */ - - VerifyEndpoint { - url: &DEMO_DISKS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_DISK_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_DISK_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_DISK_METRICS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_DISKS_URL, - visibility: Visibility::Protected, - unprivileged_access: 
UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ] - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_DISKS_ATTACH_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( +pub static VERIFY_ENDPOINTS: LazyLock<Vec<VerifyEndpoint>> = + LazyLock::new(|| { + vec![ + // Global IAM policy + VerifyEndpoint { + url: &SYSTEM_POLICY_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&shared::Policy::< + shared::FleetRole, + > { + role_assignments: vec![], + }) + .unwrap(), + ), + ], + }, + // IP Pools top-level endpoint + VerifyEndpoint { + url: &DEMO_IP_POOLS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_IP_POOLS_PROJ_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + // Single IP Pool endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_IP_POOL_UPDATE).unwrap(), + ), + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_IP_POOL_PROJ_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + // IP pool silos endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_SILOS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_SILOS_BODY) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_IP_POOL_SILO_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Delete, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_IP_POOL_SILO_UPDATE_BODY) + .unwrap(), + ), + ], + }, + // IP Pool ranges endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_RANGES_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + // IP Pool ranges/add endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_RANGES_ADD_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), + )], + }, + // IP Pool ranges/delete endpoint + VerifyEndpoint { + url: &DEMO_IP_POOL_RANGES_DEL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), + )], + }, + // IP pool utilization + VerifyEndpoint { + url: &DEMO_IP_POOL_UTILIZATION_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + // IP Pool endpoint (Oxide services) + VerifyEndpoint { + url: &DEMO_IP_POOL_SERVICE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + //
IP Pool ranges endpoint (Oxide services) + VerifyEndpoint { + url: &DEMO_IP_POOL_SERVICE_RANGES_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + // IP Pool ranges/add endpoint (Oxide services) + VerifyEndpoint { + url: &DEMO_IP_POOL_SERVICE_RANGES_ADD_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), + )], + }, + // IP Pool ranges/delete endpoint (Oxide services) + VerifyEndpoint { + url: &DEMO_IP_POOL_SERVICE_RANGES_DEL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_IP_POOL_RANGE).unwrap(), + )], + }, + /* Silos */ + VerifyEndpoint { + url: "/v1/system/silos", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_SILO_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_SILO_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_SILO_IP_POOLS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_SILO_POLICY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&shared::Policy::< + shared::SiloRole, + > { + role_assignments: vec![], + }) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_SILO_QUOTAS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(params::SiloQuotasCreate::empty()) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: "/v1/system/silo-quotas", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/system/utilization/silos", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_SILO_UTIL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/utilization", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/policy", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&shared::Policy::< + shared::SiloRole, + > { + role_assignments: vec![], + }) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: "/v1/users", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/groups", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: 
vec![AllowedMethod::Get], + }, + VerifyEndpoint { + // non-existent UUID that will 404 + url: "/v1/groups/8d90b9a5-1cea-4a2b-9af4-71467dd33a04", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_SILO_USERS_LIST_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_SILO_USERS_CREATE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_USER_CREATE).unwrap(), + )], + }, + VerifyEndpoint { + url: &DEMO_SILO_USER_ID_GET_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_SILO_USER_ID_DELETE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_SILO_USER_ID_SET_PASSWORD_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(params::UserPassword::LoginDisallowed) + .unwrap(), + )], + }, + /* Projects */ + // TODO-security TODO-correctness One thing that's a little strange + // here: we currently return a 404 if you attempt to create a Project + // inside an Organization and you're not authorized to do that. In an + // ideal world, we'd return a 403 if you can _see_ the Organization and + // a 404 if not. But we don't really know if you should be able to see + // the Organization. Right now, the only real way to tell that is if + // you have permissions on anything _inside_ the Organization, which is + // incredibly expensive to determine in general. 
+ // TODO: reevaluate the above comment and the change to unprivileged_access below + VerifyEndpoint { + url: "/v1/projects", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_PROJECT_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_PROJECT_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + AllowedMethod::Put( + serde_json::to_value(params::ProjectUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("different".to_string()), + }, + }) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_PROJECT_POLICY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&shared::Policy::< + shared::ProjectRole, + > { + role_assignments: vec![], + }) + .unwrap(), + ), + ], + }, + /* VPCs */ + VerifyEndpoint { + url: &DEMO_PROJECT_URL_VPCS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_VPC_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_VPC_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&params::VpcUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("different".to_string()), + }, + dns_name: None, + }) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + /* Firewall rules */ + VerifyEndpoint { + url: &DEMO_VPC_URL_FIREWALL_RULES, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(VpcFirewallRuleUpdateParams { + rules: vec![], + }) + .unwrap(), + ), + ], + }, + /* VPC Subnets */ + VerifyEndpoint { + url: &DEMO_VPC_URL_SUBNETS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_VPC_SUBNET_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_VPC_SUBNET_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&params::VpcSubnetUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("different".to_string()), + }, + custom_router: None, + }) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_VPC_SUBNET_INTERFACES_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* VPC Routers */ + VerifyEndpoint { + url: &DEMO_VPC_URL_ROUTERS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_VPC_ROUTER_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_VPC_ROUTER_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ +
AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&params::VpcRouterUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("different".to_string()), + }, + }) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + /* Router Routes */ + VerifyEndpoint { + url: &DEMO_VPC_ROUTER_URL_ROUTES, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_ROUTER_ROUTE_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_ROUTER_ROUTE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&params::RouterRouteUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("different".to_string()), + }, + target: RouteTarget::Ip(IpAddr::from( + Ipv4Addr::new(127, 0, 0, 1), + )), + destination: RouteDestination::Subnet( + "loopback".parse().unwrap(), + ), + }) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + /* Internet Gateways */ + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAYS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_INTERNET_GATEWAY_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAY_IP_POOLS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + AllowedMethod::Post( + serde_json::to_value( + &*DEMO_INTERNET_GATEWAY_IP_POOL_CREATE, + ) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAY_IP_POOL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAY_IP_ADDRS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, + AllowedMethod::Post( + serde_json::to_value( + &*DEMO_INTERNET_GATEWAY_IP_ADDRESS_CREATE, + ) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INTERNET_GATEWAY_IP_ADDR_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + /* Disks */ + VerifyEndpoint { + url: &DEMO_DISKS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_DISK_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_DISK_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_DISK_METRICS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_DISKS_URL, + visibility: Visibility::Protected, + unprivileged_access: 
UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_DISKS_ATTACH_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( serde_json::to_value(params::DiskPath { - disk: DEMO_DISK_NAME.clone().into() - }).unwrap() - ) - ], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_DISKS_DETACH_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( + disk: DEMO_DISK_NAME.clone().into(), + }) + .unwrap(), + )], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_DISKS_DETACH_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( serde_json::to_value(params::DiskPath { - disk: DEMO_DISK_NAME.clone().into() - }).unwrap() - ) - ], - }, - - VerifyEndpoint { - url: &DEMO_IMPORT_DISK_BULK_WRITE_START_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::value::Value::Null), - ], - }, - - VerifyEndpoint { - url: &DEMO_IMPORT_DISK_BULK_WRITE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(params::ImportBlocksBulkWrite { - offset: 0, - base64_encoded_data: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==".into(), - }).unwrap()), - ], - }, - - VerifyEndpoint { - url: &DEMO_IMPORT_DISK_BULK_WRITE_STOP_URL, - visibility: Visibility::Protected, - unprivileged_access: 
UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::value::Value::Null), - ], - }, - - VerifyEndpoint { - url: &DEMO_IMPORT_DISK_FINALIZE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::from_str("{}").unwrap()), - ], - }, - - /* Project images */ - - VerifyEndpoint { - url: &DEMO_PROJECT_URL_IMAGES, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_IMAGE_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_PROJECT_IMAGE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_PROJECT_PROMOTE_IMAGE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::value::Value::Null), - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_DEMOTE_IMAGE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::value::Value::Null), - ], - }, - - /* Snapshots */ - - VerifyEndpoint { - url: &DEMO_PROJECT_URL_SNAPSHOTS, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(DEMO_SNAPSHOT_CREATE.clone()).unwrap(), - ) - ] - }, - - VerifyEndpoint { - url: &DEMO_SNAPSHOT_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ] - }, - - /* Instances */ - VerifyEndpoint { - url: &DEMO_PROJECT_URL_INSTANCES, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_INSTANCE_CREATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - AllowedMethod::Put( - serde_json::to_value(&*DEMO_INSTANCE_UPDATE).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_START_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::Value::Null) - ], - }, - VerifyEndpoint { - url: &DEMO_INSTANCE_STOP_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::Value::Null) - ], - }, - VerifyEndpoint { - url: &DEMO_INSTANCE_REBOOT_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post(serde_json::Value::Null) - ], - }, - VerifyEndpoint { - url: &DEMO_INSTANCE_SERIAL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent // has required query parameters - ], - }, - VerifyEndpoint { - url: &DEMO_INSTANCE_SERIAL_STREAM_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - 
AllowedMethod::GetWebsocket - ], - }, - /* Instance NICs */ - VerifyEndpoint { - url: &DEMO_INSTANCE_NICS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_INSTANCE_NIC_CREATE).unwrap() - ), - ], - }, - VerifyEndpoint { - url: &DEMO_INSTANCE_NIC_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - AllowedMethod::Put( - serde_json::to_value(&*DEMO_INSTANCE_NIC_PUT).unwrap() - ), - ], - }, - - /* Instance external IP addresses */ - VerifyEndpoint { - url: &DEMO_INSTANCE_EXTERNAL_IPS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_EPHEMERAL_IP_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_EPHEMERAL_IP_ATTACH).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_INSTANCE_SSH_KEYS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get] - }, - - /* IAM */ - - VerifyEndpoint { - url: "/v1/system/roles", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - VerifyEndpoint { - url: "/v1/system/roles/fleet.admin", - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/users-builtin", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - VerifyEndpoint { - url: &URL_USERS_DB_INIT, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - /* Hardware */ - - VerifyEndpoint { - url: "/v1/system/hardware/racks", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &HARDWARE_RACK_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &HARDWARE_UNINITIALIZED_SLEDS, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: "/v1/system/hardware/sleds", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get, AllowedMethod::Post( - serde_json::to_value(&*DEMO_UNINITIALIZED_SLED).unwrap() - )], - }, - - VerifyEndpoint { - url: &SLED_INSTANCES_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &HARDWARE_SLED_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &HARDWARE_SLED_PROVISION_POLICY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Put( - 
serde_json::to_value(&*DEMO_SLED_PROVISION_POLICY).unwrap() - )], - }, - - VerifyEndpoint { - url: "/v1/system/hardware/switches", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - // TODO: Switches should be configured alongside sled agents during test setup - VerifyEndpoint { - url: &HARDWARE_SWITCH_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::GetNonexistent], - }, - - VerifyEndpoint { - url: &HARDWARE_DISKS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - VerifyEndpoint { - url: &HARDWARE_DISK_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - - VerifyEndpoint { - url: &HARDWARE_SLED_DISK_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - - /* Support Bundles */ - - VerifyEndpoint { - url: &SUPPORT_BUNDLES_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post(serde_json::to_value(()).unwrap()) - ], - }, - - VerifyEndpoint { - url: &SUPPORT_BUNDLE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - - /* Updates */ - - VerifyEndpoint { - url: "/v1/system/update/repository?file_name=demo-repo.zip", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Put( - // In reality this is the contents of a zip file. - serde_json::Value::Null, - )], - }, - - VerifyEndpoint { - url: "/v1/system/update/repository/1.0.0", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - // The update system is disabled, which causes a 500 error even for - // privileged users. That is captured by GetUnimplemented. 
- allowed_methods: vec![AllowedMethod::GetUnimplemented], - }, - - /* Metrics */ - - VerifyEndpoint { - url: &DEMO_SYSTEM_METRICS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &DEMO_SILO_METRICS_URL, - visibility: Visibility::Public, - // unprivileged user has silo read, otherwise they wouldn't be able - // to do anything - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &TIMESERIES_QUERY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_TIMESERIES_QUERY).unwrap() - ), - ], - }, - - VerifyEndpoint { - url: &SYSTEM_TIMESERIES_LIST_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetVolatile, - ], - }, - - VerifyEndpoint { - url: &SYSTEM_TIMESERIES_QUERY_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_TIMESERIES_QUERY).unwrap() - ), - ], - }, - - /* Silo identity providers */ - - VerifyEndpoint { - url: &IDENTITY_PROVIDERS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &SAML_IDENTITY_PROVIDERS_URL, - // The visibility here deserves some explanation. In order to - // create a real SAML identity provider for doing tests, we have to - // do it in a non-default Silo (because the default one does not - // support creating a SAML identity provider). But unprivileged - // users won't be able to see that Silo. So from their perspective, - // it's like an object in a container they can't see (which is what - // Visibility::Protected means). 
- visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Post( - serde_json::to_value(&*SAML_IDENTITY_PROVIDER).unwrap(), - )], - }, - VerifyEndpoint { - url: &SPECIFIC_SAML_IDENTITY_PROVIDER_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], - }, - /* Misc */ - - VerifyEndpoint { - url: "/v1/me", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - VerifyEndpoint { - url: "/v1/me/groups", - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::ReadOnly, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - /* SSH keys */ - - VerifyEndpoint { - url: &DEMO_SSHKEYS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::Full, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_SSHKEY_CREATE).unwrap(), - ), - ], - }, - VerifyEndpoint { - url: &DEMO_SPECIFIC_SSHKEY_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::Full, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - - /* Certificates */ - VerifyEndpoint { - url: &DEMO_CERTIFICATES_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_CERTIFICATE_CREATE).unwrap(), - ), - ], - }, - VerifyEndpoint { - url: &DEMO_CERTIFICATE_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Delete, - ], - }, - - /* External Networking */ - - VerifyEndpoint { - url: &DEMO_SWITCH_PORT_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - - /* TODO requires dpd access - VerifyEndpoint { - url: &DEMO_SWITCH_PORT_STATUS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - ], - }, - */ - - - VerifyEndpoint { - url: &DEMO_SWITCH_PORT_SETTINGS_APPLY_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete, - AllowedMethod::Post( - serde_json::to_value(&*DEMO_SWITCH_PORT_SETTINGS).unwrap(), - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_ADDRESS_LOTS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_ADDRESS_LOT_CREATE).unwrap(), - ), - AllowedMethod::Get - ], - }, - - VerifyEndpoint { - url: &DEMO_ADDRESS_LOT_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete, - ] - }, - - VerifyEndpoint { - url: &DEMO_ADDRESS_LOT_BLOCKS_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent - ], - }, - - VerifyEndpoint { - url: &DEMO_LOOPBACK_CREATE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_LOOPBACK_CREATE).unwrap(), - ), - AllowedMethod::Get, - ], - }, - - VerifyEndpoint 
{ - url: &DEMO_LOOPBACK_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete - ], - }, - - VerifyEndpoint { - url: &DEMO_SWITCH_PORT_SETTINGS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value( - &*DEMO_SWITCH_PORT_SETTINGS_CREATE).unwrap(), - ), - AllowedMethod::Get, - AllowedMethod::Delete - ], - }, - - VerifyEndpoint { - url: &DEMO_SWITCH_PORT_SETTINGS_INFO_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_CONFIG_CREATE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_BGP_CONFIG).unwrap(), - ), - AllowedMethod::Get, - AllowedMethod::Delete - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_ANNOUNCE_SET_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Put( - serde_json::to_value(&*DEMO_BGP_ANNOUNCE).unwrap(), - ), - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_ANNOUNCE_SET_DELETE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Delete - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_ANNOUNCEMENT_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_STATUS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_EXPORTED_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_ROUTES_IPV4_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BGP_MESSAGE_HISTORY_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BFD_STATUS_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::GetNonexistent, - ], - }, - - VerifyEndpoint { - url: &DEMO_BFD_ENABLE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_BFD_ENABLE).unwrap() - ) - ], - }, - - VerifyEndpoint { - url: &DEMO_BFD_DISABLE_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_BFD_DISABLE).unwrap() - ) - ], - }, - - // Floating IPs - VerifyEndpoint { - url: &DEMO_PROJECT_URL_FIPS, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(&*DEMO_FLOAT_IP_CREATE).unwrap(), - ), - AllowedMethod::Get, - ], - }, - - VerifyEndpoint { - url: 
&DEMO_FLOAT_IP_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&*DEMO_FLOAT_IP_UPDATE).unwrap() - ), - AllowedMethod::Delete, - ], - }, - - VerifyEndpoint { - url: &DEMO_FLOATING_IP_ATTACH_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( + disk: DEMO_DISK_NAME.clone().into(), + }) + .unwrap(), + )], + }, + VerifyEndpoint { + url: &DEMO_IMPORT_DISK_BULK_WRITE_START_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::value::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_IMPORT_DISK_BULK_WRITE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: { + use base64::prelude::*; + vec![AllowedMethod::Post( + serde_json::to_value(params::ImportBlocksBulkWrite { + offset: 0, + base64_encoded_data: BASE64_STANDARD + .encode([0; 4096]), + }) + .unwrap(), + )] + }, + }, + VerifyEndpoint { + url: &DEMO_IMPORT_DISK_BULK_WRITE_STOP_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::value::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_IMPORT_DISK_FINALIZE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::from_str("{}").unwrap(), + )], + }, + /* Project images */ + VerifyEndpoint { + url: &DEMO_PROJECT_URL_IMAGES, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_IMAGE_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_PROJECT_IMAGE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_PROJECT_PROMOTE_IMAGE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::value::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_SILO_DEMOTE_IMAGE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::value::Value::Null, + )], + }, + /* Snapshots */ + VerifyEndpoint { + url: &DEMO_PROJECT_URL_SNAPSHOTS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(DEMO_SNAPSHOT_CREATE.clone()) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_SNAPSHOT_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + /* Instances */ + VerifyEndpoint { + url: &DEMO_PROJECT_URL_INSTANCES, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_INSTANCE_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_URL, + visibility: Visibility::Protected, + unprivileged_access: 
UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_INSTANCE_UPDATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_START_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_STOP_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_REBOOT_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::Value::Null, + )], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_SERIAL_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::GetNonexistent, // has required query parameters + ], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_SERIAL_STREAM_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetWebsocket], + }, + /* Instance NICs */ + VerifyEndpoint { + url: &DEMO_INSTANCE_NICS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_INSTANCE_NIC_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_NIC_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_INSTANCE_NIC_PUT).unwrap(), + ), + ], + }, + /* Instance external IP addresses */ + VerifyEndpoint { + url: &DEMO_INSTANCE_EXTERNAL_IPS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_EPHEMERAL_IP_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_EPHEMERAL_IP_ATTACH) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_INSTANCE_SSH_KEYS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* IAM */ + VerifyEndpoint { + url: "/v1/system/roles", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/system/roles/fleet.admin", + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/system/users-builtin", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &URL_USERS_DB_INIT, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* Hardware */ + VerifyEndpoint { + url: "/v1/system/hardware/racks", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + 
allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_RACK_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_UNINITIALIZED_SLEDS, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/system/hardware/sleds", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_UNINITIALIZED_SLED) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &SLED_INSTANCES_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_SLED_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_SLED_PROVISION_POLICY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Put( + serde_json::to_value(&*DEMO_SLED_PROVISION_POLICY).unwrap(), + )], + }, + VerifyEndpoint { + url: "/v1/system/hardware/switches", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + // TODO: Switches should be configured alongside sled agents during test setup + VerifyEndpoint { + url: &HARDWARE_SWITCH_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &HARDWARE_DISKS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_DISK_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &HARDWARE_SLED_DISK_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* Support Bundles */ + VerifyEndpoint { + url: &SUPPORT_BUNDLES_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post(serde_json::to_value(()).unwrap()), + ], + }, + VerifyEndpoint { + url: &SUPPORT_BUNDLE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + /* Updates */ + VerifyEndpoint { + url: "/v1/system/update/repository?file_name=demo-repo.zip", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Put( + // In reality this is the contents of a zip file. + serde_json::Value::Null, + )], + }, + VerifyEndpoint { + url: "/v1/system/update/repository/1.0.0", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + // The update system is disabled, which causes a 500 error even for + // privileged users. That is captured by GetUnimplemented. 
+ allowed_methods: vec![AllowedMethod::GetUnimplemented], + }, + /* Metrics */ + VerifyEndpoint { + url: &DEMO_SYSTEM_METRICS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &DEMO_SILO_METRICS_URL, + visibility: Visibility::Public, + // unprivileged user has silo read, otherwise they wouldn't be able + // to do anything + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &TIMESERIES_QUERY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_TIMESERIES_QUERY).unwrap(), + )], + }, + VerifyEndpoint { + url: &SYSTEM_TIMESERIES_LIST_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetVolatile], + }, + VerifyEndpoint { + url: &SYSTEM_TIMESERIES_QUERY_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_TIMESERIES_QUERY).unwrap(), + )], + }, + /* Silo identity providers */ + VerifyEndpoint { + url: &IDENTITY_PROVIDERS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &SAML_IDENTITY_PROVIDERS_URL, + // The visibility here deserves some explanation. In order to + // create a real SAML identity provider for doing tests, we have to + // do it in a non-default Silo (because the default one does not + // support creating a SAML identity provider). But unprivileged + // users won't be able to see that Silo. So from their perspective, + // it's like an object in a container they can't see (which is what + // Visibility::Protected means). 
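(Editorial aside: the visibility convention documented in the comment above is worth making concrete. A minimal sketch, assuming the harness resolves expectations for an unprivileged caller roughly as follows; the helper and status mapping are hypothetical, not part of this diff.)

```rust,ignore
// Hypothetical helper: a "Public" endpoint admits it exists (403 for a
// caller without permission), while a "Protected" endpoint sits inside a
// container the caller cannot see, so it answers 404 instead.
fn expected_unprivileged_status(visibility: Visibility) -> http::StatusCode {
    match visibility {
        Visibility::Public => http::StatusCode::FORBIDDEN,
        Visibility::Protected => http::StatusCode::NOT_FOUND,
    }
}
```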
+ visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*SAML_IDENTITY_PROVIDER).unwrap(), + )], + }, + VerifyEndpoint { + url: &SPECIFIC_SAML_IDENTITY_PROVIDER_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* Misc */ + VerifyEndpoint { + url: "/v1/me", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: "/v1/me/groups", + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::ReadOnly, + allowed_methods: vec![AllowedMethod::Get], + }, + /* SSH keys */ + VerifyEndpoint { + url: &DEMO_SSHKEYS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::Full, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_SSHKEY_CREATE).unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_SPECIFIC_SSHKEY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::Full, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + /* Certificates */ + VerifyEndpoint { + url: &DEMO_CERTIFICATES_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_CERTIFICATE_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_CERTIFICATE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + /* External Networking */ + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + /* TODO requires dpd access + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_STATUS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + ], + }, + */ + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_SETTINGS_APPLY_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Delete, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_SWITCH_PORT_SETTINGS) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_ADDRESS_LOTS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_ADDRESS_LOT_CREATE) + .unwrap(), + ), + AllowedMethod::Get, + ], + }, + VerifyEndpoint { + url: &DEMO_ADDRESS_LOT_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_ADDRESS_LOT_BLOCKS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_LOOPBACK_CREATE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_LOOPBACK_CREATE).unwrap(), + ), + AllowedMethod::Get, + ], + }, + VerifyEndpoint { + url: &DEMO_LOOPBACK_URL, + visibility: 
Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_SETTINGS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value( + &*DEMO_SWITCH_PORT_SETTINGS_CREATE, + ) + .unwrap(), + ), + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_SETTINGS_INFO_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BGP_CONFIG_CREATE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_BGP_CONFIG).unwrap(), + ), + AllowedMethod::Get, + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_BGP_ANNOUNCE_SET_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Put( + serde_json::to_value(&*DEMO_BGP_ANNOUNCE).unwrap(), + ), + AllowedMethod::Get, + ], + }, + VerifyEndpoint { + url: &DEMO_BGP_ANNOUNCE_SET_DELETE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_BGP_ANNOUNCEMENT_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BGP_STATUS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BGP_EXPORTED_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BGP_ROUTES_IPV4_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BGP_MESSAGE_HISTORY_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BFD_STATUS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::GetNonexistent], + }, + VerifyEndpoint { + url: &DEMO_BFD_ENABLE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_BFD_ENABLE).unwrap(), + )], + }, + VerifyEndpoint { + url: &DEMO_BFD_DISABLE_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(&*DEMO_BFD_DISABLE).unwrap(), + )], + }, + // Floating IPs + VerifyEndpoint { + url: &DEMO_PROJECT_URL_FIPS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_FLOAT_IP_CREATE).unwrap(), + ), + AllowedMethod::Get, + ], + }, + VerifyEndpoint { + url: &DEMO_FLOAT_IP_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + 
AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_FLOAT_IP_UPDATE).unwrap(), + ), + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_FLOATING_IP_ATTACH_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( serde_json::to_value(&*DEMO_FLOAT_IP_ATTACH).unwrap(), - ), - ], - }, - - VerifyEndpoint { - url: &DEMO_FLOATING_IP_DETACH_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( + )], + }, + VerifyEndpoint { + url: &DEMO_FLOATING_IP_DETACH_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( serde_json::to_value(&()).unwrap(), - ), - ], - }, - - // User-facing services IP allowlist - VerifyEndpoint { - url: &ALLOW_LIST_URL, - visibility: Visibility::Public, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Get, - AllowedMethod::Put( - serde_json::to_value(&*ALLOW_LIST_UPDATE).unwrap(), - ), - ], - }, - ] -}); + )], + }, + // User-facing services IP allowlist + VerifyEndpoint { + url: &ALLOW_LIST_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*ALLOW_LIST_UPDATE).unwrap(), + ), + ], + }, + ] + }); diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index a15b3e8d4b3..2b9cb2c0a1d 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -209,9 +209,9 @@ async fn test_nexus_does_not_boot_without_valid_schema() { let s = nexus_db_model::SCHEMA_VERSION; let schemas_to_test = vec![ - semver::Version::new(s.0.major + 1, s.0.minor, s.0.patch), - semver::Version::new(s.0.major, s.0.minor + 1, s.0.patch), - semver::Version::new(s.0.major, s.0.minor, s.0.patch + 1), + semver::Version::new(s.major + 1, s.minor, s.patch), + semver::Version::new(s.major, s.minor + 1, s.patch), + semver::Version::new(s.major, s.minor, s.patch + 1), ]; for schema in schemas_to_test { @@ -260,9 +260,9 @@ async fn test_nexus_does_not_boot_without_valid_schema() { async fn test_nexus_does_not_boot_until_schema_updated() { let good_schema = nexus_db_model::SCHEMA_VERSION; let bad_schema = semver::Version::new( - good_schema.0.major + 1, - good_schema.0.minor, - good_schema.0.patch, + good_schema.major + 1, + good_schema.minor, + good_schema.patch, ); let mut config = load_test_config(); diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 68ee5aceadc..915cca87ffb 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -397,9 +397,15 @@ pub async fn execute_timeseries_query( // Check for a timeseries-not-found error specifically. 
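(Editorial aside: the metrics hunk that follows stops prefix-matching raw UTF-8 bytes and instead deserializes the structured error body. A sketch of that pattern, assuming dropshot's `HttpErrorResponseBody` shape; the variable names are illustrative.)

```rust,ignore
// Deserialize the error body rather than string-matching raw bytes; the
// `message` field is the stable place to look for the "not found" text.
let err: HttpErrorResponseBody =
    serde_json::from_slice(&rsp.body).expect("error body should be JSON");
if err.message.starts_with("Timeseries not found for: ") {
    return None; // "no such timeseries" becomes "no data", not a panic
}
```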
if rsp.status.is_client_error() { - let text = std::str::from_utf8(&rsp.body) - .expect("Timeseries query response body should be UTF-8"); - if text.starts_with("Timeseries not found for: ") { + let err = + rsp.parsed_body::<HttpErrorResponseBody>().unwrap_or_else(|e| { + panic!( + "could not parse body as `HttpErrorResponseBody`: {e:?}\n\ + query: {query}\nresponse: {rsp:#?}", + ) + }); + + if err.message.starts_with("Timeseries not found for: ") { return None; } } diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 9cc42800e88..063f4552515 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -14,12 +14,12 @@ use nexus_db_model::{AllSchemaVersions, SchemaVersion}; use nexus_db_queries::db::pub_test_utils::TestDatabase; use nexus_db_queries::db::DISALLOW_FULL_TABLE_SCAN_SQL; use nexus_test_utils::{load_test_config, ControlPlaneTestContextBuilder}; -use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::shared::SwitchLocation; use omicron_test_utils::dev::db::{Client, CockroachInstance}; use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::SledUuid; use pretty_assertions::{assert_eq, assert_ne}; +use semver::Version; use similar_asserts; use slog::Logger; use std::collections::BTreeMap; @@ -150,7 +150,7 @@ async fn apply_update( semver::Version::new(10, 0, 0), ]; - if NOT_IDEMPOTENT_VERSIONS.contains(&version.semver().0) { + if NOT_IDEMPOTENT_VERSIONS.contains(&version.semver()) { break; } } @@ -1820,47 +1820,43 @@ fn after_125_0_0(client: &Client) -> BoxFuture<'_, ()> { // // Each "check" is implemented as a pair of {before, after} migration function // pointers, called precisely around the migration under test. -fn get_migration_checks() -> BTreeMap<SemverVersion, DataMigrationFns> { +fn get_migration_checks() -> BTreeMap<Version, DataMigrationFns> { let mut map = BTreeMap::new(); map.insert( - SemverVersion(semver::Version::parse("23.0.0").unwrap()), + Version::new(23, 0, 0), DataMigrationFns { before: Some(before_23_0_0), after: after_23_0_0 }, ); map.insert( - SemverVersion(semver::Version::parse("24.0.0").unwrap()), + Version::new(24, 0, 0), DataMigrationFns { before: Some(before_24_0_0), after: after_24_0_0 }, ); map.insert( - SemverVersion(semver::Version::parse("37.0.1").unwrap()), + Version::new(37, 0, 1), DataMigrationFns { before: None, after: after_37_0_1 }, ); map.insert( - SemverVersion(semver::Version::parse("70.0.0").unwrap()), + Version::new(70, 0, 0), DataMigrationFns { before: Some(before_70_0_0), after: after_70_0_0 }, ); map.insert( - SemverVersion(semver::Version::parse("95.0.0").unwrap()), + Version::new(95, 0, 0), DataMigrationFns { before: Some(before_95_0_0), after: after_95_0_0 }, ); - map.insert( - SemverVersion(semver::Version::parse("101.0.0").unwrap()), + Version::new(101, 0, 0), DataMigrationFns { before: Some(before_101_0_0), after: after_101_0_0 }, ); - map.insert( - SemverVersion(semver::Version::parse("107.0.0").unwrap()), + Version::new(107, 0, 0), DataMigrationFns { before: Some(before_107_0_0), after: after_107_0_0 }, ); - map.insert( - SemverVersion::new(124, 0, 0), + Version::new(124, 0, 0), DataMigrationFns { before: Some(before_124_0_0), after: after_124_0_0 }, ); - map.insert( - SemverVersion::new(125, 0, 0), + Version::new(125, 0, 0), DataMigrationFns { before: Some(before_125_0_0), after: after_125_0_0 }, ); diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 45f87c96ce0..7ec33d97f89 100644 ---
a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -23,7 +23,7 @@ use nexus_test_utils_macros::nexus_test; use omicron_common::disk::DatasetKind; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::ZpoolUuid; -use once_cell::sync::Lazy; +use std::sync::LazyLock; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>; @@ -183,8 +183,8 @@ enum SetupReq { }, } -pub static HTTP_SERVER: Lazy<httptest::Server> = - Lazy::new(|| { +pub static HTTP_SERVER: LazyLock<httptest::Server> = + LazyLock::new(|| { // Run a httptest server let server = ServerBuilder::new().run().unwrap(); @@ -208,7 +208,7 @@ pub static HTTP_SERVER: Lazy<httptest::Server> = }); /// List of requests to execute at setup time -static SETUP_REQUESTS: Lazy<Vec<SetupReq>> = Lazy::new(|| { +static SETUP_REQUESTS: LazyLock<Vec<SetupReq>> = LazyLock::new(|| { vec![ // Create a separate Silo SetupReq::Post { diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index 37821a1b881..c67c25b579d 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -19,12 +19,12 @@ use nexus_test_utils::background::wait_tuf_artifact_replication_step; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils::{load_test_config, test_setup, test_setup_with_config}; use omicron_common::api::external::{ - SemverVersion, TufRepoGetResponse, TufRepoInsertResponse, - TufRepoInsertStatus, + TufRepoGetResponse, TufRepoInsertResponse, TufRepoInsertStatus, }; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_sled_agent::sim; use pretty_assertions::assert_eq; +use semver::Version; use serde::Deserialize; use std::collections::HashSet; use std::fmt::Debug; @@ -369,7 +369,7 @@ fn make_upload_request<'a>( fn make_get_request( client: &dropshot::test_util::ClientTestContext, - system_version: SemverVersion, + system_version: Version, expected_status: StatusCode, ) -> NexusRequest<'_> { let request = NexusRequest::new( diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 30916f3eb76..2d13d19ca97 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -50,6 +50,7 @@ nexus-sled-agent-shared.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true omicron-workspace-hack.workspace = true +semver.workspace = true # Note: we're trying to avoid a dependency from nexus-types to sled-agent-types # because the correct direction of dependency is unclear. If there are types # common to both, put them in `omicron-common` or `nexus-sled-agent-shared`. diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 41d0ac9434e..b597282f9a6 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -256,6 +256,24 @@ impl Blueprint { .filter(move |(_, z)| filter(z.disposition)) } + /// Iterate over the [`BlueprintPhysicalDiskConfig`] instances in the + /// blueprint that match the provided filter, along with the associated + /// sled id. + pub fn all_omicron_disks<F>( + &self, + mut filter: F, + ) -> impl Iterator<Item = (SledUuid, &BlueprintPhysicalDiskConfig)> + where + F: FnMut(BlueprintPhysicalDiskDisposition) -> bool, + { + self.blueprint_disks + .iter() + .flat_map(move |(sled_id, disks)| { + disks.disks.iter().map(|disk| (*sled_id, disk)) + }) + .filter(move |(_, d)| filter(d.disposition)) + } + /// Iterate over the [`BlueprintDatasetsConfig`] instances in the blueprint.
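(Editorial aside: a usage sketch for the `all_omicron_disks` iterator added above. The disposition predicate methods it composes with are introduced later in this diff; the `blueprint` value is assumed.)

```rust,ignore
// Collect every expunged disk, tagged with the sled it lives on. Any
// `fn(BlueprintPhysicalDiskDisposition) -> bool` works as the filter, so
// the disposition predicates slot in directly.
let expunged: Vec<(SledUuid, PhysicalDiskUuid)> = blueprint
    .all_omicron_disks(BlueprintPhysicalDiskDisposition::is_expunged)
    .map(|(sled_id, disk)| (sled_id, disk.id))
    .collect();
```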
pub fn all_omicron_datasets( &self, @@ -298,11 +316,19 @@ impl BpTableData for &BlueprintPhysicalDisksConfig { } fn rows(&self, state: BpDiffState) -> impl Iterator<Item = BpTableRow> { - let sorted_disk_ids: BTreeSet<DiskIdentity> = - self.disks.iter().map(|d| d.identity.clone()).collect(); + let mut disks: Vec<_> = self.disks.iter().cloned().collect(); + disks.sort_unstable_by_key(|d| d.identity.clone()); - sorted_disk_ids.into_iter().map(move |d| { - BpTableRow::from_strings(state, vec![d.vendor, d.model, d.serial]) + disks.into_iter().map(move |d| { + BpTableRow::from_strings( + state, + vec![ + d.identity.vendor, + d.identity.model, + d.identity.serial, + d.disposition.to_string(), + ], + ) }) } } @@ -869,34 +895,91 @@ pub enum BlueprintDatasetFilter { JsonSchema, Deserialize, Serialize, - EnumIter, Diffable, )] -#[serde(rename_all = "snake_case")] +#[serde(tag = "kind", rename_all = "snake_case")] pub enum BlueprintPhysicalDiskDisposition { /// The physical disk is in-service. InService, /// The physical disk is permanently gone. - Expunged, + Expunged { + /// Generation of the parent config in which this disk became expunged. + as_of_generation: Generation, + + /// True if Reconfiguration knows that this disk has been expunged. + /// + /// In the current implementation, this means either: + /// + /// a) the sled where the disk was residing has been expunged. + /// + /// b) the planner has observed an inventory collection where the + /// disk expungement was seen by the sled agent on the sled where the + /// disk was previously in service. This is indicated by the inventory + /// reporting a disk generation at least as high as `as_of_generation`. + ready_for_cleanup: bool, + }, } impl BlueprintPhysicalDiskDisposition { - /// Returns true if the disk disposition matches this filter. - pub fn matches(self, filter: DiskFilter) -> bool { + /// Always returns true. + /// + /// This is intended for use with methods that take a filtering + /// closure operating on a `BlueprintPhysicalDiskDisposition` (e.g., + /// `Blueprint::all_omicron_disks()`), allowing callers to make it clear + /// they accept any disposition via + /// + /// ```rust,ignore + /// blueprint.all_omicron_disks(BlueprintPhysicalDiskDisposition::any) + /// ``` + pub fn any(self) -> bool { + true + } + + /// Returns true if `self` is `BlueprintPhysicalDiskDisposition::InService`. + pub fn is_in_service(self) -> bool { + matches!(self, Self::InService) + } + + /// Returns true if `self` is `BlueprintPhysicalDiskDisposition::Expunged + /// { .. }`, regardless of the details contained within that variant. + pub fn is_expunged(self) -> bool { + matches!(self, Self::Expunged { .. }) + } + + /// Returns true if `self` is `BlueprintPhysicalDiskDisposition::Expunged + /// { ready_for_cleanup: true, .. }`. + pub fn is_ready_for_cleanup(self) -> bool { + matches!(self, Self::Expunged { ready_for_cleanup: true, .. }) + } + + /// Return the generation when a disk was expunged or `None` if the disk + /// was not expunged. + pub fn expunged_as_of_generation(&self) -> Option<Generation> { match self { - Self::InService => match filter { - DiskFilter::All => true, - DiskFilter::InService => true, - // TODO remove this variant? - DiskFilter::ExpungedButActive => false, - }, - Self::Expunged => match filter { - DiskFilter::All => true, - DiskFilter::InService => false, - // TODO remove this variant? - DiskFilter::ExpungedButActive => true, - }, + BlueprintPhysicalDiskDisposition::Expunged { + as_of_generation, + ..
} => Some(*as_of_generation), + _ => None, + } + } +} + +impl fmt::Display for BlueprintPhysicalDiskDisposition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + // Neither `write!(f, "...")` nor `f.write_str("...")` obey fill + // and alignment (used above), but this does. + BlueprintPhysicalDiskDisposition::InService => "in service".fmt(f), + BlueprintPhysicalDiskDisposition::Expunged { + ready_for_cleanup: true, + .. + } => "expunged ✓".fmt(f), + BlueprintPhysicalDiskDisposition::Expunged { + ready_for_cleanup: false, + .. + } => "expunged ⏳".fmt(f), } } } @@ -907,6 +990,7 @@ impl BlueprintPhysicalDiskDisposition { )] pub struct BlueprintPhysicalDiskConfig { pub disposition: BlueprintPhysicalDiskDisposition, + #[daft(leaf)] pub identity: DiskIdentity, pub id: PhysicalDiskUuid, pub pool_id: ZpoolUuid, @@ -939,7 +1023,7 @@ impl BlueprintPhysicalDisksConfig { .disks .into_iter() .filter_map(|d| { - if d.disposition.matches(DiskFilter::InService) { + if d.disposition.is_in_service() { Some(d.into()) } else { None diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs index c43476a353b..4a6a919a25d 100644 --- a/nexus/types/src/deployment/blueprint_diff.rs +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -14,16 +14,16 @@ use super::{ unwrap_or_none, zone_sort_key, BlueprintDatasetConfigDiff, BlueprintDatasetDisposition, BlueprintDatasetsConfigDiff, BlueprintDiff, BlueprintMetadata, BlueprintPhysicalDiskConfig, - BlueprintPhysicalDisksConfigDiff, BlueprintZoneConfigDiff, - BlueprintZonesConfigDiff, ClickhouseClusterConfig, + BlueprintPhysicalDiskConfigDiff, BlueprintPhysicalDisksConfigDiff, + BlueprintZoneConfigDiff, BlueprintZonesConfigDiff, ClickhouseClusterConfig, CockroachDbPreserveDowngrade, }; use daft::Diffable; use nexus_sled_agent_shared::inventory::ZoneKind; use omicron_common::api::external::{ByteCount, Generation}; -use omicron_common::disk::{CompressionAlgorithm, DatasetName, DiskIdentity}; +use omicron_common::disk::{CompressionAlgorithm, DatasetName}; use omicron_uuid_kinds::SledUuid; -use omicron_uuid_kinds::{DatasetUuid, OmicronZoneUuid}; +use omicron_uuid_kinds::{DatasetUuid, OmicronZoneUuid, PhysicalDiskUuid}; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{self, Write as _}; @@ -516,6 +516,26 @@ impl<'a> BlueprintDiffSummary<'a> { )) } + /// Iterate over all modified disks on a sled + pub fn modified_disks( + &'a self, + sled_id: &SledUuid, + ) -> Option<(BpDiffPhysicalDisksModified<'a>, BpDiffPhysicalDiskErrors)> + { + // Check if the sled is modified and there are any modified disks + let disks_cfg_diff = self.modified_disks_diff.get(sled_id)?; + let mut modified_disks = + disks_cfg_diff.disks.modified_values_diff().peekable(); + if modified_disks.peek().is_none() { + return None; + } + Some(BpDiffPhysicalDisksModified::new( + *disks_cfg_diff.generation.before, + *disks_cfg_diff.generation.after, + modified_disks, + )) + } + /// Iterate over all added datasets on a sled pub fn added_datasets( &self, @@ -916,7 +936,7 @@ pub struct DiffPhysicalDisksDetails { pub after_generation: Option<Generation>, // Disks added, removed, or unmodified - pub disks: Vec<DiskIdentity>, + pub disks: Vec<BlueprintPhysicalDiskConfig>, } impl DiffPhysicalDisksDetails { @@ -925,11 +945,8 @@ after_generation: Option<Generation>, disks_iter: impl Iterator<Item = &'a BlueprintPhysicalDiskConfig>, ) -> Self { - let mut disks: Vec<_> = disks_iter - .map(|disk_config| &disk_config.identity) - .cloned() - .collect(); - disks.sort_unstable(); + let mut disks: Vec<_> =
disks_iter.cloned().collect(); + disks.sort_unstable_by_key(|d| d.identity.clone()); DiffPhysicalDisksDetails { before_generation, after_generation, disks } } } @@ -946,21 +963,160 @@ impl BpTableData for DiffPhysicalDisksDetails { self.disks.iter().map(move |d| { BpTableRow::from_strings( state, - vec![d.vendor.clone(), d.model.clone(), d.serial.clone()], + vec![ + d.identity.vendor.clone(), + d.identity.model.clone(), + d.identity.serial.clone(), + d.disposition.to_string(), + ], ) }) } } +/// Errors arising from illegally modified physical disk fields +#[derive(Debug)] +pub struct BpDiffPhysicalDiskErrors { + pub generation_before: Generation, + pub generation_after: Generation, + pub errors: Vec<BpDiffPhysicalDiskError>, +} + +#[derive(Debug)] +pub struct BpDiffPhysicalDiskError { + pub disk_id: PhysicalDiskUuid, + pub reason: String, +} + +/// A modified-disk diff that has already been checked for errors ("parse, +/// don't validate"). +/// +/// We still keep the underlying diff representation for printing. +#[derive(Debug)] +pub struct ModifiedPhysicalDisk<'a> { + pub diff: BlueprintPhysicalDiskConfigDiff<'a>, +} + +impl<'a> ModifiedPhysicalDisk<'a> { + pub fn from_diff( + diff: BlueprintPhysicalDiskConfigDiff<'a>, + ) -> Result<Self, BpDiffPhysicalDiskError> { + // Do we have any errors? If so, create a "reason" string. + let mut reason = String::new(); + + let BlueprintPhysicalDiskConfigDiff { + disposition: _, + identity, + id, + pool_id, + } = diff; + + // If we're a "modified" disk, we must have the same ID before and + // after. (Otherwise our "before" or "after" should've been recorded as + // removed/added.) + debug_assert_eq!(id.before, id.after); + + if identity.is_modified() { + writeln!( + &mut reason, + "mismatched identity: before: {:?}, after: {:?}", + identity.before, identity.after + ) + .expect("write to String is infallible"); + } + + if pool_id.is_modified() { + writeln!( + &mut reason, + "mismatched zpool: before: {}, after: {}", + pool_id.before, pool_id.after + ) + .expect("write to String is infallible"); + } + + if reason.is_empty() { + Ok(ModifiedPhysicalDisk { diff }) + } else { + Err(BpDiffPhysicalDiskError { disk_id: *id.before, reason }) + } + } +} + +#[derive(Debug)] +pub struct BpDiffPhysicalDisksModified<'a> { + pub generation_before: Generation, + pub generation_after: Generation, + pub disks: Vec<ModifiedPhysicalDisk<'a>>, +} + +impl<'a> BpDiffPhysicalDisksModified<'a> { + pub fn new( + generation_before: Generation, + generation_after: Generation, + disk_diffs: impl Iterator<Item = BlueprintPhysicalDiskConfigDiff<'a>>, + ) -> (BpDiffPhysicalDisksModified<'a>, BpDiffPhysicalDiskErrors) { + let mut disks = vec![]; + let mut errors = vec![]; + for diff in disk_diffs { + match ModifiedPhysicalDisk::from_diff(diff) { + Ok(modified_disk) => disks.push(modified_disk), + Err(error) => errors.push(error), + } + } + disks.sort_unstable_by_key(|d| d.diff.identity.before.clone()); + ( + BpDiffPhysicalDisksModified { + generation_before, + generation_after, + disks, + }, + BpDiffPhysicalDiskErrors { + generation_before, + generation_after, + errors, + }, + ) + } +} + +impl BpTableData for BpDiffPhysicalDisksModified<'_> { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: Some(self.generation_before), + after: Some(self.generation_after), + } + } + + fn rows(&self, state: BpDiffState) -> impl Iterator<Item = BpTableRow> { + self.disks.iter().map(move |disk| { + let identity = disk.diff.identity.before; + let disposition = disk.diff.disposition; + BpTableRow::new( + state, + vec![ + BpTableColumn::value(identity.vendor.clone()), + BpTableColumn::value(identity.model.clone()), +
BpTableColumn::value(identity.serial.clone()), + BpTableColumn::new( + disposition.before.to_string(), + disposition.after.to_string(), + ), + ], ) }) } } #[derive(Debug, Default)] -pub struct BpDiffPhysicalDisks { +pub struct BpDiffPhysicalDisks<'a> { pub added: BTreeMap<SledUuid, DiffPhysicalDisksDetails>, pub removed: BTreeMap<SledUuid, DiffPhysicalDisksDetails>, pub unchanged: BTreeMap<SledUuid, DiffPhysicalDisksDetails>, + pub modified: BTreeMap<SledUuid, BpDiffPhysicalDisksModified<'a>>, + pub errors: BTreeMap<SledUuid, BpDiffPhysicalDiskErrors>, } -impl BpDiffPhysicalDisks { - pub fn from_diff_summary(summary: &BlueprintDiffSummary<'_>) -> Self { +impl<'a> BpDiffPhysicalDisks<'a> { + pub fn from_diff_summary(summary: &'a BlueprintDiffSummary<'a>) -> Self { let mut diffs = BpDiffPhysicalDisks::default(); for sled_id in &summary.all_sleds { if let Some(added) = summary.added_disks(sled_id) { @@ -972,6 +1128,12 @@ impl BpDiffPhysicalDisks { if let Some(unchanged) = summary.unchanged_disks(sled_id) { diffs.unchanged.insert(*sled_id, unchanged); } + if let Some((modified, errors)) = summary.modified_disks(sled_id) { + diffs.modified.insert(*sled_id, modified); + if !errors.errors.is_empty() { + diffs.errors.insert(*sled_id, errors); + } + } } diffs } @@ -990,6 +1152,11 @@ impl BpDiffPhysicalDisks { rows.extend(diff.rows(BpDiffState::Removed)); } + if let Some(diff) = self.modified.get(sled_id) { + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Modified)); + } + if let Some(diff) = self.added.get(sled_id) { // Generations never vary for the same sled, so this is harmless generation = diff.bp_generation(); @@ -1716,7 +1883,7 @@ pub struct BlueprintDiffDisplay<'diff> { before_meta: BlueprintMetadata, after_meta: BlueprintMetadata, zones: BpDiffZones, - disks: BpDiffPhysicalDisks, + disks: BpDiffPhysicalDisks<'diff>, datasets: BpDiffDatasets, } @@ -1987,6 +2154,24 @@ impl fmt::Display for BlueprintDiffDisplay<'_> { } } + // Write out disk errors. + if !self.disks.errors.is_empty() { + writeln!(f, "DISK ERRORS:")?; + for (sled_id, errors) in &self.disks.errors { + writeln!(f, "\n  sled {sled_id}\n")?; + writeln!( + f, + "    disk diff errors: before gen {}, after gen {}\n", + errors.generation_before, errors.generation_after + )?; + + for err in &errors.errors { + writeln!(f, "      disk id: {}", err.disk_id)?; + writeln!(f, "      reason: {}", err.reason)?; + } + } + } + // Write out dataset errors. if !self.datasets.errors.is_empty() { writeln!(f, "DATASET ERRORS:")?; diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs index 2cf664e0c19..bd5c95b0fb3 100644 --- a/nexus/types/src/deployment/blueprint_display.rs +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -332,7 +332,7 @@ impl BpTableSchema for BpPhysicalDisksTableSchema { } fn column_names(&self) -> &'static [&'static str] { - &["vendor", "model", "serial"] + &["vendor", "model", "serial", "disposition"] } } diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 19ab6560059..4f22b4d9b21 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -508,9 +508,6 @@ pub enum DiskFilter { /// All disks which are in-service. InService, - - /// All disks which are expunged but still active.
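(Editorial aside: the `DiskFilter` trimming below pairs with the disposition predicates added earlier in this diff. A before/after sketch of a call site, with `d` and `blueprint` assumed.)

```rust,ignore
// Where callers previously wrote
//     d.disposition.matches(DiskFilter::InService)
// they now use a predicate method directly:
let in_service = d.disposition.is_in_service();
// ...and "no filtering at all" is spelled with the `any` predicate:
let all = blueprint.all_omicron_disks(BlueprintPhysicalDiskDisposition::any);
```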
- ExpungedButActive, } impl DiskFilter { @@ -530,12 +527,10 @@ impl PhysicalDiskPolicy { PhysicalDiskPolicy::InService => match filter { DiskFilter::All => true, DiskFilter::InService => true, - DiskFilter::ExpungedButActive => false, }, PhysicalDiskPolicy::Expunged => match filter { DiskFilter::All => true, DiskFilter::InService => false, - DiskFilter::ExpungedButActive => true, }, } } @@ -557,12 +552,10 @@ impl PhysicalDiskState { PhysicalDiskState::Active => match filter { DiskFilter::All => true, DiskFilter::InService => true, - DiskFilter::ExpungedButActive => true, }, PhysicalDiskState::Decommissioned => match filter { DiskFilter::All => true, DiskFilter::InService => false, - DiskFilter::ExpungedButActive => false, }, } } @@ -988,6 +981,8 @@ pub enum PlanningInputBuildError { #[source] err: SourceNatConfigError, }, + #[error("sled not found: {0}")] + SledNotFound(SledUuid), } /// Constructor for [`PlanningInput`]. @@ -1057,6 +1052,26 @@ impl PlanningInputBuilder { } } + /// Expunge a sled and all its disks + /// + /// In the real code, the `PlanningInput` comes from the database. In this + /// case all disks in a sled are marked expunged when the sled is expunged + /// inside a transaction. We do the same thing here for testing purposes. + pub fn expunge_sled( + &mut self, + sled_id: &SledUuid, + ) -> Result<(), PlanningInputBuildError> { + let sled_details = self + .sleds_mut() + .get_mut(&sled_id) + .ok_or(PlanningInputBuildError::SledNotFound(*sled_id))?; + sled_details.policy = SledPolicy::Expunged; + for (_, sled_disk) in sled_details.resources.zpools.iter_mut() { + sled_disk.policy = PhysicalDiskPolicy::Expunged; + } + Ok(()) + } + pub fn add_omicron_zone_external_ip( &mut self, zone_id: OmicronZoneUuid, diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index e87acd4e590..100768e8b19 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -14,12 +14,13 @@ use omicron_common::api::external::{ ByteCount, FailureDomain, Hostname, IdentityMetadataCreateParams, IdentityMetadataUpdateParams, InstanceAutoRestartPolicy, InstanceCpuCount, LinkFec, LinkSpeed, Name, NameOrId, PaginationOrder, RouteDestination, - RouteTarget, SemverVersion, TxEqConfig, UserId, + RouteTarget, TxEqConfig, UserId, }; use omicron_common::disk::DiskVariant; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use parse_display::Display; use schemars::JsonSchema; +use semver::Version; use serde::{ de::{self, Visitor}, Deserialize, Deserializer, Serialize, Serializer, @@ -2235,7 +2236,7 @@ pub struct UpdatesPutRepositoryParams { #[derive(Clone, Debug, Deserialize, JsonSchema)] pub struct UpdatesGetRepositoryParams { /// The version to get. 
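(Editorial aside: a minimal usage sketch for the `expunge_sled` builder helper above; the builder setup and sled id are assumed.)

```rust,ignore
// Expunging the sled flips its policy and every one of its disk policies
// in one step, mirroring the invariant the production database transaction
// maintains.
builder.expunge_sled(&sled_id)?;
```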
- pub system_version: SemverVersion, + pub system_version: Version, } // Probes diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index ce1387330c2..02aaffd3cc4 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -868,6 +868,7 @@ impl fmt::Display for PhysicalDiskPolicy { PartialEq, Eq, EnumIter, + Diffable, )] #[serde(rename_all = "snake_case")] pub enum PhysicalDiskState { diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 32fe2f0d8e4..b782aafc3e8 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -559,7 +559,8 @@ "type": "string" }, "version": { - "$ref": "#/components/schemas/SemverVersion" + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" } }, "required": [ @@ -1522,10 +1523,6 @@ } ] }, - "SemverVersion": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" - }, "SwitchLocation": { "description": "Identifies switch physical location", "oneOf": [ diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 65fcd7e3dc2..e9f29a1dd91 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2173,16 +2173,46 @@ "oneOf": [ { "description": "The physical disk is in-service.", - "type": "string", - "enum": [ - "in_service" + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" ] }, { "description": "The physical disk is permanently gone.", - "type": "string", - "enum": [ - "expunged" + "type": "object", + "properties": { + "as_of_generation": { + "description": "Generation of the parent config in which this disk became expunged.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + }, + "ready_for_cleanup": { + "description": "True if Reconfiguration knows that this disk has been expunged.\n\nIn the current implementation, this means either:\n\na) the sled where the disk was residing has been expunged.\n\nb) the planner has observed an inventory collection where the disk expungement was seen by the sled agent on the sled where the disk was previously in service. This is indicated by the inventory reporting a disk generation at least as high as `as_of_generation`.", + "type": "boolean" + } + }, + "required": [ + "as_of_generation", + "kind", + "ready_for_cleanup" ] } ] diff --git a/openapi/wicketd.json b/openapi/wicketd.json index c518231eff4..1334dd448ab 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -820,6 +820,56 @@ } ] }, + "ApplicationDescriptor": { + "description": "An Application Descriptor describes the supported datapath configurations.\n\nThis is a CMIS-specific concept. It's used for modules to advertise how it can be used by the host. 
Each application describes the host-side electrical interface; the media-side interface; the number of lanes required; etc.\n\nHost-side software can select one of these applications to instruct the module to use a specific set of lanes, with the interface on either side of the module.", + "type": "object", + "properties": { + "host_id": { + "description": "The electrical interface with the host side.", + "type": "string" + }, + "host_lane_assignment_options": { + "description": "The lanes on the host-side supporting this application.\n\nThis is a bit mask with a 1 identifying the lowest lane in a consecutive group of lanes to which the application can be assigned. This must be used with the `host_lane_count`. For example a value of `0b0000_0001` with a host lane count of 4 indicates that the first 4 lanes may be used in this application.\n\nAn application may support starting from multiple lanes.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "host_lane_count": { + "description": "The number of host-side lanes.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "media_id": { + "description": "The interface, optical or copper, with the media side.", + "allOf": [ + { + "$ref": "#/components/schemas/MediaInterfaceId" + } + ] + }, + "media_lane_assignment_options": { + "description": "The lanes on the media-side supporting this application.\n\nSee `host_lane_assignment_options` for details.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "media_lane_count": { + "description": "The number of media-side lanes.", + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "host_id", + "host_lane_assignment_options", + "host_lane_count", + "media_id", + "media_lane_assignment_options", + "media_lane_count" + ] + }, "ArtifactHashId": { "description": "A hash-based identifier for an artifact.\n\nSome places, e.g. the installinator, request artifacts by hash rather than by name and version. This type indicates that.", "type": "object", @@ -853,11 +903,8 @@ }, "version": { "description": "The artifact's version.", - "allOf": [ - { - "$ref": "#/components/schemas/SemverVersion" - } - ] + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" } }, "required": [ @@ -866,6 +913,156 @@ "version" ] }, + "Aux1Monitor": { + "description": "The first auxiliary CMIS monitor.", + "oneOf": [ + { + "description": "The monitored property is custom, i.e., part-specific.", + "type": "object", + "properties": { + "custom": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "minItems": 2, + "maxItems": 2 + } + }, + "required": [ + "custom" + ], + "additionalProperties": false + }, + { + "description": "The current of the laser thermoelectric cooler.\n\nFor actively-cooled laser systems, this specifies the percentage of the maximum current the thermoelectric cooler supports. If the percentage is positive, the cooler is heating the laser.
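(Editorial aside: the lane-assignment encoding described under `host_lane_assignment_options` above is compact and easy to misread. A decoding sketch under the stated semantics; the helper is hypothetical.)

```rust,ignore
/// Expand a lane-assignment bit mask into the lane ranges an application
/// may occupy: a 1 in bit `i` marks lane `i` as a permitted starting lane
/// for a group of `lane_count` consecutive lanes.
fn lane_groups(assignment_options: u8, lane_count: u8) -> Vec<std::ops::Range<u8>> {
    (0..8u8)
        .filter(|bit| assignment_options & (1 << bit) != 0)
        .map(|start| start..start + lane_count)
        .collect()
}

// Per the example above: 0b0000_0001 with a host lane count of 4 means the
// application may use lanes 0 through 3.
assert_eq!(lane_groups(0b0000_0001, 4), vec![0..4]);
```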
If negative, the cooler is cooling the laser.", + "type": "object", + "properties": { + "tec_current": { + "type": "number", + "format": "float" + } + }, + "required": [ + "tec_current" + ], + "additionalProperties": false + } + ] + }, + "Aux2Monitor": { + "description": "The second auxiliary CMIS monitor.", + "oneOf": [ + { + "description": "The temperature of the laser itself (degrees C).", + "type": "object", + "properties": { + "laser_temperature": { + "type": "number", + "format": "float" + } + }, + "required": [ + "laser_temperature" + ], + "additionalProperties": false + }, + { + "description": "The current of the laser thermoelectric cooler.\n\nFor actively-cooled laser systems, this specifies the percentage of the maximum current the thermoelectric cooler supports. If the percentage is positive, the cooler is heating the laser. If negative, the cooler is cooling the laser.", + "type": "object", + "properties": { + "tec_current": { + "type": "number", + "format": "float" + } + }, + "required": [ + "tec_current" + ], + "additionalProperties": false + } + ] + }, + "Aux3Monitor": { + "description": "The third auxiliary CMIS monitor.", + "oneOf": [ + { + "description": "The temperature of the laser itself (degrees C).", + "type": "object", + "properties": { + "laser_temperature": { + "type": "number", + "format": "float" + } + }, + "required": [ + "laser_temperature" + ], + "additionalProperties": false + }, + { + "description": "Measured voltage of an additional power supply (Volts).", + "type": "object", + "properties": { + "additional_supply_voltage": { + "type": "number", + "format": "float" + } + }, + "required": [ + "additional_supply_voltage" + ], + "additionalProperties": false + } + ] + }, + "AuxMonitors": { + "description": "Auxiliary monitored values for CMIS modules.", + "type": "object", + "properties": { + "aux1": { + "nullable": true, + "description": "Auxiliary monitor 1, either a custom value or TEC current.", + "allOf": [ + { + "$ref": "#/components/schemas/Aux1Monitor" + } + ] + }, + "aux2": { + "nullable": true, + "description": "Auxiliary monitor 2, either laser temperature or TEC current.", + "allOf": [ + { + "$ref": "#/components/schemas/Aux2Monitor" + } + ] + }, + "aux3": { + "nullable": true, + "description": "Auxiliary monitor 3, either laser temperature or additional supply voltage.", + "allOf": [ + { + "$ref": "#/components/schemas/Aux3Monitor" + } + ] + }, + "custom": { + "nullable": true, + "description": "A custom monitor. The value here is entirely vendor- and part-specific, so the part's data sheet must be consulted. The value may be either a signed or unsigned 16-bit integer, and so is included as raw bytes.", + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "minItems": 2, + "maxItems": 2 + } + } + }, "Baseboard": { "description": "Describes properties that should uniquely identify a Gimlet.", "oneOf": [ @@ -1251,6 +1448,135 @@ "no_update_data" ] }, + "CmisDatapath": { + "description": "A datapath in a CMIS module.\n\nIn contrast to SFF-8636, CMIS makes first-class the concept of a datapath: a set of lanes and all the associated machinery involved in the transfer of data.
This includes:\n\n- The \"application descriptor\" which is the host and media interfaces, and the lanes on each side used to transfer data; - The state of the datapath in a well-defined finite state machine (see CMIS 5.0 section 6.3.3); - The flags indicating how the datapath components are operating, such as receiving an input Rx signal or whether the transmitter is disabled.", + "type": "object", + "properties": { + "application": { + "description": "The application descriptor for this datapath.", + "allOf": [ + { + "$ref": "#/components/schemas/ApplicationDescriptor" + } + ] + }, + "lane_status": { + "description": "The status bits for each lane in the datapath.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/CmisLaneStatus" + } + } + }, + "required": [ + "application", + "lane_status" + ] + }, + "CmisLaneStatus": { + "description": "The status of a single CMIS lane.\n\nIf any particular control or status value is unsupported by a module, it is `None`.", + "type": "object", + "properties": { + "rx_auto_squelch_disable": { + "nullable": true, + "description": "Whether the host-side has disabled the Rx auto-squelch.\n\nThe module can implement automatic squelching of the Rx output, if the media-side input signal isn't valid. This indicates whether the host has disabled such a setting.", + "type": "boolean" + }, + "rx_lol": { + "nullable": true, + "description": "Media-side loss of lock flag.\n\nThis is true if the module is not able to extract a clock signal from the media-side signal (usually optical).", + "type": "boolean" + }, + "rx_los": { + "nullable": true, + "description": "Media-side loss of signal flag.\n\nThis is true if there is no detected input signal from the media-side (usually optical).", + "type": "boolean" + }, + "rx_output_enabled": { + "nullable": true, + "description": "Whether the Rx output is enabled.\n\nThe host may control this to disable the electrical output from the module to the host.", + "type": "boolean" + }, + "rx_output_polarity": { + "nullable": true, + "description": "The Rx output polarity.\n\nThis indicates a host-side control that flips the polarity of the host-side output signal.", + "allOf": [ + { + "$ref": "#/components/schemas/LanePolarity" + } + ] + }, + "rx_output_status": { + "description": "Status of host-side Rx output.\n\nThis indicates whether the Rx output is sending a valid signal to the host. Note that this is `Invalid` if the output is either muted (such as squelched) or explicitly disabled.", + "allOf": [ + { + "$ref": "#/components/schemas/OutputStatus" + } + ] + }, + "state": { + "description": "The datapath state of this lane.\n\nSee CMIS 5.0 section 8.9.1 for details.", + "type": "string" + }, + "tx_adaptive_eq_fail": { + "nullable": true, + "description": "A failure in the Tx adaptive input equalization.", + "type": "boolean" + }, + "tx_auto_squelch_disable": { + "nullable": true, + "description": "Whether the host-side has disabled the Tx auto-squelch.\n\nThe module can implement automatic squelching of the Tx output, if the host-side input signal isn't valid. 
This indicates whether the host has disabled such a setting.", + "type": "boolean" + }, + "tx_failure": { + "nullable": true, + "description": "General Tx failure flag.\n\nThis indicates that an internal and unspecified malfunction has occurred on the Tx lane.", + "type": "boolean" + }, + "tx_force_squelch": { + "nullable": true, + "description": "Whether the host-side has force-squelched the Tx output.\n\nThis indicates that the host can _force_ squelching the output if the signal is not valid.", + "type": "boolean" + }, + "tx_input_polarity": { + "nullable": true, + "description": "The Tx input polarity.\n\nThis indicates a host-side control that flips the polarity of the host-side input signal.", + "allOf": [ + { + "$ref": "#/components/schemas/LanePolarity" + } + ] + }, + "tx_lol": { + "nullable": true, + "description": "Host-side loss of lock flag.\n\nThis is true if the module is not able to extract a clock signal from the host-side electrical signal.", + "type": "boolean" + }, + "tx_los": { + "nullable": true, + "description": "Host-side loss of signal flag.\n\nThis is true if there is no detected electrical signal from the host-side serdes.", + "type": "boolean" + }, + "tx_output_enabled": { + "nullable": true, + "description": "Whether the Tx output is enabled.", + "type": "boolean" + }, + "tx_output_status": { + "description": "Status of media-side Tx output.\n\nThis indicates whether the Tx output is sending a valid signal to the media itself. Note that this is `Invalid` if the output is either muted (such as squelched) or explicitly disabled.", + "allOf": [ + { + "$ref": "#/components/schemas/OutputStatus" + } + ] + } + }, + "required": [ + "rx_output_status", + "state", + "tx_output_status" + ] + }, "CurrentRssUserConfig": { "type": "object", "properties": { @@ -1353,6 +1679,83 @@ "recovery_silo_password_set" ] }, + "Datapath": { + "description": "Information about a transceiver's datapath.\n\nThis includes state related to the low-level electrical and optical path through which bits flow. This includes flags like loss-of-signal / loss-of-lock; transmitter enablement state; and equalization parameters.", + "oneOf": [ + { + "description": "A number of datapaths in a CMIS module.\n\nCMIS modules may have a large number of supported configurations of their various lanes, each called an \"application\". These are described by the `ApplicationDescriptor` type, which mirrors CMIS 5.0 table 8-18. Each descriptor is identified by an \"Application Selector Code\", which is just its index in the section of the memory map describing them.\n\nEach lane can be used in zero or more applications; however, it may exist in at most one application at a time.
These active applications, of which there may be more than one, are keyed by their codes in the contained mapping.", + "type": "object", + "properties": { + "cmis": { + "type": "object", + "properties": { + "connector": { + "description": "The type of free-side connector", + "type": "string" + }, + "datapaths": { + "description": "Mapping from \"application selector\" ID to its datapath information.\n\nThe datapath includes the lanes used; host electrical interface; media interface; and a lot more about the state of the path.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/CmisDatapath" + } + }, + "supported_lanes": { + "description": "A bit mask with a 1 in bit `i` if the `i`th lane is supported.", + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "connector", + "datapaths", + "supported_lanes" + ] + } + }, + "required": [ + "cmis" + ], + "additionalProperties": false + }, + { + "description": "Datapath state about each lane in an SFF-8636 module.", + "type": "object", + "properties": { + "sff8636": { + "type": "object", + "properties": { + "connector": { + "description": "The type of a media-side connector.\n\nThese values come from SFF-8024 Rev 4.10 Table 4-3.", + "type": "string" + }, + "lanes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Sff8636Datapath" + }, + "minItems": 4, + "maxItems": 4 + }, + "specification": { + "$ref": "#/components/schemas/SffComplianceCode" + } + }, + "required": [ + "connector", + "lanes", + "specification" + ] + } + }, + "required": [ + "sff8636" + ], + "additionalProperties": false + } + ] + }, "Duration": { "type": "object", "properties": { @@ -1465,6 +1868,11 @@ "step_events" ] }, + "ExtendedStatus": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "GetArtifactsAndEventReportsResponse": { "description": "The response to a `get_artifacts` call: the system version, and the list of all artifacts currently held by wicketd.", "type": "object", @@ -1487,11 +1895,8 @@ }, "system_version": { "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/SemverVersion" - } - ] + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" } }, "required": [ @@ -1570,14 +1975,10 @@ "properties": { "inventory": { "$ref": "#/components/schemas/RackV1Inventory" - }, - "mgs_last_seen": { - "$ref": "#/components/schemas/Duration" } }, "required": [ - "inventory", - "mgs_last_seen" + "inventory" ] }, "type": { @@ -1782,6 +2183,14 @@ "last" ] }, + "LanePolarity": { + "description": "The polarity of a transceiver lane.", + "type": "string", + "enum": [ + "normal", + "flipped" + ] + }, "LldpAdminStatus": { "description": "To what extent should this port participate in LLDP", "type": "string", @@ -1810,38 +2219,221 @@ "format": "ip" } }, - "port_description": { + "port_description": { + "nullable": true, + "description": "Port description to advertise. If this is not set, no description will be advertised.", + "type": "string" + }, + "port_id": { + "nullable": true, + "description": "Port ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be set to the port name.
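The `supported_lanes` field above is a plain `uint8` bit mask, with bit `i` set when lane `i` is supported. A small sketch of decoding it (the function name is ours, not part of this API):

```rust
/// Decode a `supported_lanes` mask: bit `i` is 1 when lane `i` is
/// supported, so a module wired for four lanes reports 0b0000_1111.
fn supported_lane_indices(mask: u8) -> Vec<u8> {
    (0..8u8).filter(|i| mask & (1 << i) != 0).collect()
}

fn main() {
    assert_eq!(supported_lane_indices(0b0000_1111), vec![0, 1, 2, 3]);
    assert_eq!(supported_lane_indices(0), Vec::<u8>::new());
}
```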
e.g., qsfp0/0.", + "type": "string" + }, + "status": { + "description": "To what extent should this port participate in LLDP", + "allOf": [ + { + "$ref": "#/components/schemas/LldpAdminStatus" + } + ] + }, + "system_description": { + "nullable": true, + "description": "System description to advertise. If this is not set, it will be inherited from the switch-level settings.", + "type": "string" + }, + "system_name": { + "nullable": true, + "description": "System name to advertise. If this is not set, it will be inherited from the switch-level settings.", + "type": "string" + } + }, + "required": [ + "status" + ] + }, + "MediaInterfaceId": { + "oneOf": [ + { + "type": "object", + "properties": { + "id": { + "description": "Media interface ID for multi-mode fiber media.\n\nSee SFF-8024 Table 4-6.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "mmf" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "type": "object", + "properties": { + "id": { + "description": "Media interface ID for single-mode fiber.\n\nSee SFF-8024 Table 4-7.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "smf" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "type": "object", + "properties": { + "id": { + "description": "Media interface ID for passive copper cables.\n\nSee SFF-8024 Table 4-8.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "passive_copper" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "type": "object", + "properties": { + "id": { + "description": "Media interface ID for active cable assemblies.\n\nSee SFF-8024 Table 4-9.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "active_cable" + ] + } + }, + "required": [ + "id", + "type" + ] + }, + { + "type": "object", + "properties": { + "id": { + "description": "Media interface ID for BASE-T.\n\nSee SFF-8024 Table 4-10.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "base_t" + ] + } + }, + "required": [ + "id", + "type" + ] + } + ] + }, + "MgsV1Inventory": { + "description": "The current state of the v1 Rack as known to MGS", + "type": "object", + "properties": { + "sps": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SpInventory" + } + } + }, + "required": [ + "sps" + ] + }, + "MgsV1InventorySnapshot": { + "description": "The state of the v1 Rack as known to MGS at a given time.", + "type": "object", + "properties": { + "inventory": { + "$ref": "#/components/schemas/MgsV1Inventory" + }, + "last_seen": { + "$ref": "#/components/schemas/Duration" + } + }, + "required": [ + "inventory", + "last_seen" + ] + }, + "Monitors": { + "description": "Free-side device monitoring information.\n\nNote that all values are optional, as some specifications do not require that modules implement monitoring of those values.", + "type": "object", + "properties": { + "aux_monitors": { + "nullable": true, + "description": "Auxiliary monitoring values.\n\nThese are only available on CMIS-compatible transceivers, e.g., QSFP-DD.", + "allOf": [ + { + "$ref": "#/components/schemas/AuxMonitors" + } + ] + }, + "receiver_power": { + "nullable": true, + "description": "The measured input optical power (milliwatts).\n\nNote that due to a limitation in the SFF-8636 specification, it's possible for receiver power to be zero.
See [`ReceiverPower`] for details.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ReceiverPower" + } + }, + "supply_voltage": { "nullable": true, - "description": "Port description to advertise. If this is not set, no description will be advertised.", - "type": "string" + "description": "The measured input supply voltage (Volts).", + "type": "number", + "format": "float" }, - "port_id": { + "temperature": { "nullable": true, - "description": "Port ID to advertise. If this is set, it will be advertised as a LocallyAssigned ID type. If this is not set, it will be set to the port name. e.g., qsfp0/0.", - "type": "string" - }, - "status": { - "description": "To what extent should this port participate in LLDP", - "allOf": [ - { - "$ref": "#/components/schemas/LldpAdminStatus" - } - ] + "description": "The measured cage temperature (degrees C).", + "type": "number", + "format": "float" }, - "system_description": { + "transmitter_bias_current": { "nullable": true, - "description": "System description to advertise. If this is not set, it will be inherited from the switch-level settings.", - "type": "string" + "description": "The output laser bias current (milliamps).", + "type": "array", + "items": { + "type": "number", + "format": "float" + } }, - "system_name": { + "transmitter_power": { "nullable": true, - "description": "System name to advertise. If this is not set, it will be inherited from the switch-level settings.", - "type": "string" + "description": "The measured output optical power (milliwatts).", + "type": "array", + "items": { + "type": "number", + "format": "float" + } } - }, - "required": [ - "status" - ] + } }, "Name": { "title": "A name unique within the parent collection", @@ -1856,6 +2448,24 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "Oui": { + "description": "An Organizationally Unique Identifier.", + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "minItems": 3, + "maxItems": 3 + }, + "OutputStatus": { + "type": "string", + "enum": [ + "valid", + "invalid" + ] + }, "PortFec": { + "description": "Switchport FEC options", + "type": "string", @@ -1880,6 +2490,28 @@ "speed400_g" ] }, + "PowerMode": { + "description": "The power mode of a module.", + "type": "object", + "properties": { + "software_override": { + "nullable": true, + "description": "Whether the module is configured for software override of power control.\n\nIf the module is in `PowerState::Off`, this can't be determined, and `None` is returned.", + "type": "boolean" + }, + "state": { + "description": "The actual power state.", + "allOf": [ + { + "$ref": "#/components/schemas/PowerState2" + } + ] + } + }, + "required": [ + "state" + ] + }, "PowerState": { "description": "See RFD 81.\n\nThis enum only lists power states the SP is able to control; higher power states are controlled by ignition.\n\n
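To make the `PowerMode` semantics above concrete — in particular that `software_override` cannot be read while a module is powered off — here is a hedged sketch; the types are local mirrors of this schema, not the real `transceiver-controller` definitions:

```rust
/// Local mirror of the `PowerState2` values in this schema.
#[derive(Clone, Copy, Debug)]
enum PowerState {
    Off,
    Low,
    High,
}

/// Local mirror of `PowerMode`: `software_override` is `None` whenever it
/// can't be determined, e.g. while the module is in `PowerState::Off`.
struct PowerMode {
    state: PowerState,
    software_override: Option<bool>,
}

fn describe(mode: &PowerMode) -> String {
    match (mode.state, mode.software_override) {
        (PowerState::Off, _) => String::from("off (override state unknowable)"),
        (state, Some(true)) => format!("{state:?}, software-controlled power"),
        (state, _) => format!("{state:?}, pin-controlled power"),
    }
}

fn main() {
    let mode = PowerMode { state: PowerState::High, software_override: Some(true) };
    assert_eq!(describe(&mode), "High, software-controlled power");
}
```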
JSON schema\n\n```json { \"description\": \"See RFD 81.\\n\\nThis enum only lists power states the SP is able to control; higher power states are controlled by ignition.\", \"type\": \"string\", \"enum\": [ \"A0\", \"A1\", \"A2\" ] } ```
", "type": "string", @@ -1889,6 +2521,32 @@ "A2" ] }, + "PowerState2": { + "description": "An allowed power state for the module.", + "oneOf": [ + { + "description": "A module is entirely powered off, using the EFuse.", + "type": "string", + "enum": [ + "off" + ] + }, + { + "description": "Power is enabled to the module, but module remains in low-power mode.\n\nIn this state, modules will not establish a link or transmit traffic, but they may be managed and queried for information through their memory maps.", + "type": "string", + "enum": [ + "low" + ] + }, + { + "description": "The module is in high-power mode.\n\nNote that additional configuration may be required to correctly configure the module, such as described in SFF-8636 rev 2.10a table 6-10, and that the _host side_ is responsible for ensuring that the relevant configuration is applied.", + "type": "string", + "enum": [ + "high" + ] + } + ] + }, "PreflightUplinkCheckOptions": { "description": "Options provided to the preflight uplink check.", "type": "object", @@ -2863,15 +3521,55 @@ "description": "The current state of the v1 Rack as known to wicketd", "type": "object", "properties": { - "sps": { - "type": "array", - "items": { - "$ref": "#/components/schemas/SpInventory" - } + "mgs": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/MgsV1InventorySnapshot" + } + ] + }, + "transceivers": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TransceiverInventorySnapshot" + } + ] + } + } + }, + "ReceiverPower": { + "description": "Measured receiver optical power.\n\nThe SFF specifications allow for devices to monitor input optical power in several ways. It may either be an average power, over some unspecified time, or a peak-to-peak power. The latter is often abbreviated OMA, for Optical Modulation Amplitude. Again the time interval for peak-to-peak measurments are not specified.\n\nDetails -------\n\nThe SFF-8636 specification has an unfortunate limitation. There is no separate advertisement for whether a module supports measurements of receiver power. Instead, the _kind_ of measurement is advertised. The _same bit value_ could mean that either a peak-to-peak measurement is supported, or the measurements are not supported at all. Thus values of `PeakToPeak(0.0)` may mean that power measurements are not supported.", + "oneOf": [ + { + "description": "The measurement is represents average optical power, in mW.", + "type": "object", + "properties": { + "average": { + "type": "number", + "format": "float" + } + }, + "required": [ + "average" + ], + "additionalProperties": false + }, + { + "description": "The measurement represents a peak-to-peak, in mW.", + "type": "object", + "properties": { + "peak_to_peak": { + "type": "number", + "format": "float" + } + }, + "required": [ + "peak_to_peak" + ], + "additionalProperties": false } - }, - "required": [ - "sps" ] }, "RotImageError": { @@ -3392,10 +4090,6 @@ } ] }, - "SemverVersion": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" - }, "SetBgpAuthKeyStatus": { "oneOf": [ { @@ -3421,6 +4115,102 @@ } ] }, + "Sff8636Datapath": { + "description": "The datapath of an SFF-8636 module.\n\nThis describes the state of a single lane in an SFF module. 
It includes information about input and output signals, faults, and controls.", + "type": "object", + "properties": { + "rx_cdr_enabled": { + "description": "Media-side receive Clock and Data Recovery (CDR) enable status.\n\nCDR is the process by which the module engages an internal retimer function, through which the module attempts to recover a clock signal directly from the input bitstream.", + "type": "boolean" + }, + "rx_lol": { + "description": "Media-side loss of lock flag.\n\nThis is true if the module is not able to extract a clock signal from the media-side signal (usually optical).", + "type": "boolean" + }, + "rx_los": { + "description": "Media-side loss of signal flag.\n\nThis is true if there is no detected input signal from the media-side (usually optical).", + "type": "boolean" + }, + "tx_adaptive_eq_fault": { + "description": "Flag indicating a fault in adaptive transmit equalization.", + "type": "boolean" + }, + "tx_cdr_enabled": { + "description": "Host-side transmit Clock and Data Recovery (CDR) enable status.\n\nCDR is the process by which the module engages an internal retimer function, through which the module attempts to recover a clock signal directly from the input bitstream.", + "type": "boolean" + }, + "tx_enabled": { + "description": "Software control of output transmitter.", + "type": "boolean" + }, + "tx_fault": { + "description": "Flag indicating a fault in the transmitter and/or laser.", + "type": "boolean" + }, + "tx_lol": { + "description": "Host-side loss of lock flag.\n\nThis is true if the module is not able to extract a clock signal from the host-side electrical signal.", + "type": "boolean" + }, + "tx_los": { + "description": "Host-side loss of signal flag.\n\nThis is true if there is no detected electrical signal from the host-side serdes.", + "type": "boolean" + } + }, + "required": [ + "rx_cdr_enabled", + "rx_lol", + "rx_los", + "tx_adaptive_eq_fault", + "tx_cdr_enabled", + "tx_enabled", + "tx_fault", + "tx_lol", + "tx_los" + ] + }, + "SffComplianceCode": { + "description": "The compliance code for an SFF-8636 module.\n\nThese values record a specification compliance code, from SFF-8636 Table 6-17, or an extended specification compliance code, from SFF-8024 Table 4-4.", + "oneOf": [ + { + "type": "object", + "properties": { + "code": { + "description": "Extended electrical or optical interface codes", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "extended" + ] + } + }, + "required": [ + "code", + "type" + ] + }, + { + "type": "object", + "properties": { + "code": { + "description": "The Ethernet specification implemented by a module.", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ethernet" + ] + } + }, + "required": [ + "code", + "type" + ] + } + ] + }, "SpComponentCaboose": { "description": "SpComponentCaboose\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"board\", \"git_commit\", \"name\", \"version\" ], \"properties\": { \"board\": { \"type\": \"string\" }, \"epoch\": { \"type\": [ \"string\", \"null\" ] }, \"git_commit\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"sign\": { \"type\": [ \"string\", \"null\" ] }, \"version\": { \"type\": \"string\" } } } ```
", "type": "object", @@ -5991,6 +6781,87 @@ } ] }, + "Transceiver": { + "description": "A transceiver in the switch's front ports.", + "type": "object", + "properties": { + "datapath": { + "nullable": true, + "description": "Status of the transceiver's machinery for carrying data, the \"datapath\".", + "allOf": [ + { + "$ref": "#/components/schemas/Datapath" + } + ] + }, + "monitors": { + "nullable": true, + "description": "Environmental monitoring data, such as temperature or optical power.", + "allOf": [ + { + "$ref": "#/components/schemas/Monitors" + } + ] + }, + "port": { + "description": "The port in which the transceiver sits.", + "type": "string" + }, + "power": { + "nullable": true, + "description": "Information about the power state of the transceiver.", + "allOf": [ + { + "$ref": "#/components/schemas/PowerMode" + } + ] + }, + "status": { + "nullable": true, + "description": "The general status of the transceiver, such as presence and faults.", + "allOf": [ + { + "$ref": "#/components/schemas/ExtendedStatus" + } + ] + }, + "vendor": { + "nullable": true, + "description": "Details about the vendor, part number, and serial number.", + "allOf": [ + { + "$ref": "#/components/schemas/VendorInfo" + } + ] + } + }, + "required": [ + "port" + ] + }, + "TransceiverInventorySnapshot": { + "description": "A snapshot of the transceiver inventory at a given time.", + "type": "object", + "properties": { + "inventory": { + "description": "The transceivers in each switch.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Transceiver" + } + } + }, + "last_seen": { + "$ref": "#/components/schemas/Duration" + } + }, + "required": [ + "inventory", + "last_seen" + ] + }, "TxEqConfig": { "description": "Per-port tx-eq overrides. This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", "type": "object", @@ -6735,6 +7606,60 @@ ], "additionalProperties": false }, + "Vendor": { + "description": "Vendor-specific information about a transceiver module.", + "type": "object", + "properties": { + "date": { + "nullable": true, + "type": "string" + }, + "name": { + "type": "string" + }, + "oui": { + "$ref": "#/components/schemas/Oui" + }, + "part": { + "type": "string" + }, + "revision": { + "type": "string" + }, + "serial": { + "type": "string" + } + }, + "required": [ + "name", + "oui", + "part", + "revision", + "serial" + ] + }, + "VendorInfo": { + "description": "The vendor information for a transceiver module.", + "type": "object", + "properties": { + "identifier": { + "description": "The SFF-8024 identifier.", + "type": "string" + }, + "vendor": { + "description": "The vendor information.", + "allOf": [ + { + "$ref": "#/components/schemas/Vendor" + } + ] + } + }, + "required": [ + "identifier", + "vendor" + ] + }, "IgnitionCommand": { "description": "Ignition command.\n\n
JSON schema\n\n```json { \"description\": \"Ignition command.\", \"type\": \"string\", \"enum\": [ \"power_on\", \"power_off\", \"power_reset\" ] } ```
", "type": "string", diff --git a/schema/crdb/bp-disk-disposition-expunged-cleanup/up1.sql b/schema/crdb/bp-disk-disposition-expunged-cleanup/up1.sql new file mode 100644 index 00000000000..87ed52397c2 --- /dev/null +++ b/schema/crdb/bp-disk-disposition-expunged-cleanup/up1.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.bp_omicron_physical_disk + ADD COLUMN IF NOT EXISTS disposition_expunged_as_of_generation INT; diff --git a/schema/crdb/bp-disk-disposition-expunged-cleanup/up2.sql b/schema/crdb/bp-disk-disposition-expunged-cleanup/up2.sql new file mode 100644 index 00000000000..31cfc9fc2b8 --- /dev/null +++ b/schema/crdb/bp-disk-disposition-expunged-cleanup/up2.sql @@ -0,0 +1,10 @@ +SET LOCAL disallow_full_table_scans = off; +UPDATE omicron.public.bp_omicron_physical_disk + SET disposition_expunged_as_of_generation = ( + SELECT generation + FROM omicron.public.bp_sled_omicron_physical_disks + WHERE + blueprint_id = omicron.public.bp_omicron_physical_disk.blueprint_id + AND sled_id = omicron.public.bp_omicron_physical_disk.sled_id + ) + WHERE disposition = 'expunged'; diff --git a/schema/crdb/bp-disk-disposition-expunged-cleanup/up3.sql b/schema/crdb/bp-disk-disposition-expunged-cleanup/up3.sql new file mode 100644 index 00000000000..501da394c6c --- /dev/null +++ b/schema/crdb/bp-disk-disposition-expunged-cleanup/up3.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.bp_omicron_physical_disk + ADD COLUMN IF NOT EXISTS + disposition_expunged_ready_for_cleanup BOOL NOT NULL DEFAULT false; diff --git a/schema/crdb/bp-disk-disposition-expunged-cleanup/up4.sql b/schema/crdb/bp-disk-disposition-expunged-cleanup/up4.sql new file mode 100644 index 00000000000..3790ddd7de5 --- /dev/null +++ b/schema/crdb/bp-disk-disposition-expunged-cleanup/up4.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.bp_omicron_physical_disk + ALTER COLUMN disposition_expunged_ready_for_cleanup DROP DEFAULT; diff --git a/schema/crdb/bp-disk-disposition-expunged-cleanup/up5.sql b/schema/crdb/bp-disk-disposition-expunged-cleanup/up5.sql new file mode 100644 index 00000000000..4fabf6e2e90 --- /dev/null +++ b/schema/crdb/bp-disk-disposition-expunged-cleanup/up5.sql @@ -0,0 +1,9 @@ +ALTER TABLE omicron.public.bp_omicron_physical_disk +ADD CONSTRAINT IF NOT EXISTS expunged_disposition_properties CHECK ( + (disposition != 'expunged' + AND disposition_expunged_as_of_generation IS NULL + AND NOT disposition_expunged_ready_for_cleanup) + OR + (disposition = 'expunged' + AND disposition_expunged_as_of_generation IS NOT NULL) +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index b24d44a9be6..e5a8f79dcee 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3823,7 +3823,20 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_physical_disk ( disposition omicron.public.bp_physical_disk_disposition NOT NULL, - PRIMARY KEY (blueprint_id, id) + -- Specific properties of the `expunged` disposition + disposition_expunged_as_of_generation INT, + disposition_expunged_ready_for_cleanup BOOL NOT NULL, + + PRIMARY KEY (blueprint_id, id), + + CONSTRAINT expunged_disposition_properties CHECK ( + (disposition != 'expunged' + AND disposition_expunged_as_of_generation IS NULL + AND NOT disposition_expunged_ready_for_cleanup) + OR + (disposition = 'expunged' + AND disposition_expunged_as_of_generation IS NOT NULL) + ) ); -- description of a collection of omicron datasets stored in a blueprint @@ -4969,7 +4982,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '126.0.0', 
NULL) + (TRUE, NOW(), NOW(), '127.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index ffef7c6597e..421f8f6e098 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -59,7 +59,6 @@ nexus-types.workspace = true omicron-common.workspace = true omicron-ddm-admin-client.workspace = true omicron-uuid-kinds.workspace = true -once_cell.workspace = true oximeter.workspace = true oximeter-instruments.workspace = true oximeter-producer.workspace = true diff --git a/sled-agent/bootstrap-agent-api/Cargo.toml b/sled-agent/bootstrap-agent-api/Cargo.toml index 24b4866dcdf..42bdc0add64 100644 --- a/sled-agent/bootstrap-agent-api/Cargo.toml +++ b/sled-agent/bootstrap-agent-api/Cargo.toml @@ -13,6 +13,7 @@ omicron-common.workspace = true omicron-uuid-kinds.workspace = true omicron-workspace-hack.workspace = true schemars.workspace = true +semver.workspace = true serde.workspace = true sled-agent-types.workspace = true sled-hardware-types.workspace = true diff --git a/sled-agent/bootstrap-agent-api/src/lib.rs b/sled-agent/bootstrap-agent-api/src/lib.rs index 4deb76ca858..f6846af6165 100644 --- a/sled-agent/bootstrap-agent-api/src/lib.rs +++ b/sled-agent/bootstrap-agent-api/src/lib.rs @@ -11,9 +11,9 @@ use dropshot::{ HttpError, HttpResponseOk, HttpResponseUpdatedNoContent, RequestContext, TypedBody, }; -use omicron_common::api::external::SemverVersion; use omicron_uuid_kinds::{RackInitUuid, RackResetUuid}; use schemars::JsonSchema; +use semver::Version; use serde::{Deserialize, Serialize}; use sled_agent_types::{ rack_init::RackInitializeRequest, rack_ops::RackOperationStatus, @@ -86,5 +86,5 @@ pub trait BootstrapAgentApi { #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct Component { pub name: String, - pub version: SemverVersion, + pub version: Version, } diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index cbef6101a00..f1033aa8a15 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -96,7 +96,6 @@ use omicron_common::disk::{DatasetKind, DatasetName}; use omicron_common::ledger::{self, Ledger, Ledgerable}; use omicron_ddm_admin_client::{Client as DdmAdminClient, DdmError}; use omicron_uuid_kinds::OmicronZoneUuid; -use once_cell::sync::OnceCell; use rand::prelude::SliceRandom; use sled_agent_types::{ time_sync::TimeSync, @@ -115,7 +114,7 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use tokio::io::AsyncWriteExt; use tokio::sync::Mutex; use tokio::sync::{oneshot, MutexGuard}; @@ -729,12 +728,12 @@ pub struct ServiceManagerInner { bootstrap_vnic_allocator: VnicAllocator, ddmd_client: DdmAdminClient, advertised_prefixes: Mutex>>, - sled_info: OnceCell, + sled_info: OnceLock, switch_zone_bootstrap_address: Ipv6Addr, storage: StorageHandle, zone_bundler: ZoneBundler, - ledger_directory_override: OnceCell, - image_directory_override: OnceCell, + ledger_directory_override: OnceLock, + image_directory_override: OnceLock, } // Late-binding information, only known once the sled agent is up and @@ -817,13 +816,13 @@ impl ServiceManager { ), ddmd_client, advertised_prefixes: Mutex::new(HashSet::new()), - sled_info: OnceCell::new(), + sled_info: OnceLock::new(), switch_zone_bootstrap_address: bootstrap_networking .switch_zone_bootstrap_ip, storage, zone_bundler, - ledger_directory_override: OnceCell::new(), - 
image_directory_override: OnceCell::new(), + ledger_directory_override: OnceLock::new(), + image_directory_override: OnceLock::new(), }), } } diff --git a/sled-agent/src/updates.rs b/sled-agent/src/updates.rs index 3fd810c2534..bb222a4a9e5 100644 --- a/sled-agent/src/updates.rs +++ b/sled-agent/src/updates.rs @@ -7,7 +7,6 @@ use bootstrap_agent_api::Component; use camino::{Utf8Path, Utf8PathBuf}; use omicron_brand_metadata::Metadata; -use omicron_common::api::external::SemverVersion; use serde::{Deserialize, Serialize}; #[derive(thiserror::Error, Debug)] @@ -74,10 +73,7 @@ impl UpdateManager { let info = metadata .layer_info() .map_err(|err| Error::ZoneVersion { path: path.into(), err })?; - Ok(Component { - name: info.pkg.clone(), - version: SemverVersion(info.version.clone()), - }) + Ok(Component { name: info.pkg.clone(), version: info.version.clone() }) } pub async fn components_get(&self) -> Result, Error> { @@ -105,11 +101,10 @@ impl UpdateManager { // Extract the name and semver version let name = "sled-agent".to_string(); - let version = omicron_common::api::external::SemverVersion( - semver::Version::parse(&version).map_err(|err| { - Error::Semver { path: version_path.to_path_buf(), err } - })?, - ); + let version = version.parse().map_err(|err| Error::Semver { + path: version_path.to_path_buf(), + err, + })?; components.push(crate::updates::Component { name, version }); } @@ -124,7 +119,7 @@ mod test { use super::*; use camino_tempfile::NamedUtf8TempFile; use flate2::write::GzEncoder; - use omicron_common::api::external::SemverVersion; + use semver::Version; use std::io::Write; use tar::Builder; @@ -177,10 +172,7 @@ mod test { um.components_get().await.expect("Failed to get components"); assert_eq!(components.len(), 1); assert_eq!(components[0].name, "test-pkg".to_string()); - assert_eq!( - components[0].version, - SemverVersion(semver::Version::new(2, 0, 0)) - ); + assert_eq!(components[0].version, Version::new(2, 0, 0)); } #[tokio::test] @@ -202,9 +194,6 @@ mod test { um.components_get().await.expect("Failed to get components"); assert_eq!(components.len(), 1); assert_eq!(components[0].name, "sled-agent".to_string()); - assert_eq!( - components[0].version, - SemverVersion(semver::Version::new(1, 2, 3)) - ); + assert_eq!(components[0].version, Version::new(1, 2, 3)); } } diff --git a/tufaceous-lib/src/artifact.rs b/tufaceous-lib/src/artifact.rs index 486744e3ceb..903e2dbab9d 100644 --- a/tufaceous-lib/src/artifact.rs +++ b/tufaceous-lib/src/artifact.rs @@ -13,7 +13,8 @@ use bytes::Bytes; use camino::Utf8PathBuf; use fs_err::File; use omicron_brand_metadata::Metadata; -use omicron_common::{api::external::SemverVersion, update::ArtifactKind}; +use omicron_common::update::ArtifactKind; +use semver::Version; mod composite; @@ -35,7 +36,7 @@ pub enum ArtifactSource { pub struct AddArtifact { kind: ArtifactKind, name: String, - version: SemverVersion, + version: Version, source: ArtifactSource, } @@ -44,7 +45,7 @@ impl AddArtifact { pub fn new( kind: ArtifactKind, name: String, - version: SemverVersion, + version: Version, source: ArtifactSource, ) -> Self { Self { kind, name, version, source } @@ -57,7 +58,7 @@ impl AddArtifact { pub fn from_path( kind: ArtifactKind, name: Option, - version: SemverVersion, + version: Version, path: Utf8PathBuf, ) -> Result { let name = match name { @@ -85,7 +86,7 @@ impl AddArtifact { } /// Returns the version of the new artifact. 
- pub fn version(&self) -> &SemverVersion { + pub fn version(&self) -> &Version { &self.version } diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 3203cdfc825..d6cd9eeb48e 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -6,10 +6,9 @@ use std::collections::{BTreeMap, BTreeSet}; use anyhow::{bail, ensure, Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; -use omicron_common::api::{ - external::SemverVersion, internal::nexus::KnownArtifactKind, -}; +use omicron_common::api::internal::nexus::KnownArtifactKind; use parse_size::parse_size; +use semver::Version; use serde::{Deserialize, Serialize}; use crate::{ @@ -24,7 +23,7 @@ static FAKE_MANIFEST_TOML: &str = /// A list of components in a TUF repo representing a single update. #[derive(Clone, Debug)] pub struct ArtifactManifest { - pub system_version: SemverVersion, + pub system_version: Version, pub artifacts: BTreeMap>, } @@ -245,11 +244,11 @@ impl ArtifactManifest { #[derive(Debug)] struct FakeDataAttributes<'a> { kind: KnownArtifactKind, - version: &'a SemverVersion, + version: &'a Version, } impl<'a> FakeDataAttributes<'a> { - fn new(kind: KnownArtifactKind, version: &'a SemverVersion) -> Self { + fn new(kind: KnownArtifactKind, version: &'a Version) -> Self { Self { kind, version } } @@ -298,7 +297,7 @@ impl<'a> FakeDataAttributes<'a> { #[derive(Clone, Debug)] pub struct ArtifactData { pub name: String, - pub version: SemverVersion, + pub version: Version, pub source: ArtifactSource, } @@ -311,7 +310,7 @@ pub struct ArtifactData { #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub struct DeserializedManifest { - pub system_version: SemverVersion, + pub system_version: Version, #[serde(rename = "artifact")] pub artifacts: BTreeMap>, } @@ -396,7 +395,7 @@ impl DeserializedManifest { #[serde(rename_all = "snake_case")] pub struct DeserializedArtifactData { pub name: String, - pub version: SemverVersion, + pub version: Version, pub source: DeserializedArtifactSource, } @@ -594,10 +593,10 @@ impl DeserializedControlPlaneZoneSource { #[derive(Clone, Debug)] pub enum ManifestTweak { /// Update the system version. - SystemVersion(SemverVersion), + SystemVersion(Version), /// Update the versions for this artifact. - ArtifactVersion { kind: KnownArtifactKind, version: SemverVersion }, + ArtifactVersion { kind: KnownArtifactKind, version: Version }, /// Update the contents of this artifact (only support changing the size). 
ArtifactContents { kind: KnownArtifactKind, size_delta: i64 }, diff --git a/tufaceous-lib/src/repository.rs b/tufaceous-lib/src/repository.rs index 7fef2565a86..c36413ffced 100644 --- a/tufaceous-lib/src/repository.rs +++ b/tufaceous-lib/src/repository.rs @@ -9,10 +9,8 @@ use camino::{Utf8Path, Utf8PathBuf}; use chrono::{DateTime, Utc}; use fs_err::{self as fs}; use futures::TryStreamExt; -use omicron_common::{ - api::external::SemverVersion, - update::{Artifact, ArtifactsDocument}, -}; +use omicron_common::update::{Artifact, ArtifactsDocument}; +use semver::Version; use std::{collections::BTreeSet, num::NonZeroU64}; use tough::{ editor::{signed::SignedRole, RepositoryEditor}, @@ -33,7 +31,7 @@ impl OmicronRepo { pub async fn initialize( log: &slog::Logger, repo_path: &Utf8Path, - system_version: SemverVersion, + system_version: Version, keys: Vec, expiry: DateTime, ) -> Result { @@ -256,7 +254,7 @@ impl OmicronRepoEditor { async fn initialize( repo_path: Utf8PathBuf, root: SignedRole, - system_version: SemverVersion, + system_version: Version, ) -> Result { let metadata_dir = repo_path.join("metadata"); let targets_dir = repo_path.join("targets"); diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index 2cdd98f0a6d..37d3974aff8 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -23,6 +23,7 @@ slog-term.workspace = true tokio.workspace = true tufaceous-lib.workspace = true omicron-workspace-hack.workspace = true +semver.workspace = true [dev-dependencies] assert_cmd.workspace = true diff --git a/tufaceous/src/dispatch.rs b/tufaceous/src/dispatch.rs index ed0c63f7879..fc8281b511f 100644 --- a/tufaceous/src/dispatch.rs +++ b/tufaceous/src/dispatch.rs @@ -6,7 +6,8 @@ use anyhow::{bail, Context, Result}; use camino::Utf8PathBuf; use chrono::{DateTime, Utc}; use clap::{CommandFactory, Parser}; -use omicron_common::{api::external::SemverVersion, update::ArtifactKind}; +use omicron_common::update::ArtifactKind; +use semver::Version; use tufaceous_lib::{ assemble::{ArtifactManifest, OmicronRepoAssembler}, AddArtifact, ArchiveExtractor, Key, OmicronRepo, @@ -186,7 +187,7 @@ enum Command { /// Create a new rack update TUF repository Init { /// The system version. - system_version: SemverVersion, + system_version: Version, /// Disable random key generation and exit if no keys are provided #[clap(long)] @@ -208,7 +209,7 @@ enum Command { name: Option, /// Artifact version. - version: SemverVersion, + version: Version, }, /// Archives this repository to a zip file. 
Archive { diff --git a/update-common/Cargo.toml b/update-common/Cargo.toml index 789f6466825..fde6cb2e471 100644 --- a/update-common/Cargo.toml +++ b/update-common/Cargo.toml @@ -32,6 +32,7 @@ omicron-brand-metadata.workspace = true tar.workspace = true flate2.workspace = true fs-err = { workspace = true, features = ["tokio"] } +semver.workspace = true [dev-dependencies] clap.workspace = true diff --git a/update-common/src/artifacts/update_plan.rs b/update-common/src/artifacts/update_plan.rs index 297d66f9868..062d6cd1845 100644 --- a/update-common/src/artifacts/update_plan.rs +++ b/update-common/src/artifacts/update_plan.rs @@ -21,13 +21,13 @@ use futures::Stream; use futures::StreamExt; use futures::TryStreamExt; use hubtools::RawHubrisArchive; -use omicron_common::api::external::SemverVersion; use omicron_common::api::external::TufArtifactMeta; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_common::update::ArtifactHash; use omicron_common::update::ArtifactHashId; use omicron_common::update::ArtifactId; use omicron_common::update::ArtifactKind; +use semver::Version; use slog::info; use slog::Logger; use std::collections::btree_map; @@ -45,7 +45,7 @@ use tufaceous_lib::RotArchives; /// repository. #[derive(Debug, Clone)] pub struct UpdatePlan { - pub system_version: SemverVersion, + pub system_version: Version, pub gimlet_sp: BTreeMap, pub gimlet_rot_a: Vec, pub gimlet_rot_b: Vec, @@ -109,7 +109,7 @@ struct RotSignTarget { #[derive(Debug)] pub struct UpdatePlanBuilder<'a> { // fields that mirror `UpdatePlan` - system_version: SemverVersion, + system_version: Version, gimlet_sp: BTreeMap, gimlet_rot_a: Vec, gimlet_rot_b: Vec, @@ -158,7 +158,7 @@ pub struct UpdatePlanBuilder<'a> { impl<'a> UpdatePlanBuilder<'a> { pub fn new( - system_version: SemverVersion, + system_version: Version, zone_mode: ControlPlaneZonesMode, log: &'a Logger, ) -> Result { @@ -932,7 +932,7 @@ impl<'a> UpdatePlanBuilder<'a> { let artifact_id = ArtifactId { name: info.pkg.clone(), - version: SemverVersion(info.version.clone()), + version: info.version.clone(), kind: KnownArtifactKind::Zone.into(), }; self.record_extracted_artifact( @@ -1465,8 +1465,8 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_bad_rot_versions() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); - const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); + const VERSION_0: Version = Version::new(0, 0, 0); + const VERSION_1: Version = Version::new(0, 0, 1); let logctx = test_setup_log("test_bad_rot_version"); @@ -1637,8 +1637,8 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_multi_rot_version() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); - const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); + const VERSION_0: Version = Version::new(0, 0, 0); + const VERSION_1: Version = Version::new(0, 0, 1); let logctx = test_setup_log("test_multi_rot_version"); @@ -1826,7 +1826,7 @@ mod tests { // is required. 
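The change running through these `update-common` and `tufaceous` hunks is mechanical: the `SemverVersion` newtype wrapper is dropped in favor of `semver::Version` used directly, so parse sites shrink to a single `parse()` call. A minimal sketch of the resulting shape (function name ours; assumes the `semver` crate already added to these manifests):

```rust
use semver::Version;

/// Parse a version string the way the reworked call sites do: directly
/// into `semver::Version`, with no `SemverVersion(...)` wrapper around it.
fn parse_component_version(raw: &str) -> Result<Version, semver::Error> {
    Version::parse(raw.trim())
}

fn main() {
    let v = parse_component_version("1.2.3\n").unwrap();
    assert_eq!(v, Version::new(1, 2, 3));
    assert!(parse_component_version("not-a-version").is_err());
}
```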
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_plan_from_artifacts() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_0: Version = Version::new(0, 0, 0); let logctx = test_setup_log("test_update_plan_from_artifacts"); @@ -2131,7 +2131,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_bad_hubris_cabooses() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_0: Version = Version::new(0, 0, 0); let logctx = test_setup_log("test_bad_hubris_cabooses"); @@ -2209,8 +2209,8 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_too_many_rot_bootloaders() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); - const VERSION_1: SemverVersion = SemverVersion::new(0, 0, 1); + const VERSION_0: Version = Version::new(0, 0, 0); + const VERSION_1: Version = Version::new(0, 0, 1); // The regular RoT can have multiple versions but _not_ the // bootloader @@ -2278,7 +2278,7 @@ mod tests { // YYYY BBBB 2.0.0 // YYYY CCCC 2.0.0 - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_0: Version = Version::new(0, 0, 0); let logctx = test_setup_log("test_update_plan_from_artifacts"); @@ -2442,7 +2442,7 @@ mod tests { // YYYY BBBB // YYYY CCCC - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_0: Version = Version::new(0, 0, 0); let logctx = test_setup_log("test_update_plan_from_artifacts"); @@ -2490,7 +2490,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_split_control_plane() { - const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); + const VERSION_0: Version = Version::new(0, 0, 0); let logctx = test_setup_log("test_split_control_plane"); @@ -2502,7 +2502,7 @@ mod tests { )); let metadata = Metadata::new(ArchiveType::Layer(LayerInfo { pkg: name.to_owned(), - version: VERSION_0.0.clone(), + version: VERSION_0, })); metadata.append_to_tar(&mut tar, 0).unwrap(); let data = tar.into_inner().unwrap().finish().unwrap(); diff --git a/update-common/src/errors.rs b/update-common/src/errors.rs index d4dc9c6bfc4..1ce9c6f5a14 100644 --- a/update-common/src/errors.rs +++ b/update-common/src/errors.rs @@ -7,9 +7,9 @@ use camino::Utf8PathBuf; use display_error_chain::DisplayErrorChain; use dropshot::HttpError; -use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; use omicron_common::update::{ArtifactHashId, ArtifactId, ArtifactKind}; +use semver::Version; use slog::error; use thiserror::Error; @@ -146,8 +146,8 @@ pub enum RepositoryError { )] MultipleVersionsPresent { kind: KnownArtifactKind, - v1: SemverVersion, - v2: SemverVersion, + v1: Version, + v2: Version, }, #[error("Caboose mismatch between A {a:?} and B {b:?}")] CabooseMismatch { a: String, b: String }, diff --git a/wicket-common/Cargo.toml b/wicket-common/Cargo.toml index 3c24cea8054..dc6e241c955 100644 --- a/wicket-common/Cargo.toml +++ b/wicket-common/Cargo.toml @@ -25,6 +25,7 @@ sled-hardware-types.workspace = true slog.workspace = true thiserror.workspace = true tokio.workspace = true +transceiver-controller.workspace = true update-engine.workspace = true [dev-dependencies] diff --git a/wicket-common/src/inventory.rs b/wicket-common/src/inventory.rs index f7b42e4ec07..cf1bfa698a8 100644 --- a/wicket-common/src/inventory.rs +++ b/wicket-common/src/inventory.rs @@ -9,13 +9,34 @@ pub use gateway_client::types::{ 
SpComponentPresence, SpIdentifier, SpIgnition, SpIgnitionSystemType, SpState, SpType, }; +use omicron_common::api::external::SwitchLocation; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, time::Duration}; +use transceiver_controller::{ + message::ExtendedStatus, Datapath, Monitors, PowerMode, VendorInfo, +}; /// The current state of the v1 Rack as known to wicketd #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -#[serde(tag = "inventory", rename_all = "snake_case")] +#[serde(tag = "rack_inventory", rename_all = "snake_case")] pub struct RackV1Inventory { + pub mgs: Option<MgsV1InventorySnapshot>, + pub transceivers: Option<TransceiverInventorySnapshot>, +} + +/// The state of the v1 Rack as known to MGS at a given time. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[serde(tag = "mgs_inventory_snapshot", rename_all = "snake_case")] +pub struct MgsV1InventorySnapshot { + pub inventory: MgsV1Inventory, + pub last_seen: Duration, +} + +/// The current state of the v1 Rack as known to MGS +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[serde(tag = "mgs_inventory", rename_all = "snake_case")] +pub struct MgsV1Inventory { pub sps: Vec<SpInventory>, } @@ -61,3 +82,30 @@ pub struct RotInventory { pub caboose_stage0: Option<Option<SpComponentCaboose>>, pub caboose_stage0next: Option<Option<SpComponentCaboose>>, } + +/// A snapshot of the transceiver inventory at a given time. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +#[serde(tag = "transceiver_inventory_snapshot", rename_all = "snake_case")] +pub struct TransceiverInventorySnapshot { + /// The transceivers in each switch. + pub inventory: HashMap<SwitchLocation, Vec<Transceiver>>, + pub last_seen: Duration, +} + +/// A transceiver in the switch's front ports. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +#[serde(tag = "transceiver", rename_all = "snake_case")] +pub struct Transceiver { + /// The port in which the transceiver sits. + pub port: String, + /// The general status of the transceiver, such as presence and faults. + pub status: Option<ExtendedStatus>, + /// Information about the power state of the transceiver. + pub power: Option<PowerMode>, + /// Details about the vendor, part number, and serial number. + pub vendor: Option<VendorInfo>, + /// Status of the transceiver's machinery for carrying data, the "datapath". + pub datapath: Option<Datapath>, + /// Environmental monitoring data, such as temperature or optical power.
+ pub monitors: Option<Monitors>, +} diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml index 117a0128456..fefecd060c5 100644 --- a/wicket/Cargo.toml +++ b/wicket/Cargo.toml @@ -23,7 +23,6 @@ indexmap.workspace = true indicatif.workspace = true itertools.workspace = true omicron-common.workspace = true -once_cell.workspace = true owo-colors.workspace = true ratatui.workspace = true reqwest.workspace = true @@ -42,6 +41,7 @@ tokio = { workspace = true, features = ["full"] } tokio-util.workspace = true toml.workspace = true toml_edit.workspace = true +transceiver-controller.workspace = true tui-tree-widget.workspace = true unicode-width.workspace = true zeroize.workspace = true @@ -51,6 +51,7 @@ update-engine.workspace = true wicket-common.workspace = true wicketd-client.workspace = true omicron-workspace-hack.workspace = true +semver.workspace = true [dev-dependencies] assert_cmd.workspace = true diff --git a/wicket/src/events.rs b/wicket/src/events.rs index 55f28f5acb9..514a60248ba 100644 --- a/wicket/src/events.rs +++ b/wicket/src/events.rs @@ -4,15 +4,16 @@ use crate::{keymap::Cmd, state::ComponentId, State}; use camino::Utf8PathBuf; use humantime::format_rfc3339; +use semver::Version; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs::File; -use std::time::{Duration, SystemTime}; +use std::time::SystemTime; use wicket_common::inventory::RackV1Inventory; use wicket_common::update_events::EventReport; use wicketd_client::types::{ ArtifactId, CurrentRssUserConfig, GetLocationResponse, IgnitionCommand, - RackOperationStatus, SemverVersion, + RackOperationStatus, }; /// Event report type returned by the get_artifacts_and_event_reports API call. @@ -36,11 +37,11 @@ pub enum Event { Term(Cmd), /// An Inventory Update Event - Inventory { inventory: RackV1Inventory, mgs_last_seen: Duration }, + Inventory { inventory: RackV1Inventory }, /// TUF repo artifacts unpacked by wicketd, and event reports ArtifactsAndEventReports { - system_version: Option<SemverVersion>, + system_version: Option<Version>, artifacts: Vec<ArtifactId>, event_reports: EventReportMap, }, diff --git a/wicket/src/runner.rs b/wicket/src/runner.rs index 0e201478a8e..fc9b4794328 100644 --- a/wicket/src/runner.rs +++ b/wicket/src/runner.rs @@ -122,10 +122,17 @@ impl RunnerCore { self.screen.resize(&mut self.state, width, height); self.screen.draw(&self.state, &mut self.terminal)?; } - Event::Inventory { inventory, mgs_last_seen } => { - self.state.service_status.reset_mgs(mgs_last_seen); - self.state.service_status.reset_wicketd(Duration::ZERO); + Event::Inventory { inventory } => { + if let Some(mgs) = &inventory.mgs { + self.state.service_status.reset_mgs(mgs.last_seen); + } + if let Some(transceivers) = &inventory.transceivers { + self.state + .service_status + .reset_transceivers(transceivers.last_seen); + } self.state.inventory.update_inventory(inventory)?; + self.state.service_status.reset_wicketd(Duration::ZERO); self.screen.draw(&self.state, &mut self.terminal)?; } Event::ArtifactsAndEventReports { diff --git a/wicket/src/state/inventory.rs b/wicket/src/state/inventory.rs index d200f597e8c..d7488381802 100644 --- a/wicket/src/state/inventory.rs +++ b/wicket/src/state/inventory.rs @@ -4,25 +4,28 @@ //!
Information about all top-level Oxide components (sleds, switches, PSCs) -use anyhow::{bail, Result}; -use omicron_common::api::internal::nexus::KnownArtifactKind; -use once_cell::sync::Lazy; +use anyhow::{bail, Context as _, Result}; +use omicron_common::api::{ + external::SwitchLocation, internal::nexus::KnownArtifactKind, +}; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::fmt::Display; use std::iter::Iterator; +use std::sync::LazyLock; use wicket_common::inventory::{ RackV1Inventory, RotInventory, RotSlot, SpComponentCaboose, - SpComponentInfo, SpIgnition, SpState, SpType, + SpComponentInfo, SpIgnition, SpState, SpType, Transceiver, }; -pub static ALL_COMPONENT_IDS: Lazy<Vec<ComponentId>> = Lazy::new(|| { - (0..=31u8) - .map(ComponentId::Sled) - .chain((0..=1u8).map(ComponentId::Switch)) - .chain((0..=1u8).map(ComponentId::Psc)) - .collect() -}); +pub static ALL_COMPONENT_IDS: LazyLock<Vec<ComponentId>> = + LazyLock::new(|| { + (0..=31u8) + .map(ComponentId::Sled) + .chain((0..=1u8).map(ComponentId::Switch)) + .chain((0..=1u8).map(ComponentId::Psc)) + .collect() + }); /// Inventory is the most recent information about rack composition as /// received from MGS. @@ -49,9 +52,15 @@ &mut self, inventory: RackV1Inventory, ) -> anyhow::Result<()> { + let mgs_inventory = inventory + .mgs + .map(|mgs| mgs.inventory) + .context("Cannot update inventory without any details from MGS")?; + let mut transceiver_inventory = + inventory.transceivers.map(|tr| tr.inventory).unwrap_or_default(); let mut new_inventory = Inventory::default(); - for sp in inventory.sps { + for sp in mgs_inventory.sps { let i = sp.id.slot; let type_ = sp.id.type_; let sp = Sp { @@ -67,7 +76,18 @@ let id = ComponentId::from_sp_type_and_slot(type_, i)?; let component = match type_ { SpType::Sled => Component::Sled(sp), - SpType::Switch => Component::Switch(sp), + SpType::Switch => { + // Insert the switch's transceivers. + let switch_id = match i { + 0 => SwitchLocation::Switch0, + 1 => SwitchLocation::Switch1, + _ => unreachable!(), + }; + let transceivers = transceiver_inventory + .remove(&switch_id) + .unwrap_or_default(); + Component::Switch { sp, transceivers } + } SpType::Power => Component::Psc(sp), }; @@ -129,7 +149,7 @@ #[derive(Debug, Clone, Serialize, Deserialize)] pub enum Component { Sled(Sp), - Switch(Sp), + Switch { sp: Sp, transceivers: Vec<Transceiver> }, Psc(Sp), } @@ -145,7 +165,7 @@ pub fn sp(&self) -> &Sp { match self { Component::Sled(sp) => sp, - Component::Switch(sp) => sp, + Component::Switch { sp, ..
} => sp, Component::Psc(sp) => sp, } } diff --git a/wicket/src/state/status.rs b/wicket/src/state/status.rs index aa6fd001b7b..7b1a31c5897 100644 --- a/wicket/src/state/status.rs +++ b/wicket/src/state/status.rs @@ -21,6 +21,7 @@ const LIVENESS_THRESHOLD: Duration = Duration::from_secs(30); pub struct ServiceStatus { wicketd_last_seen: Option<Duration>, mgs_last_seen: Option<Duration>, + transceivers_last_seen: Option<Duration>, } impl ServiceStatus { @@ -44,6 +45,11 @@ *d += time; redraw |= d.as_secs() > prev; } + if let Some(d) = self.transceivers_last_seen.as_mut() { + let prev = d.as_secs(); + *d += time; + redraw |= d.as_secs() > prev; + } redraw } @@ -56,6 +62,10 @@ self.mgs_last_seen = Some(elapsed); } + pub fn reset_transceivers(&mut self, elapsed: Duration) { + self.transceivers_last_seen = Some(elapsed); + } + pub fn mgs_liveness(&self) -> Liveness { Self::liveness(self.mgs_last_seen) } @@ -64,6 +74,10 @@ Self::liveness(self.wicketd_last_seen) } + pub fn transceiver_liveness(&self) -> Liveness { + Self::liveness(self.transceivers_last_seen) + } + fn liveness(elapsed: Option<Duration>) -> Liveness { elapsed.map_or(Liveness::NoResponse, |d| { if d > LIVENESS_THRESHOLD { diff --git a/wicket/src/state/update.rs b/wicket/src/state/update.rs index f143ac36452..7bbcf8ca41e 100644 --- a/wicket/src/state/update.rs +++ b/wicket/src/state/update.rs @@ -18,11 +18,11 @@ use crate::{ use super::{ComponentId, ParsableComponentId, ALL_COMPONENT_IDS}; use omicron_common::api::internal::nexus::KnownArtifactKind; +use semver::Version; use serde::{Deserialize, Serialize}; use slog::Logger; use std::collections::BTreeMap; use std::fmt::Display; -use wicketd_client::types::SemverVersion; // Represents a version and the signature (optional) associated // with a particular artifact.
This allows for multiple versions @@ -30,14 +30,14 @@ use wicketd_client::types::SemverVersion; // sign is currently only used for RoT artifacts #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ArtifactVersions { - pub version: SemverVersion, + pub version: Version, pub sign: Option>, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RackUpdateState { pub items: BTreeMap, - pub system_version: Option, + pub system_version: Option, pub artifacts: Vec, pub artifact_versions: BTreeMap>, // The update item currently selected is recorded in @@ -114,7 +114,7 @@ impl RackUpdateState { pub fn update_artifacts_and_reports( &mut self, logger: &Logger, - system_version: Option, + system_version: Option, artifacts: Vec, reports: EventReportMap, ) { diff --git a/wicket/src/ui/defaults/style.rs b/wicket/src/ui/defaults/style.rs index 2ede7d1bb25..82043ca5ae8 100644 --- a/wicket/src/ui/defaults/style.rs +++ b/wicket/src/ui/defaults/style.rs @@ -137,6 +137,10 @@ pub fn text_warning() -> Style { Style::default().fg(OX_YELLOW) } +pub fn text_dim() -> Style { + Style::default().fg(OX_GRAY).add_modifier(Modifier::DIM) +} + pub const CHECK_ICON: char = '✓'; pub const CROSS_ICON: char = '✗'; pub const WARN_ICON: char = '⚠'; diff --git a/wicket/src/ui/main.rs b/wicket/src/ui/main.rs index ae6924071a3..196dd049ecb 100644 --- a/wicket/src/ui/main.rs +++ b/wicket/src/ui/main.rs @@ -197,6 +197,7 @@ impl MainScreen { let location_spans = location_spans(&state.wicketd_location); let wicketd_spans = state.service_status.wicketd_liveness().to_spans(); let mgs_spans = state.service_status.mgs_liveness().to_spans(); + let xcvr_spans = state.service_status.transceiver_liveness().to_spans(); let mut spans = vec![Span::styled("You are here: ", style::service())]; spans.extend_from_slice(&location_spans); spans.push(Span::styled(" | ", style::divider())); @@ -205,6 +206,9 @@ impl MainScreen { spans.push(Span::styled(" | ", style::divider())); spans.push(Span::styled("MGS: ", style::service())); spans.extend_from_slice(&mgs_spans); + spans.push(Span::styled(" | ", style::divider())); + spans.push(Span::styled("XCVRS: ", style::service())); + spans.extend_from_slice(&xcvr_spans); let main = Paragraph::new(Line::from(spans)); frame.render_widget(main, rect); diff --git a/wicket/src/ui/panes/overview.rs b/wicket/src/ui/panes/overview.rs index b807a13a973..c82753fc803 100644 --- a/wicket/src/ui/panes/overview.rs +++ b/wicket/src/ui/panes/overview.rs @@ -22,12 +22,22 @@ use ratatui::style::Style; use ratatui::text::{Line, Span, Text}; use ratatui::widgets::{Block, BorderType, Borders, Paragraph}; use ratatui::Frame; +use transceiver_controller::message::ExtendedStatus; +use transceiver_controller::ApplicationDescriptor; +use transceiver_controller::CmisDatapath; +use transceiver_controller::Datapath; +use transceiver_controller::PowerMode; +use transceiver_controller::PowerState; +use transceiver_controller::ReceiverPower; +use transceiver_controller::SffComplianceCode; +use transceiver_controller::VendorInfo; use wicket_common::inventory::RotState; use wicket_common::inventory::SpComponentCaboose; use wicket_common::inventory::SpComponentInfo; use wicket_common::inventory::SpComponentPresence; use wicket_common::inventory::SpIgnition; use wicket_common::inventory::SpState; +use wicket_common::inventory::Transceiver; enum PopupKind { Ignition, @@ -1115,9 +1125,180 @@ fn inventory_description(component: &Component) -> Text { } } + // If this is a switch, describe any transceivers. 
+ if let Component::Switch { transceivers, .. } = component { + // blank line separator + spans.push(Line::default()); + + let mut label = vec![Span::styled("Transceivers: ", label_style)]; + if transceivers.is_empty() { + label.push(Span::styled("None", warn_style)); + spans.push(label.into()); + } else { + spans.push(label.into()); + for transceiver in transceivers { + // Top-level bullet for the port itself. We're not sure what + // details we can print about the transceiver yet. + spans.push( + vec![ + bullet(), + Span::styled(&transceiver.port, label_style), + ] + .into(), + ); + + // Now print as much of the details for this transceiver as we + // can, starting with the vendor name. + let vendor_details = + extract_vendor_details(transceiver.vendor.as_ref()); + for detail in vendor_details { + spans.push( + vec![ + nest_bullet(), + Span::styled(detail.label, label_style), + Span::styled(detail.detail, detail.style), + ] + .into(), + ); + } + + let (media_type_style, media_type) = + extract_media_type(transceiver.datapath.as_ref()); + spans.push( + vec![ + nest_bullet(), + Span::styled("Media type: ", label_style), + Span::styled(media_type, media_type_style), + ] + .into(), + ); + + let (fpga_power_style, fpga_power) = + format_transceiver_status(transceiver.status.as_ref()); + spans.push( + vec![ + nest_bullet(), + Span::styled("Power at FPGA: ", label_style), + Span::styled(fpga_power, fpga_power_style), + ] + .into(), + ); + + let (module_power_style, module_power) = + format_transceiver_power_state(transceiver.power.as_ref()); + spans.push( + vec![ + nest_bullet(), + Span::styled("Power at module: ", label_style), + Span::styled(module_power, module_power_style), + ] + .into(), + ); + + let (temp_style, temp) = format_transceiver_temperature( + transceiver + .monitors + .as_ref() + .map(|m| m.temperature) + .ok_or(FailedToRead), + ); + spans.push( + vec![ + nest_bullet(), + Span::styled("Temperature: ", label_style), + Span::styled(temp, temp_style), + ] + .into(), + ); + + let (voltage_style, voltage) = format_transceiver_voltage( + transceiver + .monitors + .as_ref() + .map(|m| m.supply_voltage) + .ok_or(FailedToRead), + ); + spans.push( + vec![ + nest_bullet(), + Span::styled("Voltage: ", label_style), + Span::styled(voltage, voltage_style), + ] + .into(), + ); + + let n_lanes = n_expected_lanes(transceiver); + let mut line = vec![nest_bullet()]; + line.extend(format_transceiver_receive_power( + n_lanes, + transceiver + .monitors + .as_ref() + .map(|m| m.receiver_power.as_deref()) + .ok_or(FailedToRead), + )); + spans.push(line.into()); + + let mut line = vec![nest_bullet()]; + line.extend(format_transceiver_transmit_power( + n_lanes, + transceiver + .monitors + .as_ref() + .map(|m| m.transmitter_power.as_deref()) + .ok_or(FailedToRead), + )); + spans.push(line.into()); + } + } + }; + Text::from(spans) } +struct VendorLine { + label: String, + detail: String, + style: Style, +} + +fn extract_vendor_details(vendor: Option<&VendorInfo>) -> Vec { + let mut out = Vec::with_capacity(4); + if let Some(vendor) = &vendor { + out.push(VendorLine { + label: "Vendor: ".to_string(), + detail: vendor.vendor.name.clone(), + style: style::text_success(), + }); + out.push(VendorLine { + label: "Model: ".to_string(), + detail: vendor.vendor.part.clone(), + style: style::text_success(), + }); + out.push(VendorLine { + label: "Serial: ".to_string(), + detail: vendor.vendor.serial.clone(), + style: style::text_success(), + }); + out.push(VendorLine { + label: "Management interface: ".to_string(), 
+            detail: vendor.identifier.to_string(),
+            style: style::text_success(),
+        });
+    } else {
+        for label in
+            ["Vendor: ", "Model: ", "Serial: ", "Management interface: "]
+        {
+            out.push(VendorLine {
+                label: label.to_string(),
+                detail: "Failed to read!".to_string(),
+                style: style::text_warning(),
+            })
+        }
+    }
+    out
+}
+
 // Helper function for appending caboose details to a section of the
 // inventory (used for both SP and RoT above).
 fn append_caboose(
@@ -1179,3 +1360,292 @@ fn append_caboose(
         vec![prefix.clone(), Span::styled("Version: ", label_style)];
     version_spans.push(Span::styled(version, ok_style));
 }
+
+/// Helper to indicate when we've failed to read data from a transceiver module.
+struct FailedToRead;
+
+impl FailedToRead {
+    fn to_ui_elements(self) -> (Style, String) {
+        (style::text_warning(), String::from("Failed to read!"))
+    }
+}
+
+fn unsupported_ui_elements() -> (Style, String) {
+    (style::text_dim(), String::from("Unsupported"))
+}
+
+// Print relevant transceiver status bits.
+//
+// We know the transceiver is present by construction, so print the power state
+// and any faults.
+fn format_transceiver_status(
+    status: Option<&ExtendedStatus>,
+) -> (Style, String) {
+    let Some(status) = status else { return FailedToRead.to_ui_elements() };
+    if status.contains(ExtendedStatus::ENABLED) {
+        if status.contains(ExtendedStatus::POWER_GOOD) {
+            return (style::text_success(), String::from("Enabled"));
+        }
+        let message = if status.contains(ExtendedStatus::FAULT_POWER_TIMEOUT) {
+            String::from("Timeout fault")
+        } else if status.contains(ExtendedStatus::FAULT_POWER_LOST) {
+            String::from("Power lost")
+        } else if status.contains(ExtendedStatus::DISABLED_BY_SP) {
+            String::from("Disabled by SP")
+        } else {
+            format!("Unknown: {}", status)
+        };
+        (style::text_failure(), message)
+    } else {
+        (style::text_warning(), String::from("Disabled"))
+    }
+}
+
+fn format_transceiver_power_state(
+    power: Option<&PowerMode>,
+) -> (Style, String) {
+    let Some(power) = power else { return FailedToRead.to_ui_elements() };
+    let style = match power.state {
+        PowerState::Off => style::text_warning(),
+        PowerState::Low => style::text_warning(),
+        PowerState::High => style::text_success(),
+    };
+    (
+        style,
+        format!(
+            "{}{}",
+            power.state,
+            // "Software override" means that the module's power is controlled
+            // by writing to a specific register, rather than through a hardware
+            // signal / pin.
+            if matches!(power.software_override, Some(true)) {
+                " (Software control)"
+            } else {
+                ""
+            }
+        ),
+    )
+}
+
+/// Format the transceiver temperature.
+fn format_transceiver_temperature(
+    maybe_temp: Result<Option<f32>, FailedToRead>,
+) -> (Style, String) {
+    let t = match maybe_temp {
+        Ok(Some(t)) => t,
+        Ok(None) => return unsupported_ui_elements(),
+        Err(e) => return e.to_ui_elements(),
+    };
+    const MIN_WARNING_TEMP: f32 = 15.0;
+    const MAX_WARNING_TEMP: f32 = 50.0;
+    let temp = format!("{t:0.2} °C");
+    let style = if t < MIN_WARNING_TEMP || t > MAX_WARNING_TEMP {
+        style::text_warning()
+    } else {
+        style::text_success()
+    };
+    (style, temp)
+}
+
+/// Format the transceiver voltage.
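+///
+/// The warning window used below (3.0-3.7 V) brackets the nominal 3.3 V
+/// supply for these modules; readings outside it are styled as warnings
+/// rather than treated as hard errors.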
+fn format_transceiver_voltage(
+    maybe_voltage: Result<Option<f32>, FailedToRead>,
+) -> (Style, String) {
+    let v = match maybe_voltage {
+        Ok(Some(v)) => v,
+        Ok(None) => return unsupported_ui_elements(),
+        Err(e) => return e.to_ui_elements(),
+    };
+    const MIN_WARNING_VOLTAGE: f32 = 3.0;
+    const MAX_WARNING_VOLTAGE: f32 = 3.7;
+    let voltage = format!("{v:0.2} V");
+    let style = if v < MIN_WARNING_VOLTAGE || v > MAX_WARNING_VOLTAGE {
+        style::text_warning()
+    } else {
+        style::text_success()
+    };
+    (style, voltage)
+}
+
+/// Format the transceiver received optical power.
+fn format_transceiver_receive_power(
+    n_lanes: Option<usize>,
+    receiver_power: Result<Option<&[ReceiverPower]>, FailedToRead>,
+) -> Vec<Span<'static>> {
+    let pow = match receiver_power {
+        Ok(Some(p)) if !p.is_empty() => p,
+        // Either not supported at all, or list of power is empty.
+        Ok(None) | Ok(Some(_)) => {
+            let elems = unsupported_ui_elements();
+            return vec![
+                Span::styled("Rx power: ", style::text_label()),
+                Span::styled(elems.1, elems.0),
+            ];
+        }
+        // Failed to read entirely
+        Err(e) => {
+            let elems = e.to_ui_elements();
+            return vec![
+                Span::styled("Rx power: ", style::text_label()),
+                Span::styled(elems.1, elems.0),
+            ];
+        }
+    };
+
+    const MIN_WARNING_POWER: f32 = 0.5;
+    const MAX_WARNING_POWER: f32 = 2.5;
+    assert!(!pow.is_empty());
+    let mut out = Vec::with_capacity(pow.len() + 2);
+
+    // Push the label itself.
+    let kind = if matches!(&pow[0], ReceiverPower::Average(_)) {
+        "Avg"
+    } else {
+        "Peak-to-peak"
+    };
+    let label = format!("Rx power (mW, {}): [", kind);
+    out.push(Span::styled(label, style::text_label()));
+
+    // Push each Rx power measurement, styling it if it's outside the
+    // warning limits.
+    let n_lanes = n_lanes.unwrap_or(pow.len());
+    for (lane, meas) in pow[..n_lanes].iter().enumerate() {
+        let style = if meas.value() < MIN_WARNING_POWER
+            || meas.value() > MAX_WARNING_POWER
+        {
+            style::text_warning()
+        } else {
+            style::text_success()
+        };
+        let measurement = format!("{:0.3}", meas.value());
+        out.push(Span::styled(measurement, style));
+        if lane < n_lanes - 1 {
+            out.push(Span::styled(", ", style::text_label()));
+        }
+    }
+    out.push(Span::styled("]", style::text_label()));
+    out
+}
+
+/// Format the transceiver transmitted optical power.
+fn format_transceiver_transmit_power(
+    n_lanes: Option<usize>,
+    transmitter_power: Result<Option<&[f32]>, FailedToRead>,
+) -> Vec<Span<'static>> {
+    let pow = match transmitter_power {
+        Ok(Some(p)) if !p.is_empty() => p,
+        // Either not supported at all, or list of power is empty.
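+        // `Ok(None)` means the module doesn't advertise this monitor at all,
+        // while an empty list means it advertised the monitor but reported no
+        // per-lane values; both render as "Unsupported".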
+        Ok(None) | Ok(Some(_)) => {
+            let elems = unsupported_ui_elements();
+            return vec![
+                Span::styled("Tx power: ", style::text_label()),
+                Span::styled(elems.1, elems.0),
+            ];
+        }
+        // Failed to read entirely
+        Err(e) => {
+            let elems = e.to_ui_elements();
+            return vec![
+                Span::styled("Tx power: ", style::text_label()),
+                Span::styled(elems.1, elems.0),
+            ];
+        }
+    };
+
+    const MIN_WARNING_POWER: f32 = 0.5;
+    const MAX_WARNING_POWER: f32 = 2.5;
+    assert!(!pow.is_empty());
+    let mut out = Vec::with_capacity(pow.len() + 2);
+    out.push(Span::styled("Tx power (mW): [", style::text_label()));
+    let n_lanes = n_lanes.unwrap_or(pow.len());
+    for (lane, meas) in pow[..n_lanes].iter().enumerate() {
+        let style = if *meas < MIN_WARNING_POWER || *meas > MAX_WARNING_POWER {
+            style::text_warning()
+        } else {
+            style::text_success()
+        };
+        let measurement = format!("{:0.3}", meas);
+        out.push(Span::styled(measurement, style));
+        if lane < n_lanes - 1 {
+            out.push(Span::styled(", ", style::text_label()));
+        }
+    }
+    out.push(Span::styled("]", style::text_label()));
+    out
+}
+
+fn extract_media_type(datapath: Option<&Datapath>) -> (Style, String) {
+    let Some(datapath) = datapath else {
+        return FailedToRead.to_ui_elements();
+    };
+    match datapath {
+        Datapath::Cmis { datapaths, .. } => {
+            let Some(media_type) = datapaths.values().next().map(|p| {
+                let CmisDatapath { application, .. } = p;
+                let ApplicationDescriptor { media_id, .. } = application;
+                media_id.to_string()
+            }) else {
+                return (style::text_warning(), String::from("Unknown"));
+            };
+            (style::text_success(), media_type)
+        }
+        Datapath::Sff8636 { specification, .. } => {
+            (style::text_success(), specification.to_string())
+        }
+    }
+}
+
+/// Return the number of expected media lanes in the transceiver.
+///
+/// If we aren't sure, return `None`.
+fn n_expected_lanes(tr: &Transceiver) -> Option<usize> {
+    let Some(datapath) = &tr.datapath else {
+        return None;
+    };
+    match datapath {
+        Datapath::Cmis { datapaths, .. } => datapaths
+            .values()
+            .next()
+            .map(|CmisDatapath { lane_status, .. }| lane_status.len())
+            .or(Some(4)),
+        Datapath::Sff8636 { specification, .. } => match specification {
+            SffComplianceCode::Extended(code) => {
+                use transceiver_controller::ExtendedSpecificationComplianceCode::*;
+                match code {
+                    Id100GBaseSr4 => Some(4),
+                    Id100GBaseLr4 => Some(4),
+                    Id100GBCwdm4 => Some(4),
+                    Id100GBaseCr4 => Some(4),
+                    Id100GSwdm4 => Some(4),
+                    Id100GBaseFr1 => Some(1),
+                    Id100GBaseLr1 => Some(1),
+                    Id200GBaseFr4 => Some(4),
+                    Id200GBaseLr4 => Some(4),
+                    Id400GBaseDr4 => Some(4),
+                    Id100GPam4BiDi => Some(2),
+                    Unspecified | Id100GAoc5en5 | Id100GBaseEr4
+                    | Id100GBaseSr10 | Id100GPsm4 | Id100GAcc | Obsolete
+                    | Id25GBaseCrS | Id25GBaseCrN | Id10MbEth
+                    | Id40GBaseEr4 | Id4x10GBaseSr | Id40GPsm4
+                    | IdG959p1i12d1 | IdG959p1s12d2 | IdG9592p1l1d1
+                    | Id10GBaseT | Id100GClr4 | Id100GAoc10en12
+                    | Id100GAcc10en12 | Id100GeDwdm2 | Id100GWdm
+                    | Id10GBaseTSr | Id5GBaseT | Id2p5GBaseT | Id40GSwdm4
+                    | Id10GBaseBr | Id25GBaseBr | Id50GBaseBr | Id4wdm10
+                    | Id4wdm20 | Id4wdm40 | Id100GBaseDr | Id100GFr
+                    | Id100GLr | Id100GBaseSr1 | Id100GBaseVr1
+                    | Id100GBaseSr12 | Id100GBaseVr12 | Id100GLr120Caui4
+                    | Id100GLr130Caui4 | Id100GLr140Caui4 | Id100GLr120
+                    | Id100GLr130 | Id100GLr140 | IdAcc50GAUI10en6
+                    | IdAcc50GAUI10en62 | IdAcc50GAUI2p6en4
+                    | IdAcc50GAUI2p6en41 | Id100GBaseCr1 | Id50GBaseCr
+                    | Id50GBaseSr | Id50GBaseFr | Id50GBaseEr | Id200GPsm4
+                    | Id50GBaseLr | Id400GBaseFr4 | Id400GBaseLr4
+                    | Id400GGLr410 | Id400GBaseZr | Id256GfcSw4 | Id64Gfc
+                    | Id128Gfc | Reserved(_) => None,
+                }
+            }
+            SffComplianceCode::Ethernet(_) => None,
+        },
+    }
+}
diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs
index 635878e080b..0123b253ac6 100644
--- a/wicket/src/wicketd.rs
+++ b/wicket/src/wicketd.rs
@@ -496,14 +496,8 @@ impl WicketdManager {
                 // TODO: We should really be using ETAGs here
                 match client.get_inventory(&params).await {
                     Ok(val) => match val.into_inner() {
-                        GetInventoryResponse::Response {
-                            inventory,
-                            mgs_last_seen,
-                        } => {
-                            let _ = tx.send(Event::Inventory {
-                                inventory,
-                                mgs_last_seen,
-                            });
+                        GetInventoryResponse::Response { inventory } => {
+                            let _ = tx.send(Event::Inventory { inventory });
                         }
                         GetInventoryResponse::Unavailable => {
                             // Nothing to do here. We keep a running total from
diff --git a/wicketd-api/Cargo.toml b/wicketd-api/Cargo.toml
index 75c3a534615..e689612724e 100644
--- a/wicketd-api/Cargo.toml
+++ b/wicketd-api/Cargo.toml
@@ -15,6 +15,7 @@ omicron-passwords.workspace = true
 omicron-uuid-kinds.workspace = true
 omicron-workspace-hack.workspace = true
 schemars.workspace = true
+semver.workspace = true
 serde.workspace = true
 sled-hardware-types.workspace = true
 slog.workspace = true
diff --git a/wicketd-api/src/lib.rs b/wicketd-api/src/lib.rs
index 47533b892d8..8bf96d7f898 100644
--- a/wicketd-api/src/lib.rs
+++ b/wicketd-api/src/lib.rs
@@ -11,19 +11,18 @@ use dropshot::RequestContext;
 use dropshot::StreamingBody;
 use dropshot::TypedBody;
 use gateway_client::types::IgnitionCommand;
-use omicron_common::api::external::SemverVersion;
 use omicron_common::update::ArtifactHashId;
 use omicron_common::update::ArtifactId;
 use omicron_uuid_kinds::RackInitUuid;
 use omicron_uuid_kinds::RackResetUuid;
 use schemars::JsonSchema;
+use semver::Version;
 use serde::Deserialize;
 use serde::Serialize;
 use sled_hardware_types::Baseboard;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
 use std::net::Ipv6Addr;
-use std::time::Duration;
 use wicket_common::inventory::RackV1Inventory;
 use wicket_common::inventory::SpIdentifier;
 use wicket_common::inventory::SpType;
@@ -456,7 +455,7 @@ pub struct GetInventoryParams {
 #[derive(Clone, Debug, JsonSchema, Serialize)]
 #[serde(rename_all = "snake_case", tag = "type", content = "data")]
 pub enum GetInventoryResponse {
-    Response { inventory: RackV1Inventory, mgs_last_seen: Duration },
+    Response { inventory: RackV1Inventory },
     Unavailable,
 }
@@ -473,7 +472,7 @@ pub struct InstallableArtifacts {
 #[derive(Clone, Debug, JsonSchema, Serialize)]
 #[serde(rename_all = "snake_case")]
 pub struct GetArtifactsAndEventReportsResponse {
-    pub system_version: Option<SemverVersion>,
+    pub system_version: Option<Version>,
 
     /// Map of artifacts we ingested from the most-recently-uploaded TUF
     /// repository to a list of artifacts we're serving over the bootstrap
diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml
index 3145add700b..5bd7538ade9 100644
--- a/wicketd/Cargo.toml
+++ b/wicketd/Cargo.toml
@@ -34,7 +34,6 @@ illumos-utils.workspace = true
 internal-dns-resolver.workspace = true
 internal-dns-types.workspace = true
 itertools.workspace = true
-once_cell.workspace = true
 oxnet.workspace = true
 reqwest.workspace = true
 schemars.workspace = true
@@ -49,6 +48,7 @@ tokio-stream.workspace = true
 tokio-util.workspace = true
 toml.workspace = true
 tough.workspace = true
+transceiver-controller.workspace = true
 uuid.workspace = true
 bootstrap-agent-client.workspace = true
@@ -68,6 +68,7 @@ wicket-common.workspace = true
 wicketd-api.workspace = true
 wicketd-client.workspace = true
 omicron-workspace-hack.workspace = true
+semver.workspace = true
 
 [[bin]]
 name = "wicketd"
diff --git a/wicketd/src/artifacts/store.rs b/wicketd/src/artifacts/store.rs
index 8ab83335a8e..7d89c6f562b 100644
--- a/wicketd/src/artifacts/store.rs
+++ b/wicketd/src/artifacts/store.rs
@@ -2,8 +2,8 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
-use omicron_common::api::external::SemverVersion;
 use omicron_common::update::ArtifactHashId;
+use semver::Version;
 use slog::Logger;
 use std::sync::Arc;
 use std::sync::Mutex;
@@ -40,7 +40,7 @@ impl WicketdArtifactStore {
 
     pub(crate) fn system_version_and_artifact_ids(
         &self,
-    ) -> Option<(SemverVersion, Vec<InstallableArtifacts>)> {
+    ) -> Option<(Version, Vec<InstallableArtifacts>)> {
         let artifacts = self.artifacts_with_plan.lock().unwrap();
         let artifacts = artifacts.as_ref()?;
         let system_version = artifacts.plan().system_version.clone();
diff --git a/wicketd/src/context.rs b/wicketd/src/context.rs
index 307898200be..5bbe9a8d9e0 100644
--- a/wicketd/src/context.rs
+++ b/wicketd/src/context.rs
@@ -7,6 +7,7 @@
 use crate::bootstrap_addrs::BootstrapPeers;
 use crate::preflight_check::PreflightCheckerHandler;
 use crate::rss_config::CurrentRssConfig;
+use crate::transceivers::Handle as TransceiverHandle;
 use crate::update_tracker::UpdateTracker;
 use crate::MgsHandle;
 use anyhow::anyhow;
@@ -27,6 +28,7 @@ pub struct ServerContext {
     pub(crate) bind_address: SocketAddrV6,
     pub mgs_handle: MgsHandle,
     pub mgs_client: gateway_client::Client,
+    pub transceiver_handle: TransceiverHandle,
     pub(crate) log: slog::Logger,
     /// Our cached copy of what MGS's `/local/switch-id` endpoint returns; it
     /// identifies whether we're connected to switch 0 or 1 and cannot change
@@ -95,7 +97,12 @@ impl ServerContext {
 
         // Ignore failures on set - that just means we lost the race and
         // another concurrent call to us already set it.
-        _ = self.local_switch_id.set(switch_id);
+        //
+        // However, we do need to notify the transceiver-fetching task
+        // that we've learned our ID.
+        if self.local_switch_id.set(switch_id).is_ok() {
+            self.transceiver_handle.set_local_switch_id(switch_id);
+        }
 
         Some(switch_id)
     }
diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs
index 6cd8ee813a4..1291d746402 100644
--- a/wicketd/src/http_entrypoints.rs
+++ b/wicketd/src/http_entrypoints.rs
@@ -6,9 +6,12 @@
 
 use crate::helpers::sps_to_string;
 use crate::helpers::SpIdentifierDisplay;
-use crate::mgs::GetInventoryError;
+use crate::mgs::GetInventoryError as GetMgsInventoryError;
+use crate::mgs::GetInventoryResponse as GetMgsInventoryResponse;
 use crate::mgs::MgsHandle;
 use crate::mgs::ShutdownInProgress;
+use crate::transceivers::GetTransceiversResponse;
+use crate::transceivers::Handle as TransceiverHandle;
 use crate::SmfConfigValues;
 use bootstrap_agent_client::types::RackOperationStatus;
 use dropshot::ApiDescription;
@@ -27,9 +30,11 @@
 use sled_hardware_types::Baseboard;
 use slog::o;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
+use wicket_common::inventory::MgsV1InventorySnapshot;
 use wicket_common::inventory::RackV1Inventory;
 use wicket_common::inventory::SpIdentifier;
 use wicket_common::inventory::SpType;
+use wicket_common::inventory::TransceiverInventorySnapshot;
 use wicket_common::rack_setup::GetBgpAuthKeyInfoResponse;
 use wicket_common::rack_setup::PutRssUserConfigInsensitive;
 use wicket_common::rack_update::AbortUpdateOptions;
@@ -75,9 +80,15 @@ impl WicketdApi for WicketdApiImpl {
 
     // We can't run RSS if we don't have an inventory from MGS yet; we always
     // need to fill in the bootstrap sleds first.
-    let inventory = inventory_or_unavail(&ctx.mgs_handle).await?;
+    let inventory =
+        mgs_inventory_or_unavail(&ctx.mgs_handle, &ctx.transceiver_handle)
+            .await?;
 
     let mut config = ctx.rss_config.lock().unwrap();
+    let inventory = inventory
+        .mgs
+        .expect("verified by `mgs_inventory_or_unavail`")
+        .inventory;
     config.update_with_inventory_and_bootstrap_peers(
         &inventory,
         &ctx.bootstrap_peers,
@@ -95,9 +106,15 @@
     // We can't run RSS if we don't have an inventory from MGS yet; we always
     // need to fill in the bootstrap sleds first.
-    let inventory = inventory_or_unavail(&ctx.mgs_handle).await?;
+    let inventory =
+        mgs_inventory_or_unavail(&ctx.mgs_handle, &ctx.transceiver_handle)
+            .await?;
 
     let mut config = ctx.rss_config.lock().unwrap();
+    let inventory = inventory
+        .mgs
+        .expect("verified by `mgs_inventory_or_unavail`")
+        .inventory;
     config.update_with_inventory_and_bootstrap_peers(
         &inventory,
         &ctx.bootstrap_peers,
@@ -347,23 +364,62 @@
         body_params: TypedBody<GetInventoryParams>,
     ) -> Result<HttpResponseOk<GetInventoryResponse>, HttpError> {
         let GetInventoryParams { force_refresh } = body_params.into_inner();
-        match rqctx
+
+        // Fetch the MGS-specific inventory first.
+        let maybe_mgs_inventory = match rqctx
             .context()
             .mgs_handle
             .get_inventory_refreshing_sps(force_refresh)
             .await
         {
-            Ok(response) => Ok(HttpResponseOk(response)),
-            Err(GetInventoryError::InvalidSpIdentifier) => {
-                Err(HttpError::for_unavail(
+            Ok(GetMgsInventoryResponse::Response {
+                inventory,
+                mgs_last_seen,
+            }) => Some((inventory, mgs_last_seen)),
+            Ok(GetMgsInventoryResponse::Unavailable) => None,
+            Err(GetMgsInventoryError::InvalidSpIdentifier) => {
+                return Err(HttpError::for_unavail(
                     None,
                     "Invalid SP identifier in request".into(),
+                ));
+            }
+            Err(GetMgsInventoryError::ShutdownInProgress) => {
+                return Err(HttpError::for_unavail(
+                    None,
+                    "Server is shutting down".into(),
                 ))
             }
-            Err(GetInventoryError::ShutdownInProgress) => Err(
-                HttpError::for_unavail(None, "Server is shutting down".into()),
-            ),
+        };
+
+        // Fetch the transceiver information from the SP.
+        let maybe_transceiver_inventory =
+            match rqctx.context().transceiver_handle.get_transceivers() {
+                GetTransceiversResponse::Response {
+                    transceivers,
+                    transceivers_last_seen,
+                } => Some((transceivers, transceivers_last_seen)),
+                GetTransceiversResponse::Unavailable => None,
+            };
+
+        // Return 503 if both MGS and transceiver inventory are missing,
+        // otherwise return what we can.
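+        //
+        // In other words, an MGS-only or transceivers-only result is a valid
+        // partial response; only the total absence of data is an error.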
+        if maybe_mgs_inventory.is_none()
+            && maybe_transceiver_inventory.is_none()
+        {
+            return Err(HttpError::for_unavail(
+                None,
+                "Rack inventory not yet available".into(),
+            ));
         }
+        let mgs = maybe_mgs_inventory.map(|(inventory, last_seen)| {
+            MgsV1InventorySnapshot { inventory, last_seen }
+        });
+        let transceivers =
+            maybe_transceiver_inventory.map(|(inventory, last_seen)| {
+                TransceiverInventorySnapshot { inventory, last_seen }
+            });
+        let inventory = RackV1Inventory { mgs, transceivers };
+        Ok(HttpResponseOk(GetInventoryResponse::Response { inventory }))
     }
 
     async fn put_repository(
@@ -399,7 +455,11 @@
         rqctx: RequestContext<ServerContext>,
     ) -> Result<HttpResponseOk<GetLocationResponse>, HttpError> {
         let rqctx = rqctx.context();
-        let inventory = inventory_or_unavail(&rqctx.mgs_handle).await?;
+        let inventory = mgs_inventory_or_unavail(
+            &rqctx.mgs_handle,
+            &rqctx.transceiver_handle,
+        )
+        .await?;
 
         let switch_id = rqctx.local_switch_id().await;
         let sled_baseboard = rqctx.baseboard.clone();
@@ -407,7 +467,14 @@
         let mut switch_baseboard = None;
         let mut sled_id = None;
 
-        for sp in &inventory.sps {
+        // Safety: `mgs_inventory_or_unavail` returns an error if there is no
+        // MGS-derived inventory, so the option is always `Some(_)`.
+        for sp in &inventory
+            .mgs
+            .expect("checked by `mgs_inventory_or_unavail`")
+            .inventory
+            .sps
+        {
             if Some(sp.id) == switch_id {
                 switch_baseboard = sp.state.as_ref().map(|state| {
                     // TODO-correctness `new_gimlet` isn't the right name: this is a
@@ -505,7 +572,7 @@
         // Next, do we have the states of the target SP?
         let sp_states = match inventory {
-            GetInventoryResponse::Response { inventory, .. } => inventory
+            GetMgsInventoryResponse::Response { inventory, .. } => inventory
                 .sps
                 .into_iter()
                 .filter_map(|sp| {
@@ -520,7 +587,7 @@
                 }
             })
             .collect(),
-            GetInventoryResponse::Unavailable => BTreeMap::new(),
+            GetMgsInventoryResponse::Unavailable => BTreeMap::new(),
         };
 
         for target in &params.targets {
@@ -850,19 +917,41 @@
 }
 
 // Get the current inventory or return a 503 Unavailable.
-async fn inventory_or_unavail(
+//
+// Note that 503 is returned if we can't get the MGS-based inventory. If we fail
+// to get the transceivers, that's not considered a fatal 503.
+async fn mgs_inventory_or_unavail(
     mgs_handle: &MgsHandle,
+    transceiver_handle: &TransceiverHandle,
 ) -> Result<RackV1Inventory, HttpError> {
-    match mgs_handle.get_cached_inventory().await {
-        Ok(GetInventoryResponse::Response { inventory, .. }) => Ok(inventory),
-        Ok(GetInventoryResponse::Unavailable) => Err(HttpError::for_unavail(
-            None,
-            "Rack inventory not yet available".into(),
-        )),
+    let mgs = match mgs_handle.get_cached_inventory().await {
+        Ok(GetMgsInventoryResponse::Response { inventory, mgs_last_seen }) => {
+            Some(MgsV1InventorySnapshot { inventory, last_seen: mgs_last_seen })
+        }
+        Ok(GetMgsInventoryResponse::Unavailable) => {
+            return Err(HttpError::for_unavail(
+                None,
+                "Rack inventory not yet available".into(),
+            ))
+        }
         Err(ShutdownInProgress) => {
-            Err(HttpError::for_unavail(None, "Server is shutting down".into()))
+            return Err(HttpError::for_unavail(
+                None,
+                "Server is shutting down".into(),
+            ));
         }
-    }
+    };
+    let transceivers = match transceiver_handle.get_transceivers() {
+        GetTransceiversResponse::Response {
+            transceivers,
+            transceivers_last_seen,
+        } => Some(TransceiverInventorySnapshot {
+            inventory: transceivers,
+            last_seen: transceivers_last_seen,
+        }),
+        GetTransceiversResponse::Unavailable => None,
+    };
+    Ok(RackV1Inventory { mgs, transceivers })
 }
 
 fn http_error_from_client_error(
diff --git a/wicketd/src/lib.rs b/wicketd/src/lib.rs
index 1fbf278ead3..dbdfdb01f6e 100644
--- a/wicketd/src/lib.rs
+++ b/wicketd/src/lib.rs
@@ -13,6 +13,7 @@ pub mod mgs;
 mod nexus_proxy;
 mod preflight_check;
 mod rss_config;
+mod transceivers;
 mod update_tracker;
 
 use anyhow::{anyhow, bail, Context, Result};
@@ -41,6 +42,7 @@
 use std::{
     net::{SocketAddr, SocketAddrV6},
     sync::Arc,
 };
+use transceivers::Manager as TransceiverManager;
 pub use update_tracker::{StartUpdateError, UpdateTracker};
 
 /// Command line arguments for wicketd
@@ -141,6 +143,12 @@ impl Server {
             mgs_manager.run().await;
         });
 
+        let transceiver_manager = TransceiverManager::new(&log);
+        let transceiver_handle = transceiver_manager.get_handle();
+        tokio::spawn(async move {
+            transceiver_manager.run().await;
+        });
+
         let (ipr_artifact, ipr_update_tracker) =
             crate::installinator_progress::new(&log);
 
@@ -187,6 +195,7 @@
             bind_address: args.address,
             mgs_handle,
             mgs_client,
+            transceiver_handle,
             log: log.clone(),
             local_switch_id: OnceLock::new(),
             bootstrap_peers,
diff --git a/wicketd/src/mgs.rs b/wicketd/src/mgs.rs
index da09ac5802d..1dfcf56ec06 100644
--- a/wicketd/src/mgs.rs
+++ b/wicketd/src/mgs.rs
@@ -13,8 +13,7 @@
 use std::net::SocketAddrV6;
 use tokio::sync::{mpsc, oneshot};
 use tokio::time::{Duration, Instant};
 use tokio_stream::StreamMap;
-use wicket_common::inventory::{RackV1Inventory, SpIdentifier, SpInventory};
-use wicketd_api::GetInventoryResponse;
+use wicket_common::inventory::{MgsV1Inventory, SpIdentifier, SpInventory};
 
 use self::inventory::{
     FetchedIgnitionState, FetchedSpData, IgnitionPresence,
@@ -34,6 +33,13 @@
 const MGS_TIMEOUT: Duration = Duration::from_secs(30);
 
 // * Room for some timeouts and re-requests from wicket.
 const CHANNEL_CAPACITY: usize = 8;
 
+/// Response to a request for MGS-specific inventory information.
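+///
+/// `Response` carries the MGS-derived inventory along with how long ago we
+/// last heard from MGS; `Unavailable` means we have not heard from it yet.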
+#[derive(Debug)]
+pub enum GetInventoryResponse {
+    Response { inventory: MgsV1Inventory, mgs_last_seen: Duration },
+    Unavailable,
+}
+
 #[derive(Debug)]
 enum MgsRequest {
     GetInventory {
@@ -256,7 +262,7 @@
         if self.inventory.is_empty() {
             GetInventoryResponse::Unavailable
         } else {
-            let inventory = RackV1Inventory {
+            let inventory = MgsV1Inventory {
                 sps: self.inventory.values().cloned().collect(),
             };
diff --git a/wicketd/src/rss_config.rs b/wicketd/src/rss_config.rs
index badcb24a3f4..e16fc677a50 100644
--- a/wicketd/src/rss_config.rs
+++ b/wicketd/src/rss_config.rs
@@ -24,7 +24,6 @@
 use omicron_common::address::Ipv6Subnet;
 use omicron_common::address::RACK_PREFIX;
 use omicron_common::api::external::AllowedSourceIps;
 use omicron_common::api::external::SwitchLocation;
-use once_cell::sync::Lazy;
 use sled_hardware_types::Baseboard;
 use slog::debug;
 use slog::warn;
@@ -34,8 +33,9 @@
 use std::collections::BTreeSet;
 use std::mem;
 use std::net::IpAddr;
 use std::net::Ipv6Addr;
+use std::sync::LazyLock;
 use thiserror::Error;
-use wicket_common::inventory::RackV1Inventory;
+use wicket_common::inventory::MgsV1Inventory;
 use wicket_common::inventory::SpType;
 use wicket_common::rack_setup::BgpAuthKey;
 use wicket_common::rack_setup::BgpAuthKeyId;
@@ -55,7 +55,7 @@
 // TODO-correctness For now, we always use the same rack subnet when running
 // RSS. When we get to multirack, this will be wrong, but there are many other
 // RSS-related things that need to change then too.
-static RACK_SUBNET: Lazy<Ipv6Subnet<RACK_PREFIX>> = Lazy::new(|| {
+static RACK_SUBNET: LazyLock<Ipv6Subnet<RACK_PREFIX>> = LazyLock::new(|| {
     let ip = Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0100, 0, 0, 0, 0);
     Ipv6Subnet::new(ip)
 });
@@ -114,7 +114,7 @@ impl CurrentRssConfig {
 
     pub(crate) fn update_with_inventory_and_bootstrap_peers(
         &mut self,
-        inventory: &RackV1Inventory,
+        inventory: &MgsV1Inventory,
         bootstrap_peers: &BootstrapPeers,
         log: &slog::Logger,
     ) {
diff --git a/wicketd/src/transceivers.rs b/wicketd/src/transceivers.rs
new file mode 100644
index 00000000000..e173f422ef2
--- /dev/null
+++ b/wicketd/src/transceivers.rs
@@ -0,0 +1,382 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Fetching transceiver state from the SP.
+
+use gateway_client::types::{SpIdentifier, SpType};
+use omicron_common::api::external::SwitchLocation;
+use slog::{debug, error, Logger};
+use std::{
+    collections::HashMap,
+    sync::{Arc, Mutex},
+    time::Duration,
+};
+use tokio::{
+    sync::{mpsc, watch},
+    time::Instant,
+};
+use transceiver_controller::{message::ExtendedStatus, SpRequest};
+use transceiver_controller::{ConfigBuilder, Controller, Error, ModuleId};
+use wicket_common::inventory::Transceiver;
+
+/// Type alias for a map of all transceivers on each switch.
+pub type TransceiverMap = HashMap<SwitchLocation, Vec<Transceiver>>;
+
+// Queue size for passing messages between the transceiver fetch tasks and
+// the manager.
+const CHANNEL_CAPACITY: usize = 4;
+
+// Interval at which we poll transceivers ourselves, independent of any
+// requests.
+const TRANSCEIVER_POLL_INTERVAL: Duration = Duration::from_secs(5);
+
+// IP interface we use when polling transceivers on the local switch.
+//
+// NOTE: This always refers to _our_ switch, the one we're currently on,
+// regardless of whether we're running on switch0 or switch1.
+//
+// We will need to change these if we address
+// https://github.com/oxidecomputer/dendrite/issues/221.
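+//
+// As an illustration of the mapping (not normative): when we're running on
+// switch1, `sidecar0` still reaches switch1's own transceivers, and
+// `sidecar1` reaches switch0's.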
+const LOCAL_SWITCH_SP_INTERFACE: &str = "sidecar0";
+
+// IP interface we use when polling transceivers on the other switch.
+//
+// NOTE: This always refers to _the other_ switch, the one we're not currently
+// running on, regardless of whether we're running on switch0 or switch1.
+const OTHER_SWITCH_SP_INTERFACE: &str = "sidecar1";
+
+#[derive(Clone, Debug)]
+pub enum GetTransceiversResponse {
+    Response { transceivers: TransceiverMap, transceivers_last_seen: Duration },
+    Unavailable,
+}
+
+/// Handle for interacting with the transceiver manager.
+pub struct Handle {
+    switch_location_tx: watch::Sender<Option<SwitchLocation>>,
+    transceivers: Arc<Mutex<GetTransceiversResponse>>,
+}
+
+impl Handle {
+    /// Notify the transceiver manager that we've learned our switch location.
+    ///
+    /// # Panics
+    ///
+    /// This panics if called with an `SpIdentifier` that doesn't have an
+    /// `SpType::Switch`.
+    pub(crate) fn set_local_switch_id(&self, switch: SpIdentifier) {
+        let SpIdentifier { slot, type_: SpType::Switch } = switch else {
+            panic!("Should only be called with SpType::Switch");
+        };
+        let loc = match slot {
+            0 => SwitchLocation::Switch0,
+            1 => SwitchLocation::Switch1,
+            _ => unreachable!(),
+        };
+        self.switch_location_tx
+            .send(Some(loc))
+            .expect("Should always have a receiver");
+    }
+
+    /// Get the current transceiver state, if we know it.
+    pub(crate) fn get_transceivers(&self) -> GetTransceiversResponse {
+        self.transceivers.lock().unwrap().clone()
+    }
+}
+
+pub struct Manager {
+    log: Logger,
+    switch_location_tx: watch::Sender<Option<SwitchLocation>>,
+    switch_location_rx: watch::Receiver<Option<SwitchLocation>>,
+    transceivers: Arc<Mutex<GetTransceiversResponse>>,
+}
+
+impl Manager {
+    pub(crate) fn new(log: &Logger) -> Self {
+        let log =
+            log.new(slog::o!("component" => "wicketd TransceiverManager"));
+        let (switch_location_tx, switch_location_rx) = watch::channel(None);
+        let transceivers =
+            Arc::new(Mutex::new(GetTransceiversResponse::Unavailable));
+        Self { log, transceivers, switch_location_tx, switch_location_rx }
+    }
+
+    pub(crate) fn get_handle(&self) -> Handle {
+        Handle {
+            switch_location_tx: self.switch_location_tx.clone(),
+            transceivers: self.transceivers.clone(),
+        }
+    }
+
+    pub(crate) async fn run(mut self) {
+        // First, we need to wait until we know the switch location.
+        //
+        // The watch Receiver was created with `None`, which is considered seen.
+        // We've never called any other borrowing method between the creation
+        // and here, so changed() will wait until we get something new.
+        debug!(self.log, "waiting to learn our switch location");
+        let our_switch_location = loop {
+            if self.switch_location_rx.changed().await.is_err() {
+                slog::warn!(
+                    self.log,
+                    "failed to wait for new switch location change \
+                    notification, exiting";
+                );
+                return;
+            };
+            match *self.switch_location_rx.borrow_and_update() {
+                Some(loc) => break loc,
+                None => continue,
+            }
+        };
+        let other_switch_location = our_switch_location.other();
+        debug!(
+            self.log,
+            "determined our switch locations, spawning transceiver fetch tasks";
+            "our_switch" => %our_switch_location,
+            "other_switch" => %other_switch_location,
+        );
+
+        // Now, spawn a task for each switch.
+        //
+        // The local switch always uses `sidecar0` as the interface and the
+        // remote uses `sidecar1`. But we now know which _switch slot_ that maps
+        // to.
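+        //
+        // E.g., if we determined above that we are switch0, the task polling
+        // `sidecar0` reports its results under `SwitchLocation::Switch0` and
+        // the task polling `sidecar1` reports under `SwitchLocation::Switch1`,
+        // and vice versa.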
+        let (tx, mut rx) = mpsc::channel(CHANNEL_CAPACITY);
+        tokio::spawn(fetch_transceivers_from_one_switch(
+            self.log.clone(),
+            tx.clone(),
+            our_switch_location,
+            LOCAL_SWITCH_SP_INTERFACE,
+        ));
+        tokio::spawn(fetch_transceivers_from_one_switch(
+            self.log.clone(),
+            tx.clone(),
+            other_switch_location,
+            OTHER_SWITCH_SP_INTERFACE,
+        ));
+
+        // Now, wait for updates from the fetching tasks and aggregate them,
+        // populating our own view of the transceivers as they come in.
+        loop {
+            let Some(TransceiverUpdate {
+                location,
+                transceivers: these_transceivers,
+                updated_at,
+            }) = rx.recv().await
+            else {
+                error!(self.log, "all transceiver fetch tasks have exited");
+                return;
+            };
+            let mut transceivers_by_switch = self.transceivers.lock().unwrap();
+            match &mut *transceivers_by_switch {
+                GetTransceiversResponse::Response {
+                    transceivers,
+                    transceivers_last_seen,
+                } => {
+                    transceivers.insert(location, these_transceivers);
+                    *transceivers_last_seen = updated_at.elapsed();
+                }
+                GetTransceiversResponse::Unavailable => {
+                    let mut all_transceivers = TransceiverMap::new();
+                    all_transceivers.insert(location, these_transceivers);
+                    *transceivers_by_switch =
+                        GetTransceiversResponse::Response {
+                            transceivers: all_transceivers,
+                            transceivers_last_seen: updated_at.elapsed(),
+                        };
+                }
+            }
+        }
+    }
+}
+
+// An update from one of the transceiver fetching tasks about the transceivers
+// it has seen.
+struct TransceiverUpdate {
+    location: SwitchLocation,
+    transceivers: Vec<Transceiver>,
+    updated_at: Instant,
+}
+
+// Task fetching all transceiver state from one switch.
+async fn fetch_transceivers_from_one_switch(
+    log: Logger,
+    tx: mpsc::Sender<TransceiverUpdate>,
+    location: SwitchLocation,
+    interface: &'static str,
+) {
+    let mut check_interval = tokio::time::interval(TRANSCEIVER_POLL_INTERVAL);
+    debug!(
+        log,
+        "starting transceiver fetch task";
+        "interface" => interface,
+        "poll_interval" => ?TRANSCEIVER_POLL_INTERVAL,
+    );
+
+    // Spawn a task to swallow requests from the SP.
+    let (sp_request_tx, sp_request_rx) = mpsc::channel(CHANNEL_CAPACITY);
+    tokio::spawn(drop_sp_transceiver_requests(
+        interface,
+        log.clone(),
+        sp_request_rx,
+    ));
+
+    // First, set up the transceiver controller.
+    let controller = loop {
+        check_interval.tick().await;
+        // NOTE: We bind an ephemeral port here, since we cannot choose the
+        // default (that's used by Dendrite). This doesn't affect functionality
+        // in any case, since the SP doesn't send us unsolicited messages.
+        let config = match ConfigBuilder::new(interface).port(0).build() {
+            Ok(c) => c,
+            Err(e) => {
+                error!(
+                    log,
+                    "failed to create transceiver controller configuration";
+                    "interface" => interface,
+                    "error" => %e
+                );
+                continue;
+            }
+        };
+
+        match Controller::new(
+            config,
+            log.new(slog::o!("component" => "transceiver-controller")),
+            sp_request_tx.clone(),
+        )
+        .await
+        {
+            Ok(c) => break c,
+            Err(e) => {
+                error!(
+                    log,
+                    "failed to create transceiver controller";
+                    "interface" => interface,
+                    "error" => %e,
+                );
+                continue;
+            }
+        };
+    };
+
+    // Then poll the transceivers periodically.
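+    //
+    // Each tick fetches the full transceiver state and hands it to the
+    // manager over the bounded channel; if the channel is full we log the
+    // failure and drop this sample, since a fresher one follows on the next
+    // tick.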
+    debug!(log, "created transceiver controller, starting poll loop");
+    loop {
+        match fetch_transceiver_state(&controller).await {
+            Ok(transceivers) => {
+                debug!(
+                    log,
+                    "fetched transceiver state";
+                    "state" => ?transceivers,
+                );
+                let update = TransceiverUpdate {
+                    location,
+                    transceivers,
+                    updated_at: Instant::now(),
+                };
+                if tx.try_send(update).is_err() {
+                    error!(
+                        log,
+                        "failed to send new transceiver state to manager",
+                    );
+                }
+            }
+            Err(e) => error!(
+                log,
+                "failed to fetch transceiver state";
+                "interface" => interface,
+                "error" => %e,
+            ),
+        }
+        check_interval.tick().await;
+    }
+}
+
+// A loop that just drops any messages we get from the SP.
+//
+// There shouldn't be any such requests today, so we'll warn and drop them.
+async fn drop_sp_transceiver_requests(
+    interface: &'static str,
+    log: Logger,
+    mut sp_request_rx: mpsc::Receiver<SpRequest>,
+) {
+    loop {
+        let Some(req) = sp_request_rx.recv().await else {
+            debug!(log, "SP transceiver request channel closed, exiting");
+            return;
+        };
+        slog::warn!(
+            log,
+            "received unexpected transceiver request from SP";
+            "request" => ?req.request,
+            "interface" => interface,
+        );
+        if req.response_tx.try_send(Ok(None)).is_ok() {
+            debug!(
+                log,
+                "sent reply to transceiver controller to drop \
+                the SP request"
+            );
+        } else {
+            error!(log, "failed to send reply to transceiver controller");
+        }
+    }
+}
+
+async fn fetch_transceiver_state(
+    controller: &Controller,
+) -> Result<Vec<Transceiver>, Error> {
+    // Start by fetching the status of all modules.
+    //
+    // As we ask for more data, the set of modules we address might get smaller.
+    // Each operation is fallible, and each module can fail independently. So we
+    // continually overwrite the set of modules we're considering at each
+    // operation, so that by the end, we have the modules for which we've
+    // successfully collected all the data.
+    let mut modules = ModuleId::all();
+    let all_status = controller.extended_status(modules).await?;
+
+    // From here, let's only address those which are present.
+    let present = all_status
+        .iter()
+        .filter(|(_, st)| st.contains(ExtendedStatus::PRESENT))
+        .map(|(p, _st)| p);
+    modules = ModuleId::from_index_iter(present).unwrap();
+
+    // Vendor details.
+    let all_vendor_info = controller.vendor_info(modules).await?;
+    modules = all_vendor_info.modules;
+
+    // Power information.
+    let all_power = controller.power(modules).await?;
+    modules = all_power.modules;
+
+    // Datapath state.
+    let all_datapaths = controller.datapath(modules).await?;
+    modules = all_datapaths.modules;
+
+    // And monitors.
+    let all_monitors = controller.monitors(modules).await?;
+    modules = all_monitors.modules;
+
+    // Now, combine everything.
+    //
+    // If we failed any operation for a module, we'll insert `None` so that we
+    // still have whatever data we _could_ collect.
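+    //
+    // The per-module `nth(i)` lookups below each return an `Option`, which
+    // maps directly onto the optional fields of `Transceiver`.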
+    let mut out = Vec::with_capacity(modules.selected_transceiver_count());
+    for i in modules.to_indices() {
+        let tr = Transceiver {
+            port: format!("qsfp{i}"),
+            power: all_power.nth(i).copied(),
+            vendor: all_vendor_info.nth(i).cloned(),
+            status: all_status.nth(i).copied(),
+            datapath: all_datapaths.nth(i).cloned(),
+            monitors: all_monitors.nth(i).cloned(),
+        };
+        out.push(tr);
+    }
+
+    Ok(out)
+}
diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs
index 43b550a2d8b..b783329e9c8 100644
--- a/wicketd/src/update_tracker.rs
+++ b/wicketd/src/update_tracker.rs
@@ -36,9 +36,9 @@
 use hubtools::RawHubrisArchive;
 use installinator_common::InstallinatorCompletionMetadata;
 use installinator_common::InstallinatorSpec;
 use installinator_common::WriteOutput;
-use omicron_common::api::external::SemverVersion;
 use omicron_common::disk::M2Slot;
 use omicron_common::update::ArtifactHash;
+use semver::Version;
 use slog::error;
 use slog::info;
 use slog::o;
@@ -961,7 +961,7 @@
                 "SP board {}, version {} (git commit {})",
                 caboose.board, caboose.version, caboose.git_commit
             );
-            match caboose.version.parse::<SemverVersion>() {
+            match caboose.version.parse::<Version>() {
                 Ok(version) => {
                     StepSuccess::new((sp_artifact, Some(version)))
                         .with_message(message)
@@ -1683,7 +1683,7 @@ struct RotInterrogation {
     sp: SpIdentifier,
     // Version reported by the target RoT.
     artifact_to_apply: ArtifactIdData,
-    active_version: Option<SemverVersion>,
+    active_version: Option<Version>,
 }
 
 impl RotInterrogation {
@@ -1705,15 +1705,15 @@
         // Older versions of the SP have a bug that prevents setting
         // the active slot for the RoT bootloader. Check for these
         // and skip the update until the SP gets updated
-        const MIN_GIMLET_VERSION: SemverVersion = SemverVersion::new(1, 0, 21);
-        const MIN_SWITCH_VERSION: SemverVersion = SemverVersion::new(1, 0, 21);
-        const MIN_PSC_VERSION: SemverVersion = SemverVersion::new(1, 0, 20);
+        const MIN_GIMLET_VERSION: Version = Version::new(1, 0, 21);
+        const MIN_SWITCH_VERSION: Version = Version::new(1, 0, 21);
+        const MIN_PSC_VERSION: Version = Version::new(1, 0, 20);
 
         match sp_caboose {
             // If we can't get the SP caboose for whatever reason don't risk
             // trying an update
             None => false,
-            Some(caboose) => match caboose.version.parse::<SemverVersion>() {
+            Some(caboose) => match caboose.version.parse::<Version>() {
                 Ok(vers) => match self.sp.type_ {
                     SpType::Sled => vers >= MIN_GIMLET_VERSION,
                     SpType::Switch => vers >= MIN_SWITCH_VERSION,
@@ -1918,7 +1918,7 @@ impl UpdateContext {
             c.version, c.git_commit
         );
-        match c.version.parse::<SemverVersion>() {
+        match c.version.parse::<Version>() {
             Ok(version) => StepSuccess::new(make_result(Some(version)))
                 .with_message(message)
                 .into(),
@@ -2012,7 +2012,7 @@ impl UpdateContext {
             active_version,
         };
 
-        match caboose.version.parse::<SemverVersion>() {
+        match caboose.version.parse::<Version>() {
             Ok(version) => StepSuccess::new(make_result(Some(version)))
                 .with_message(message)
                 .into(),
diff --git a/wicketd/tests/integration_tests/inventory.rs b/wicketd/tests/integration_tests/inventory.rs
index c7057e3adcf..ec18a153374 100644
--- a/wicketd/tests/integration_tests/inventory.rs
+++ b/wicketd/tests/integration_tests/inventory.rs
@@ -9,12 +9,14 @@
 use std::time::Duration;
 
 use super::setup::WicketdTestContext;
 use gateway_messages::SpPort;
 use gateway_test_utils::setup as gateway_setup;
+use http::StatusCode;
 use sled_hardware_types::Baseboard;
 use slog::{info, warn};
 use wicket::OutputKind;
 use wicket_common::inventory::{SpIdentifier, SpType};
 use wicket_common::rack_setup::BootstrapSledDescription;
 use wicketd_client::types::{GetInventoryParams, GetInventoryResponse};
+use wicketd_client::Error;
 
 #[tokio::test]
 async fn test_inventory() {
@@ -29,29 +31,45 @@
             .wicketd_client
             .get_inventory(&params)
             .await
-            .expect("get_inventory succeeded")
-            .into_inner();
+            .map(|r| r.into_inner());
         match response {
-            GetInventoryResponse::Response { inventory, .. } => {
+            Ok(GetInventoryResponse::Response { inventory, .. }) => {
                 // Ensure that the SP state is populated -- if it's not,
                 // then the `configured-bootstrap-sleds` command below
                 // might return an empty list.
-                let sp_state_none: Vec<_> = inventory
-                    .sps
-                    .iter()
-                    .filter(|sp| sp.state.is_none())
-                    .collect();
-                if sp_state_none.is_empty() {
-                    break inventory;
+                if let Some(mgs) = inventory.mgs {
+                    let sp_state_none: Vec<_> = mgs
+                        .inventory
+                        .sps
+                        .iter()
+                        .filter(|sp| sp.state.is_none())
+                        .collect();
+                    if sp_state_none.is_empty() {
+                        break mgs.inventory;
+                    }
+                    warn!(
+                        wicketd_testctx.log(),
+                        "SP state not yet populated for some SPs, retrying";
+                        "sps" => ?sp_state_none
+                    )
+                } else {
+                    warn!(
+                        wicketd_testctx.log(),
+                        "MGS-derived inventory not yet populated, retrying"
+                    );
                 }
-
-                warn!(
-                    wicketd_testctx.log(),
-                    "SP state not yet populated for some SPs, retrying";
-                    "sps" => ?sp_state_none
-                )
             }
-            GetInventoryResponse::Unavailable => {}
+            // Successful response, but the MGS inventory isn't available.
+            Ok(GetInventoryResponse::Unavailable) => {}
+
+            // 503 means neither MGS nor transceiver inventory is available.
+            Err(Error::ErrorResponse(rv))
+                if rv.status() == StatusCode::SERVICE_UNAVAILABLE => {}
+
+            // Anything else is unexpected.
+            Err(e) => panic!(
+                "get_inventory failed with unexpected response: {e:?}"
+            ),
         }
 
         // Keep polling wicketd until it receives its first results from MGS.
diff --git a/wicketd/tests/integration_tests/setup.rs b/wicketd/tests/integration_tests/setup.rs
index 6ec40907191..c7633698432 100644
--- a/wicketd/tests/integration_tests/setup.rs
+++ b/wicketd/tests/integration_tests/setup.rs
@@ -2,7 +2,7 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
-// Copyright 2022 Oxide Computer Company
+// Copyright 2025 Oxide Computer Company
 
 use std::net::{Ipv6Addr, SocketAddr, SocketAddrV6};
 
@@ -27,8 +27,8 @@ pub struct WicketdTestContext {
 
 impl WicketdTestContext {
     pub async fn setup(gateway: GatewayTestContext) -> Self {
-        // Can't be `const` because `SocketAddrV6::new()` isn't const yet
-        let localhost_port_0 = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0);
+        const LOCALHOST_PORT_0: SocketAddrV6 =
+            SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0);
 
         // Reuse the log from the gateway context.
let log = &gateway.logctx.log; @@ -36,15 +36,16 @@ impl WicketdTestContext { let mgs_address = assert_ipv6( gateway .server - .dropshot_server_for_address(localhost_port_0) + .dropshot_server_for_address(LOCALHOST_PORT_0) .unwrap() .local_addr(), ); + let args = wicketd::Args { - address: localhost_port_0, - artifact_address: localhost_port_0, + address: LOCALHOST_PORT_0, + artifact_address: LOCALHOST_PORT_0, mgs_address, - nexus_proxy_address: localhost_port_0, + nexus_proxy_address: LOCALHOST_PORT_0, baseboard: None, rack_subnet: None, }; diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs index d333a1104a1..d912438bee9 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -131,7 +131,13 @@ async fn test_updates() { match resp.into_inner() { GetInventoryResponse::Response { inventory, .. } => { let mut found = false; - for sp in &inventory.sps { + for sp in &inventory + .mgs + .as_ref() + .expect("Should have MGS inventory") + .inventory + .sps + { if sp.id == target_sp { assert!(sp.state.is_some(), "no state for target SP"); found = true; diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index f890a8f799b..d2732fe1ba5 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -28,7 +28,7 @@ base64ct = { version = "1.6.0", default-features = false, features = ["std"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde", "std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde"] } bstr = { version = "1.10.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.9.0", features = ["serde"] } @@ -70,11 +70,11 @@ hyper = { version = "1.5.2", features = ["full"] } idna = { version = "1.0.3" } indexmap = { version = "2.7.1", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } -itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } +itertools-582f2526e08bb6a0 = { package = "itertools", version = "0.14.0" } itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.5.0", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2.169", features = ["extra_traits"] } +libc = { version = "0.2.170", features = ["extra_traits"] } log = { version = "0.4.22", default-features = false, features = ["kv_unstable", "std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } memchr = { version = "2.7.4" } @@ -95,27 +95,27 @@ phf_shared = { version = "0.11.2" } pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption", "pem", "std"] } postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.3" } -proc-macro2 = { version = "1.0.92" } +proc-macro2 = { version = "1.0.93" } qorb = { version = "0.2.1", features = ["qtop"] } quote = { version = "1.0.38" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.1" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = 
{ version = "0.8.5" } -reqwest = { version = "0.12.9", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.12.12", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } rsa = { version = "0.9.6", features = ["serde", "sha2"] } rustls = { version = "0.23.19", features = ["ring"] } rustls-webpki = { version = "0.102.8", default-features = false, features = ["aws_lc_rs", "ring", "std"] } -schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.21", features = ["bytes", "chrono", "semver", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.25", features = ["serde"] } -serde = { version = "1.0.217", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.218", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.139", features = ["raw_value", "unbounded_depth"] } sha1 = { version = "0.10.6", features = ["oid"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.6.0", features = ["bytes", "inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } +smallvec = { version = "1.14.0", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.6.1" } @@ -133,7 +133,8 @@ usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.12.0", features = ["serde", "v4"] } x509-cert = { version = "0.2.5" } -zerocopy = { version = "0.7.35", features = ["derive", "simd"] } +zerocopy-c38e5c1d305a1b54 = { package = "zerocopy", version = "0.8.10", default-features = false, features = ["derive", "simd"] } +zerocopy-ca01ad9e24f5d932 = { package = "zerocopy", version = "0.7.35", features = ["derive", "simd"] } zeroize = { version = "1.8.1", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } @@ -148,11 +149,11 @@ base64ct = { version = "1.6.0", default-features = false, features = ["std"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } -bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde", "std"] } +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["serde"] } bstr = { version = "1.10.0" } byteorder = { version = "1.5.0" } bytes = { version = "1.9.0", features = ["serde"] } -cc = { version = "1.1.30", default-features = false, features = ["parallel"] } +cc = { version = "1.2.15", default-features = false, features = ["parallel"] } chrono = { version = "0.4.39", features = ["serde"] } cipher = { version = "0.4.4", default-features = false, features = ["block-padding", "zeroize"] } clap = { version = "4.5.30", features = ["cargo", "derive", "env", "wrap_help"] } @@ -191,11 +192,11 @@ hyper = { version = "1.5.2", features = ["full"] } idna = { version = "1.0.3" } indexmap = { version = "2.7.1", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } -itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } +itertools-582f2526e08bb6a0 = { package = "itertools", version = 
"0.14.0" } itertools-93f6ce9d446188ac = { package = "itertools", version = "0.10.5" } lalrpop-util = { version = "0.19.12" } lazy_static = { version = "1.5.0", default-features = false, features = ["spin_no_std"] } -libc = { version = "0.2.169", features = ["extra_traits"] } +libc = { version = "0.2.170", features = ["extra_traits"] } log = { version = "0.4.22", default-features = false, features = ["kv_unstable", "std"] } managed = { version = "0.8.0", default-features = false, features = ["alloc", "map"] } memchr = { version = "2.7.4" } @@ -216,27 +217,27 @@ phf_shared = { version = "0.11.2" } pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption", "pem", "std"] } postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.3" } -proc-macro2 = { version = "1.0.92" } +proc-macro2 = { version = "1.0.93" } qorb = { version = "0.2.1", features = ["qtop"] } quote = { version = "1.0.38" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.1" } regex-automata = { version = "0.4.8", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.5" } -reqwest = { version = "0.12.9", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } +reqwest = { version = "0.12.12", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } rsa = { version = "0.9.6", features = ["serde", "sha2"] } rustls = { version = "0.23.19", features = ["ring"] } rustls-webpki = { version = "0.102.8", default-features = false, features = ["aws_lc_rs", "ring", "std"] } -schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.21", features = ["bytes", "chrono", "semver", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.25", features = ["serde"] } -serde = { version = "1.0.217", features = ["alloc", "derive", "rc"] } +serde = { version = "1.0.218", features = ["alloc", "derive", "rc"] } serde_json = { version = "1.0.139", features = ["raw_value", "unbounded_depth"] } sha1 = { version = "0.10.6", features = ["oid"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.6.0", features = ["bytes", "inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } -smallvec = { version = "1.13.2", default-features = false, features = ["const_new"] } +smallvec = { version = "1.14.0", default-features = false, features = ["const_new"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.6.1" } @@ -257,11 +258,13 @@ usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.12.0", features = ["serde", "v4"] } x509-cert = { version = "0.2.5" } -zerocopy = { version = "0.7.35", features = ["derive", "simd"] } +zerocopy-c38e5c1d305a1b54 = { package = "zerocopy", version = "0.8.10", default-features = false, features = ["derive", "simd"] } +zerocopy-ca01ad9e24f5d932 = { package = "zerocopy", version = "0.7.35", features = ["derive", "simd"] } zeroize = { version = "1.8.1", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = 
"2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } @@ -273,6 +276,7 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-unknown-linux-gnu.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } @@ -284,6 +288,7 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-apple-darwin.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { version = "0.1.10", features = ["full"] } @@ -293,6 +298,7 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-apple-darwin.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { version = "0.1.10", features = ["full"] } @@ -302,6 +308,7 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.aarch64-apple-darwin.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { version = "0.1.10", features = ["full"] } @@ -311,6 +318,7 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.aarch64-apple-darwin.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { 
version = "0.1.10", features = ["full"] } @@ -320,11 +328,13 @@ rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "proce tokio-rustls = { version = "0.26.0", default-features = false, features = ["logging", "ring", "tls12"] } [target.x86_64-unknown-illumos.dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { version = "0.1.10", features = ["full"] } indicatif = { version = "0.17.11", features = ["rayon"] } +itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.3" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] } @@ -332,11 +342,14 @@ tokio-rustls = { version = "0.26.0", default-features = false, features = ["logg toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] +bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.6.0", default-features = false, features = ["std"] } +clang-sys = { version = "1.8.1", default-features = false, features = ["clang_11_0", "runtime"] } cookie = { version = "0.18.1", default-features = false, features = ["percent-encode"] } dof = { version = "0.3.0", default-features = false, features = ["des"] } hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "ring", "tls12", "webpki-tokio"] } hyper-util = { version = "0.1.10", features = ["full"] } indicatif = { version = "0.17.11", features = ["rayon"] } +itertools-5ef9efb8ec2df382 = { package = "itertools", version = "0.12.1" } mio = { version = "1.0.2", features = ["net", "os-ext"] } once_cell = { version = "1.20.3" } rustix = { version = "0.38.37", features = ["event", "fs", "net", "pipe", "process", "stdio", "system", "termios", "time"] }