From 5a8bff2aabfc3c25f6fe15c211028daedc24a0c7 Mon Sep 17 00:00:00 2001 From: boxdot Date: Mon, 10 Feb 2025 16:21:27 +0100 Subject: [PATCH] introduce a trait for persisting types as blobs in the database Mark all types in the backend that are persisted as blobs with the `BlobPersist` trait. Port the sqlx persistence code to the new trait. (A sketch of the intended shape of the new API is appended after the diff.) --- ...909699052e3c430673f174596355e5d8e929c.json | 23 ---- ...f23504f47073a7c5694a2e7f5a0177d0310f6.json | 30 ++++++ ...757672ff82aa91e948e2cb0a63f7e655e8b1e.json | 23 ++++ ...e6cb26e63f5225accf7fdabe2799cafd35a7.json} | 6 +- ...6281a38f860c68e04f1e2c9a9c2c023101c4.json} | 10 +- ...441cb8a24dc86b9da866ed7fa13b661400d6c.json | 22 ++++ ...05236910ed5600d9fbc6c016d9a85413d1c0.json} | 8 +- ...552c3f2eeca8420b8888daf3b10b28a742974.json | 15 --- ...15b27336d14b5fa113ae84643af6b10d4ebbd.json | 28 ----- ...fda13a1bd0dc113b90bfb54bef583a9642bc.json} | 4 +- ...fdfc0f4a5f03ae269e1f26e33a0a0819478e7.json | 20 ---- ...deccdaf71ae897bfbd52b221ba37616dea82.json} | 4 +- ...58839d29b52d9269cde8d33e3705e588f74b0.json | 15 +++ ...b86dfe1d06042de8457c07a9a05536d0386c.json} | 6 +- ...62e11d3942c33d5e308bb1ddb63ae757689d3.json | 22 ++++ ...0d6f8e61224d7795550aaf2d27e3d3fa64ee.json} | 4 +- ...41abe2b696b2ffdda7edcc9ac237dd89c75e.json} | 4 +- ...8885101d7f21a9608dc7139b3fba16e18c9a.json} | 10 +- ...51dce2f7a0fbffc99cae9522c7632fb70a04.json} | 6 +- ...9f80fb3e2775e66139de3a23f63f7fdc1cf4f.json | 32 ++++++ ...05102fc48feb6424c8686f789ea36f216bf5.json} | 6 +- ...614c4ca70338e8fa5b0ad9e377416747b036.json} | 4 +- ...a70f911c059068789b09fe93d04c59280652f.json | 20 ++++ ...78bb798f3826a50c606d6476573b1755f7ae6.json | 30 ------ ...eca6898c69dd9c3e3842e039c9e4325e3eb5c.json | 30 ++++++ ...519fd7b8226d049952ad719935242d53e0725.json | 22 ---- ...0240927070412_create_initial_as_tables.sql | 2 +- backend/src/auth_service/client_record.rs | 89 ++++++++++------ .../auth_service/connection_package/mod.rs | 4 +- .../connection_package/persistence.rs | 66 +++++------- .../credentials/intermediate_signing_key.rs | 47 ++++---- .../auth_service/credentials/signing_key.rs | 46 ++++---- backend/src/auth_service/opaque.rs | 21 ++-- backend/src/auth_service/privacy_pass.rs | 22 ++-- backend/src/auth_service/queue.rs | 63 ++++++----- backend/src/auth_service/user_record.rs | 32 ++++-- backend/src/ds/group_state/mod.rs | 4 +- backend/src/ds/group_state/persistence.rs | 66 ++++++++---- backend/src/qs/client_record.rs | 79 +++++++------- backend/src/qs/queue.rs | 47 ++++---- .../migrations/V2__add_timestamp_indexes.sql | 2 +- types/src/codec/cbor.rs | 9 ++ types/src/codec/mod.rs | 10 ++ types/src/codec/persist.rs | 100 ++++++++++++++++++ types/src/credentials/keys.rs | 8 ++ types/src/crypto/mod.rs | 6 +- types/src/crypto/ratchet/mod.rs | 5 + types/src/crypto/signatures/keys.rs | 4 +- types/src/identifiers/mod.rs | 13 ++- types/src/messages/mod.rs | 7 +- 50 files changed, 716 insertions(+), 440 deletions(-) delete mode 100644 backend/.sqlx/query-15e9a4e431eed56e3659c9fed90909699052e3c430673f174596355e5d8e929c.json create mode 100644 backend/.sqlx/query-2945470e25774c62bbecaa091b3f23504f47073a7c5694a2e7f5a0177d0310f6.json create mode 100644 backend/.sqlx/query-2ad78e4179dbfc7c098d3221ee0757672ff82aa91e948e2cb0a63f7e655e8b1e.json rename backend/.sqlx/{query-795d788226217f679312794261862cf0e9236d1688a96fc124b41f0fbcb61c06.json => query-2b9fe7f2cc6f20dc70437ae7cc04e6cb26e63f5225accf7fdabe2799cafd35a7.json} (54%) rename backend/.sqlx/{query-85d7d1965b49129880928185c5a8f9e52944f0c35d6d7d5e8dce937979067b4c.json =>
query-30a44abed45924287e76fce292746281a38f860c68e04f1e2c9a9c2c023101c4.json} (85%) create mode 100644 backend/.sqlx/query-351db7378b1632b1777256ec230441cb8a24dc86b9da866ed7fa13b661400d6c.json rename backend/.sqlx/{query-4c8dac26fef5c235476dca6fd0fbbafb1929a95bdf63fd120ffbf0cc0af07990.json => query-47b201aa117da7b57f9aa6736b8105236910ed5600d9fbc6c016d9a85413d1c0.json} (56%) delete mode 100644 backend/.sqlx/query-4af97c317d3bd047d6fe16cb14d552c3f2eeca8420b8888daf3b10b28a742974.json delete mode 100644 backend/.sqlx/query-502da62467e18e60e69818c407515b27336d14b5fa113ae84643af6b10d4ebbd.json rename backend/.sqlx/{query-ca471c422d4e4c8f7f4db7f66274b03f4c30e0450b3943049f72c889849092a5.json => query-6543b5717596b7db1ac35e503adefda13a1bd0dc113b90bfb54bef583a9642bc.json} (53%) delete mode 100644 backend/.sqlx/query-6d1479de3067abf0dbc0096dbe3fdfc0f4a5f03ae269e1f26e33a0a0819478e7.json rename backend/.sqlx/{query-46b576ddf33d367695053fe68c8b5ff892102ff763146d3301a075c3c80417b0.json => query-708d9a419f96911616be4700d93ddeccdaf71ae897bfbd52b221ba37616dea82.json} (64%) create mode 100644 backend/.sqlx/query-7100ebdd08030851784354d3f9c58839d29b52d9269cde8d33e3705e588f74b0.json rename backend/.sqlx/{query-3ba8fe0bbf9d259cc4778b7ccccd85a866f2474b517c57e640d31b0f948061cf.json => query-84e59f457de64db71587e2de08afb86dfe1d06042de8457c07a9a05536d0386c.json} (53%) create mode 100644 backend/.sqlx/query-95da95aa6155ed03e44689a625c62e11d3942c33d5e308bb1ddb63ae757689d3.json rename backend/.sqlx/{query-6f5111ddd6b33e7449b904797bd9b8363e107a5194a40d065860f31aba15db02.json => query-96c852e6675141e33b0fbdf5428c0d6f8e61224d7795550aaf2d27e3d3fa64ee.json} (86%) rename backend/.sqlx/{query-fa1f4e4a98ac823a01d7af12eaff3edf9bdd9ba1671314eec9aeba7e4de2e671.json => query-abf7c9d6858f7a1bd7e4d415b69641abe2b696b2ffdda7edcc9ac237dd89c75e.json} (87%) rename backend/.sqlx/{query-922474dcc71d22509f7569235396bc901ea31124e737d0f4e3a39793429255be.json => query-b4fc9f5a57f88216319328f9f7b48885101d7f21a9608dc7139b3fba16e18c9a.json} (71%) rename backend/.sqlx/{query-05558d3ff3e5761778af844cbc9bd71ebc98657719abe87bc7aa821c01c24b37.json => query-bb0a400f437c1bb430c82718b55951dce2f7a0fbffc99cae9522c7632fb70a04.json} (66%) create mode 100644 backend/.sqlx/query-c1ccb1a286feb7be2dd2c00dd1f9f80fb3e2775e66139de3a23f63f7fdc1cf4f.json rename backend/.sqlx/{query-de09bf0b80bc3202de8016e6ee2face1c78d91a285b25adbd6f14e2083ca3cdc.json => query-d65d37efd19671aa01d5f9201fd305102fc48feb6424c8686f789ea36f216bf5.json} (65%) rename backend/.sqlx/{query-72831ae68d2499129834edefc378eb867bfc4aa95ddb561ec7ba70d4cbbc9a02.json => query-e02053265b5acce5aec76546cec1614c4ca70338e8fa5b0ad9e377416747b036.json} (76%) create mode 100644 backend/.sqlx/query-ea6b8c3c2f6cb40db89e394fe67a70f911c059068789b09fe93d04c59280652f.json delete mode 100644 backend/.sqlx/query-eab134949ec166baafbd7e1929678bb798f3826a50c606d6476573b1755f7ae6.json create mode 100644 backend/.sqlx/query-ec27e35b2e3f55b0bafb7304616eca6898c69dd9c3e3842e039c9e4325e3eb5c.json delete mode 100644 backend/.sqlx/query-f6322f6cc0bcecd8cea95129cb5519fd7b8226d049952ad719935242d53e0725.json create mode 100644 types/src/codec/persist.rs diff --git a/backend/.sqlx/query-15e9a4e431eed56e3659c9fed90909699052e3c430673f174596355e5d8e929c.json b/backend/.sqlx/query-15e9a4e431eed56e3659c9fed90909699052e3c430673f174596355e5d8e929c.json deleted file mode 100644 index 8a887273..00000000 --- a/backend/.sqlx/query-15e9a4e431eed56e3659c9fed90909699052e3c430673f174596355e5d8e929c.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - 
"db_name": "PostgreSQL", - "query": "\n WITH updated_sequence AS (\n -- Step 1: Update and return the current sequence number.\n UPDATE qs_queue_data \n SET sequence_number = sequence_number + 1 \n WHERE queue_id = $1 \n RETURNING sequence_number - 1 as sequence_number\n )\n -- Step 2: Insert the message with the new sequence number.\n INSERT INTO qs_queues (queue_id, sequence_number, message_bytes) \n SELECT $1, sequence_number, $2 FROM updated_sequence\n RETURNING sequence_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "sequence_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Uuid", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "15e9a4e431eed56e3659c9fed90909699052e3c430673f174596355e5d8e929c" -} diff --git a/backend/.sqlx/query-2945470e25774c62bbecaa091b3f23504f47073a7c5694a2e7f5a0177d0310f6.json b/backend/.sqlx/query-2945470e25774c62bbecaa091b3f23504f47073a7c5694a2e7f5a0177d0310f6.json new file mode 100644 index 00000000..5648c633 --- /dev/null +++ b/backend/.sqlx/query-2945470e25774c62bbecaa091b3f23504f47073a7c5694a2e7f5a0177d0310f6.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH deleted AS (\n DELETE FROM qs_queues\n WHERE queue_id = $1 AND sequence_number < $2\n RETURNING *\n ),\n fetched AS (\n SELECT message_bytes FROM qs_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n ORDER BY sequence_number ASC\n LIMIT $3\n ),\n remaining AS (\n SELECT COALESCE(COUNT(*)) AS count\n FROM qs_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n )\n SELECT\n fetched.message_bytes AS \"message: BlobPersisted\",\n remaining.count\n FROM fetched, remaining\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "message: BlobPersisted", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + null + ] + }, + "hash": "2945470e25774c62bbecaa091b3f23504f47073a7c5694a2e7f5a0177d0310f6" +} diff --git a/backend/.sqlx/query-2ad78e4179dbfc7c098d3221ee0757672ff82aa91e948e2cb0a63f7e655e8b1e.json b/backend/.sqlx/query-2ad78e4179dbfc7c098d3221ee0757672ff82aa91e948e2cb0a63f7e655e8b1e.json new file mode 100644 index 00000000..7294034e --- /dev/null +++ b/backend/.sqlx/query-2ad78e4179dbfc7c098d3221ee0757672ff82aa91e948e2cb0a63f7e655e8b1e.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH updated_sequence AS (\n -- Step 1: Update and return the current sequence number.\n UPDATE qs_queue_data\n SET sequence_number = sequence_number + 1\n WHERE queue_id = $1\n RETURNING sequence_number - 1 as sequence_number\n )\n -- Step 2: Insert the message with the new sequence number.\n INSERT INTO qs_queues (queue_id, sequence_number, message_bytes)\n SELECT $1, sequence_number, $2 FROM updated_sequence\n RETURNING sequence_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sequence_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Bytea" + ] + }, + "nullable": [ + false + ] + }, + "hash": "2ad78e4179dbfc7c098d3221ee0757672ff82aa91e948e2cb0a63f7e655e8b1e" +} diff --git a/backend/.sqlx/query-795d788226217f679312794261862cf0e9236d1688a96fc124b41f0fbcb61c06.json b/backend/.sqlx/query-2b9fe7f2cc6f20dc70437ae7cc04e6cb26e63f5225accf7fdabe2799cafd35a7.json similarity index 54% rename from backend/.sqlx/query-795d788226217f679312794261862cf0e9236d1688a96fc124b41f0fbcb61c06.json rename to 
backend/.sqlx/query-2b9fe7f2cc6f20dc70437ae7cc04e6cb26e63f5225accf7fdabe2799cafd35a7.json index 27d5fbe6..a5a3a2bd 100644 --- a/backend/.sqlx/query-795d788226217f679312794261862cf0e9236d1688a96fc124b41f0fbcb61c06.json +++ b/backend/.sqlx/query-2b9fe7f2cc6f20dc70437ae7cc04e6cb26e63f5225accf7fdabe2799cafd35a7.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "SELECT opaque_setup FROM opaque_setup", + "query": "SELECT opaque_setup AS \"opaque_setup: _\" FROM opaque_setup", "describe": { "columns": [ { "ordinal": 0, - "name": "opaque_setup", + "name": "opaque_setup: _", "type_info": "Bytea" } ], @@ -16,5 +16,5 @@ false ] }, - "hash": "795d788226217f679312794261862cf0e9236d1688a96fc124b41f0fbcb61c06" + "hash": "2b9fe7f2cc6f20dc70437ae7cc04e6cb26e63f5225accf7fdabe2799cafd35a7" } diff --git a/backend/.sqlx/query-85d7d1965b49129880928185c5a8f9e52944f0c35d6d7d5e8dce937979067b4c.json b/backend/.sqlx/query-30a44abed45924287e76fce292746281a38f860c68e04f1e2c9a9c2c023101c4.json similarity index 85% rename from backend/.sqlx/query-85d7d1965b49129880928185c5a8f9e52944f0c35d6d7d5e8dce937979067b4c.json rename to backend/.sqlx/query-30a44abed45924287e76fce292746281a38f860c68e04f1e2c9a9c2c023101c4.json index 0d085d09..9266160c 100644 --- a/backend/.sqlx/query-85d7d1965b49129880928185c5a8f9e52944f0c35d6d7d5e8dce937979067b4c.json +++ b/backend/.sqlx/query-30a44abed45924287e76fce292746281a38f860c68e04f1e2c9a9c2c023101c4.json @@ -1,16 +1,16 @@ { "db_name": "PostgreSQL", - "query": "SELECT\n queue_encryption_key,\n ratchet,\n activity_time,\n credential as \"client_credential: FlatClientCredential\",\n remaining_tokens\n FROM as_client_records WHERE client_id = $1", + "query": "SELECT\n queue_encryption_key AS \"queue_encryption_key: _\",\n ratchet AS \"ratchet: _\",\n activity_time,\n credential AS \"credential: _\",\n remaining_tokens\n FROM as_client_records WHERE client_id = $1", "describe": { "columns": [ { "ordinal": 0, - "name": "queue_encryption_key", + "name": "queue_encryption_key: _", "type_info": "Bytea" }, { "ordinal": 1, - "name": "ratchet", + "name": "ratchet: _", "type_info": "Bytea" }, { @@ -20,7 +20,7 @@ }, { "ordinal": 3, - "name": "client_credential: FlatClientCredential", + "name": "credential: _", "type_info": { "Custom": { "name": "client_credential", @@ -126,5 +126,5 @@ false ] }, - "hash": "85d7d1965b49129880928185c5a8f9e52944f0c35d6d7d5e8dce937979067b4c" + "hash": "30a44abed45924287e76fce292746281a38f860c68e04f1e2c9a9c2c023101c4" } diff --git a/backend/.sqlx/query-351db7378b1632b1777256ec230441cb8a24dc86b9da866ed7fa13b661400d6c.json b/backend/.sqlx/query-351db7378b1632b1777256ec230441cb8a24dc86b9da866ed7fa13b661400d6c.json new file mode 100644 index 00000000..233f3e52 --- /dev/null +++ b/backend/.sqlx/query-351db7378b1632b1777256ec230441cb8a24dc86b9da866ed7fa13b661400d6c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT password_file AS \"password_file: _\"\n FROM as_user_records\n WHERE user_name = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "password_file: _", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "351db7378b1632b1777256ec230441cb8a24dc86b9da866ed7fa13b661400d6c" +} diff --git a/backend/.sqlx/query-4c8dac26fef5c235476dca6fd0fbbafb1929a95bdf63fd120ffbf0cc0af07990.json b/backend/.sqlx/query-47b201aa117da7b57f9aa6736b8105236910ed5600d9fbc6c016d9a85413d1c0.json similarity index 56% rename from 
backend/.sqlx/query-4c8dac26fef5c235476dca6fd0fbbafb1929a95bdf63fd120ffbf0cc0af07990.json rename to backend/.sqlx/query-47b201aa117da7b57f9aa6736b8105236910ed5600d9fbc6c016d9a85413d1c0.json index eaf9f89c..1d554b6b 100644 --- a/backend/.sqlx/query-4c8dac26fef5c235476dca6fd0fbbafb1929a95bdf63fd120ffbf0cc0af07990.json +++ b/backend/.sqlx/query-47b201aa117da7b57f9aa6736b8105236910ed5600d9fbc6c016d9a85413d1c0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT\n group_id, ciphertext, last_used, deleted_queues\n FROM\n encrypted_groups\n WHERE\n group_id = $1", + "query": "SELECT\n group_id,\n ciphertext AS \"ciphertext: _\",\n last_used,\n deleted_queues AS \"deleted_queues: _\"\n FROM\n encrypted_groups\n WHERE\n group_id = $1", "describe": { "columns": [ { @@ -10,7 +10,7 @@ }, { "ordinal": 1, - "name": "ciphertext", + "name": "ciphertext: _", "type_info": "Bytea" }, { @@ -20,7 +20,7 @@ }, { "ordinal": 3, - "name": "deleted_queues", + "name": "deleted_queues: _", "type_info": "Bytea" } ], @@ -36,5 +36,5 @@ false ] }, - "hash": "4c8dac26fef5c235476dca6fd0fbbafb1929a95bdf63fd120ffbf0cc0af07990" + "hash": "47b201aa117da7b57f9aa6736b8105236910ed5600d9fbc6c016d9a85413d1c0" } diff --git a/backend/.sqlx/query-4af97c317d3bd047d6fe16cb14d552c3f2eeca8420b8888daf3b10b28a742974.json b/backend/.sqlx/query-4af97c317d3bd047d6fe16cb14d552c3f2eeca8420b8888daf3b10b28a742974.json deleted file mode 100644 index 50602943..00000000 --- a/backend/.sqlx/query-4af97c317d3bd047d6fe16cb14d552c3f2eeca8420b8888daf3b10b28a742974.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO \n qs_queue_data \n (queue_id, sequence_number)\n VALUES \n ($1, $2)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Uuid", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "4af97c317d3bd047d6fe16cb14d552c3f2eeca8420b8888daf3b10b28a742974" -} diff --git a/backend/.sqlx/query-502da62467e18e60e69818c407515b27336d14b5fa113ae84643af6b10d4ebbd.json b/backend/.sqlx/query-502da62467e18e60e69818c407515b27336d14b5fa113ae84643af6b10d4ebbd.json deleted file mode 100644 index c568aaf7..00000000 --- a/backend/.sqlx/query-502da62467e18e60e69818c407515b27336d14b5fa113ae84643af6b10d4ebbd.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT user_name as \"user_name: UserName\", password_file FROM as_user_records WHERE user_name = $1", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "user_name: UserName", - "type_info": "Text" - }, - { - "ordinal": 1, - "name": "password_file", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Text" - ] - }, - "nullable": [ - false, - false - ] - }, - "hash": "502da62467e18e60e69818c407515b27336d14b5fa113ae84643af6b10d4ebbd" -} diff --git a/backend/.sqlx/query-ca471c422d4e4c8f7f4db7f66274b03f4c30e0450b3943049f72c889849092a5.json b/backend/.sqlx/query-6543b5717596b7db1ac35e503adefda13a1bd0dc113b90bfb54bef583a9642bc.json similarity index 53% rename from backend/.sqlx/query-ca471c422d4e4c8f7f4db7f66274b03f4c30e0450b3943049f72c889849092a5.json rename to backend/.sqlx/query-6543b5717596b7db1ac35e503adefda13a1bd0dc113b90bfb54bef583a9642bc.json index ce059784..2a3fa823 100644 --- a/backend/.sqlx/query-ca471c422d4e4c8f7f4db7f66274b03f4c30e0450b3943049f72c889849092a5.json +++ b/backend/.sqlx/query-6543b5717596b7db1ac35e503adefda13a1bd0dc113b90bfb54bef583a9642bc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE\n encrypted_groups\n SET\n ciphertext = $2, last_used = $3, deleted_queues = $4\n 
WHERE\n group_id = $1", + "query": "UPDATE\n encrypted_groups\n SET\n ciphertext = $2,\n last_used = $3,\n deleted_queues = $4\n WHERE\n group_id = $1", "describe": { "columns": [], "parameters": { @@ -13,5 +13,5 @@ }, "nullable": [] }, - "hash": "ca471c422d4e4c8f7f4db7f66274b03f4c30e0450b3943049f72c889849092a5" + "hash": "6543b5717596b7db1ac35e503adefda13a1bd0dc113b90bfb54bef583a9642bc" } diff --git a/backend/.sqlx/query-6d1479de3067abf0dbc0096dbe3fdfc0f4a5f03ae269e1f26e33a0a0819478e7.json b/backend/.sqlx/query-6d1479de3067abf0dbc0096dbe3fdfc0f4a5f03ae269e1f26e33a0a0819478e7.json deleted file mode 100644 index 19a2272f..00000000 --- a/backend/.sqlx/query-6d1479de3067abf0dbc0096dbe3fdfc0f4a5f03ae269e1f26e33a0a0819478e7.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT signing_key FROM as_signing_keys WHERE currently_active = true AND cred_type = 'intermediate'", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "signing_key", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "6d1479de3067abf0dbc0096dbe3fdfc0f4a5f03ae269e1f26e33a0a0819478e7" -} diff --git a/backend/.sqlx/query-46b576ddf33d367695053fe68c8b5ff892102ff763146d3301a075c3c80417b0.json b/backend/.sqlx/query-708d9a419f96911616be4700d93ddeccdaf71ae897bfbd52b221ba37616dea82.json similarity index 64% rename from backend/.sqlx/query-46b576ddf33d367695053fe68c8b5ff892102ff763146d3301a075c3c80417b0.json rename to backend/.sqlx/query-708d9a419f96911616be4700d93ddeccdaf71ae897bfbd52b221ba37616dea82.json index 01e73a0c..ac265e63 100644 --- a/backend/.sqlx/query-46b576ddf33d367695053fe68c8b5ff892102ff763146d3301a075c3c80417b0.json +++ b/backend/.sqlx/query-708d9a419f96911616be4700d93ddeccdaf71ae897bfbd52b221ba37616dea82.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO as_queues (message_id, queue_id, sequence_number, message_bytes) VALUES ($1, $2, $3, $4)", + "query": "INSERT INTO as_queues (message_id, queue_id, sequence_number, message_bytes)\n VALUES ($1, $2, $3, $4)", "describe": { "columns": [], "parameters": { @@ -13,5 +13,5 @@ }, "nullable": [] }, - "hash": "46b576ddf33d367695053fe68c8b5ff892102ff763146d3301a075c3c80417b0" + "hash": "708d9a419f96911616be4700d93ddeccdaf71ae897bfbd52b221ba37616dea82" } diff --git a/backend/.sqlx/query-7100ebdd08030851784354d3f9c58839d29b52d9269cde8d33e3705e588f74b0.json b/backend/.sqlx/query-7100ebdd08030851784354d3f9c58839d29b52d9269cde8d33e3705e588f74b0.json new file mode 100644 index 00000000..90bf95c2 --- /dev/null +++ b/backend/.sqlx/query-7100ebdd08030851784354d3f9c58839d29b52d9269cde8d33e3705e588f74b0.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO\n qs_queue_data\n (queue_id, sequence_number)\n VALUES\n ($1, $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "7100ebdd08030851784354d3f9c58839d29b52d9269cde8d33e3705e588f74b0" +} diff --git a/backend/.sqlx/query-3ba8fe0bbf9d259cc4778b7ccccd85a866f2474b517c57e640d31b0f948061cf.json b/backend/.sqlx/query-84e59f457de64db71587e2de08afb86dfe1d06042de8457c07a9a05536d0386c.json similarity index 53% rename from backend/.sqlx/query-3ba8fe0bbf9d259cc4778b7ccccd85a866f2474b517c57e640d31b0f948061cf.json rename to backend/.sqlx/query-84e59f457de64db71587e2de08afb86dfe1d06042de8457c07a9a05536d0386c.json index a3c2ec9c..c48c05d3 100644 --- a/backend/.sqlx/query-3ba8fe0bbf9d259cc4778b7ccccd85a866f2474b517c57e640d31b0f948061cf.json 
+++ b/backend/.sqlx/query-84e59f457de64db71587e2de08afb86dfe1d06042de8457c07a9a05536d0386c.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "SELECT voprf_server FROM as_batched_keys WHERE token_key_id = $1", + "query": "SELECT voprf_server AS \"voprf_server: _\" FROM as_batched_keys WHERE token_key_id = $1", "describe": { "columns": [ { "ordinal": 0, - "name": "voprf_server", + "name": "voprf_server: _", "type_info": "Bytea" } ], @@ -18,5 +18,5 @@ false ] }, - "hash": "3ba8fe0bbf9d259cc4778b7ccccd85a866f2474b517c57e640d31b0f948061cf" + "hash": "84e59f457de64db71587e2de08afb86dfe1d06042de8457c07a9a05536d0386c" } diff --git a/backend/.sqlx/query-95da95aa6155ed03e44689a625c62e11d3942c33d5e308bb1ddb63ae757689d3.json b/backend/.sqlx/query-95da95aa6155ed03e44689a625c62e11d3942c33d5e308bb1ddb63ae757689d3.json new file mode 100644 index 00000000..6082dda9 --- /dev/null +++ b/backend/.sqlx/query-95da95aa6155ed03e44689a625c62e11d3942c33d5e308bb1ddb63ae757689d3.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH next_connection_package AS (\n SELECT id, connection_package\n FROM connection_packages\n WHERE client_id = $1\n LIMIT 1\n FOR UPDATE -- make sure two concurrent queries don't return the same package\n SKIP LOCKED -- skip rows that are already locked by other processes\n ),\n remaining_packages AS (\n SELECT COUNT(*) as count\n FROM connection_packages\n WHERE client_id = $1\n ),\n deleted_package AS (\n DELETE FROM connection_packages\n WHERE id = (\n SELECT id\n FROM next_connection_package\n )\n AND (SELECT count FROM remaining_packages) > 1\n RETURNING connection_package\n )\n SELECT connection_package AS \"connection_package: _\"\n FROM next_connection_package", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "connection_package: _", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "95da95aa6155ed03e44689a625c62e11d3942c33d5e308bb1ddb63ae757689d3" +} diff --git a/backend/.sqlx/query-6f5111ddd6b33e7449b904797bd9b8363e107a5194a40d065860f31aba15db02.json b/backend/.sqlx/query-96c852e6675141e33b0fbdf5428c0d6f8e61224d7795550aaf2d27e3d3fa64ee.json similarity index 86% rename from backend/.sqlx/query-6f5111ddd6b33e7449b904797bd9b8363e107a5194a40d065860f31aba15db02.json rename to backend/.sqlx/query-96c852e6675141e33b0fbdf5428c0d6f8e61224d7795550aaf2d27e3d3fa64ee.json index 5f949255..9c6e6996 100644 --- a/backend/.sqlx/query-6f5111ddd6b33e7449b904797bd9b8363e107a5194a40d065860f31aba15db02.json +++ b/backend/.sqlx/query-96c852e6675141e33b0fbdf5428c0d6f8e61224d7795550aaf2d27e3d3fa64ee.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO as_client_records (client_id, user_name, queue_encryption_key, ratchet, activity_time, credential, remaining_tokens) VALUES ($1, $2, $3, $4, $5, $6, $7)", + "query": "INSERT INTO as_client_records (\n client_id,\n user_name,\n queue_encryption_key,\n ratchet,\n activity_time,\n credential,\n remaining_tokens\n ) VALUES ($1, $2, $3, $4, $5, $6, $7)", "describe": { "columns": [], "parameters": { @@ -100,5 +100,5 @@ }, "nullable": [] }, - "hash": "6f5111ddd6b33e7449b904797bd9b8363e107a5194a40d065860f31aba15db02" + "hash": "96c852e6675141e33b0fbdf5428c0d6f8e61224d7795550aaf2d27e3d3fa64ee" } diff --git a/backend/.sqlx/query-fa1f4e4a98ac823a01d7af12eaff3edf9bdd9ba1671314eec9aeba7e4de2e671.json b/backend/.sqlx/query-abf7c9d6858f7a1bd7e4d415b69641abe2b696b2ffdda7edcc9ac237dd89c75e.json similarity index 87% rename from 
backend/.sqlx/query-fa1f4e4a98ac823a01d7af12eaff3edf9bdd9ba1671314eec9aeba7e4de2e671.json rename to backend/.sqlx/query-abf7c9d6858f7a1bd7e4d415b69641abe2b696b2ffdda7edcc9ac237dd89c75e.json index facf2663..d330efea 100644 --- a/backend/.sqlx/query-fa1f4e4a98ac823a01d7af12eaff3edf9bdd9ba1671314eec9aeba7e4de2e671.json +++ b/backend/.sqlx/query-abf7c9d6858f7a1bd7e4d415b69641abe2b696b2ffdda7edcc9ac237dd89c75e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE as_client_records SET queue_encryption_key = $1, ratchet = $2, activity_time = $3, credential = $4, remaining_tokens = $5 WHERE client_id = $6", + "query": "UPDATE as_client_records SET\n queue_encryption_key = $1,\n ratchet = $2,\n activity_time = $3,\n credential = $4,\n remaining_tokens = $5\n WHERE client_id = $6\n ", "describe": { "columns": [], "parameters": { @@ -99,5 +99,5 @@ }, "nullable": [] }, - "hash": "fa1f4e4a98ac823a01d7af12eaff3edf9bdd9ba1671314eec9aeba7e4de2e671" + "hash": "abf7c9d6858f7a1bd7e4d415b69641abe2b696b2ffdda7edcc9ac237dd89c75e" } diff --git a/backend/.sqlx/query-922474dcc71d22509f7569235396bc901ea31124e737d0f4e3a39793429255be.json b/backend/.sqlx/query-b4fc9f5a57f88216319328f9f7b48885101d7f21a9608dc7139b3fba16e18c9a.json similarity index 71% rename from backend/.sqlx/query-922474dcc71d22509f7569235396bc901ea31124e737d0f4e3a39793429255be.json rename to backend/.sqlx/query-b4fc9f5a57f88216319328f9f7b48885101d7f21a9608dc7139b3fba16e18c9a.json index c890b9b6..170e3d8f 100644 --- a/backend/.sqlx/query-922474dcc71d22509f7569235396bc901ea31124e737d0f4e3a39793429255be.json +++ b/backend/.sqlx/query-b4fc9f5a57f88216319328f9f7b48885101d7f21a9608dc7139b3fba16e18c9a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT\n user_id as \"user_id: QsUserId\",\n encrypted_push_token as \"encrypted_push_token: EncryptedPushToken\",\n owner_public_key,\n owner_signature_key,\n ratchet,\n activity_time as \"activity_time: TimeStamp\"\n FROM\n qs_client_records\n WHERE\n client_id = $1", + "query": "SELECT\n user_id as \"user_id: QsUserId\",\n encrypted_push_token as \"encrypted_push_token: EncryptedPushToken\",\n owner_public_key AS \"queue_encryption_key: _\",\n owner_signature_key AS \"auth_key: _\",\n ratchet AS \"ratchet: _\",\n activity_time AS \"activity_time: TimeStamp\"\n FROM\n qs_client_records\n WHERE\n client_id = $1", "describe": { "columns": [ { @@ -31,17 +31,17 @@ }, { "ordinal": 2, - "name": "owner_public_key", + "name": "queue_encryption_key: _", "type_info": "Bytea" }, { "ordinal": 3, - "name": "owner_signature_key", + "name": "auth_key: _", "type_info": "Bytea" }, { "ordinal": 4, - "name": "ratchet", + "name": "ratchet: _", "type_info": "Bytea" }, { @@ -64,5 +64,5 @@ false ] }, - "hash": "922474dcc71d22509f7569235396bc901ea31124e737d0f4e3a39793429255be" + "hash": "b4fc9f5a57f88216319328f9f7b48885101d7f21a9608dc7139b3fba16e18c9a" } diff --git a/backend/.sqlx/query-05558d3ff3e5761778af844cbc9bd71ebc98657719abe87bc7aa821c01c24b37.json b/backend/.sqlx/query-bb0a400f437c1bb430c82718b55951dce2f7a0fbffc99cae9522c7632fb70a04.json similarity index 66% rename from backend/.sqlx/query-05558d3ff3e5761778af844cbc9bd71ebc98657719abe87bc7aa821c01c24b37.json rename to backend/.sqlx/query-bb0a400f437c1bb430c82718b55951dce2f7a0fbffc99cae9522c7632fb70a04.json index 1c875832..727a5845 100644 --- a/backend/.sqlx/query-05558d3ff3e5761778af844cbc9bd71ebc98657719abe87bc7aa821c01c24b37.json +++ b/backend/.sqlx/query-bb0a400f437c1bb430c82718b55951dce2f7a0fbffc99cae9522c7632fb70a04.json @@ -1,11 +1,11 @@ { 
"db_name": "PostgreSQL", - "query": "SELECT signing_key FROM as_signing_keys WHERE currently_active = true AND cred_type = $1", + "query": "SELECT signing_key AS \"signing_key: _\"\n FROM as_signing_keys WHERE cred_type = $1", "describe": { "columns": [ { "ordinal": 0, - "name": "signing_key", + "name": "signing_key: _", "type_info": "Bytea" } ], @@ -28,5 +28,5 @@ false ] }, - "hash": "05558d3ff3e5761778af844cbc9bd71ebc98657719abe87bc7aa821c01c24b37" + "hash": "bb0a400f437c1bb430c82718b55951dce2f7a0fbffc99cae9522c7632fb70a04" } diff --git a/backend/.sqlx/query-c1ccb1a286feb7be2dd2c00dd1f9f80fb3e2775e66139de3a23f63f7fdc1cf4f.json b/backend/.sqlx/query-c1ccb1a286feb7be2dd2c00dd1f9f80fb3e2775e66139de3a23f63f7fdc1cf4f.json new file mode 100644 index 00000000..1be68b0e --- /dev/null +++ b/backend/.sqlx/query-c1ccb1a286feb7be2dd2c00dd1f9f80fb3e2775e66139de3a23f63f7fdc1cf4f.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT signing_key AS \"signing_key: _\"\n FROM as_signing_keys\n WHERE currently_active = true AND cred_type = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "signing_key: _", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + { + "Custom": { + "name": "credential_type", + "kind": { + "Enum": [ + "as", + "intermediate" + ] + } + } + } + ] + }, + "nullable": [ + false + ] + }, + "hash": "c1ccb1a286feb7be2dd2c00dd1f9f80fb3e2775e66139de3a23f63f7fdc1cf4f" +} diff --git a/backend/.sqlx/query-de09bf0b80bc3202de8016e6ee2face1c78d91a285b25adbd6f14e2083ca3cdc.json b/backend/.sqlx/query-d65d37efd19671aa01d5f9201fd305102fc48feb6424c8686f789ea36f216bf5.json similarity index 65% rename from backend/.sqlx/query-de09bf0b80bc3202de8016e6ee2face1c78d91a285b25adbd6f14e2083ca3cdc.json rename to backend/.sqlx/query-d65d37efd19671aa01d5f9201fd305102fc48feb6424c8686f789ea36f216bf5.json index f1f1c92a..ea2ea20b 100644 --- a/backend/.sqlx/query-de09bf0b80bc3202de8016e6ee2face1c78d91a285b25adbd6f14e2083ca3cdc.json +++ b/backend/.sqlx/query-d65d37efd19671aa01d5f9201fd305102fc48feb6424c8686f789ea36f216bf5.json @@ -1,11 +1,11 @@ { "db_name": "PostgreSQL", - "query": "SELECT signing_key FROM as_signing_keys WHERE cred_type = $1", + "query": "SELECT signing_key AS \"signing_key: _\"\n FROM as_signing_keys\n WHERE cred_type = $1", "describe": { "columns": [ { "ordinal": 0, - "name": "signing_key", + "name": "signing_key: _", "type_info": "Bytea" } ], @@ -28,5 +28,5 @@ false ] }, - "hash": "de09bf0b80bc3202de8016e6ee2face1c78d91a285b25adbd6f14e2083ca3cdc" + "hash": "d65d37efd19671aa01d5f9201fd305102fc48feb6424c8686f789ea36f216bf5" } diff --git a/backend/.sqlx/query-72831ae68d2499129834edefc378eb867bfc4aa95ddb561ec7ba70d4cbbc9a02.json b/backend/.sqlx/query-e02053265b5acce5aec76546cec1614c4ca70338e8fa5b0ad9e377416747b036.json similarity index 76% rename from backend/.sqlx/query-72831ae68d2499129834edefc378eb867bfc4aa95ddb561ec7ba70d4cbbc9a02.json rename to backend/.sqlx/query-e02053265b5acce5aec76546cec1614c4ca70338e8fa5b0ad9e377416747b036.json index 3c713dd5..eaeb10a7 100644 --- a/backend/.sqlx/query-72831ae68d2499129834edefc378eb867bfc4aa95ddb561ec7ba70d4cbbc9a02.json +++ b/backend/.sqlx/query-e02053265b5acce5aec76546cec1614c4ca70338e8fa5b0ad9e377416747b036.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO\n as_signing_keys\n (cred_type, credential_fingerprint, signing_key, currently_active)\n VALUES \n ($1, $2, $3, $4)", + "query": "INSERT INTO\n as_signing_keys\n (cred_type, credential_fingerprint, signing_key, currently_active)\n VALUES\n 
($1, $2, $3, $4)", "describe": { "columns": [], "parameters": { @@ -23,5 +23,5 @@ }, "nullable": [] }, - "hash": "72831ae68d2499129834edefc378eb867bfc4aa95ddb561ec7ba70d4cbbc9a02" + "hash": "e02053265b5acce5aec76546cec1614c4ca70338e8fa5b0ad9e377416747b036" } diff --git a/backend/.sqlx/query-ea6b8c3c2f6cb40db89e394fe67a70f911c059068789b09fe93d04c59280652f.json b/backend/.sqlx/query-ea6b8c3c2f6cb40db89e394fe67a70f911c059068789b09fe93d04c59280652f.json new file mode 100644 index 00000000..b674b9ef --- /dev/null +++ b/backend/.sqlx/query-ea6b8c3c2f6cb40db89e394fe67a70f911c059068789b09fe93d04c59280652f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT signing_key AS \"signing_key: _\"\n FROM as_signing_keys\n WHERE currently_active = true AND cred_type = 'intermediate'", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "signing_key: _", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "ea6b8c3c2f6cb40db89e394fe67a70f911c059068789b09fe93d04c59280652f" +} diff --git a/backend/.sqlx/query-eab134949ec166baafbd7e1929678bb798f3826a50c606d6476573b1755f7ae6.json b/backend/.sqlx/query-eab134949ec166baafbd7e1929678bb798f3826a50c606d6476573b1755f7ae6.json deleted file mode 100644 index a6c89dce..00000000 --- a/backend/.sqlx/query-eab134949ec166baafbd7e1929678bb798f3826a50c606d6476573b1755f7ae6.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH deleted AS (\n DELETE FROM qs_queues \n WHERE queue_id = $1 AND sequence_number < $2\n RETURNING *\n ),\n fetched AS (\n SELECT message_bytes FROM qs_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n ORDER BY sequence_number ASC\n LIMIT $3\n ),\n remaining AS (\n SELECT COALESCE(COUNT(*)) AS count \n FROM qs_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n )\n SELECT \n fetched.message_bytes,\n remaining.count\n FROM fetched, remaining\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "message_bytes", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "count", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Uuid", - "Int8", - "Int8" - ] - }, - "nullable": [ - false, - null - ] - }, - "hash": "eab134949ec166baafbd7e1929678bb798f3826a50c606d6476573b1755f7ae6" -} diff --git a/backend/.sqlx/query-ec27e35b2e3f55b0bafb7304616eca6898c69dd9c3e3842e039c9e4325e3eb5c.json b/backend/.sqlx/query-ec27e35b2e3f55b0bafb7304616eca6898c69dd9c3e3842e039c9e4325e3eb5c.json new file mode 100644 index 00000000..a9731335 --- /dev/null +++ b/backend/.sqlx/query-ec27e35b2e3f55b0bafb7304616eca6898c69dd9c3e3842e039c9e4325e3eb5c.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH deleted AS (\n DELETE FROM as_queues\n WHERE queue_id = $1 AND sequence_number < $2\n ),\n fetched AS (\n SELECT message_bytes FROM as_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n ORDER BY sequence_number ASC\n LIMIT $3\n ),\n remaining AS (\n SELECT COUNT(*) AS count\n FROM as_queues\n WHERE queue_id = $1 AND sequence_number >= $2\n )\n SELECT\n fetched.message_bytes AS \"message: BlobPersisted\",\n remaining.count\n FROM fetched, remaining", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "message: BlobPersisted", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + null + ] + }, + "hash": "ec27e35b2e3f55b0bafb7304616eca6898c69dd9c3e3842e039c9e4325e3eb5c" +} 
diff --git a/backend/.sqlx/query-f6322f6cc0bcecd8cea95129cb5519fd7b8226d049952ad719935242d53e0725.json b/backend/.sqlx/query-f6322f6cc0bcecd8cea95129cb5519fd7b8226d049952ad719935242d53e0725.json deleted file mode 100644 index 172a559e..00000000 --- a/backend/.sqlx/query-f6322f6cc0bcecd8cea95129cb5519fd7b8226d049952ad719935242d53e0725.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "WITH next_connection_package AS (\n SELECT id, connection_package \n FROM connection_packages \n WHERE client_id = $1 \n LIMIT 1 \n FOR UPDATE -- make sure two concurrent queries don't return the same package\n SKIP LOCKED -- skip rows that are already locked by other processes\n ), \n remaining_packages AS (\n SELECT COUNT(*) as count \n FROM connection_packages \n WHERE client_id = $1\n ),\n deleted_package AS (\n DELETE FROM connection_packages \n WHERE id = (\n SELECT id \n FROM next_connection_package\n ) \n AND (SELECT count FROM remaining_packages) > 1\n RETURNING connection_package\n )\n SELECT connection_package FROM next_connection_package", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "connection_package", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Uuid" - ] - }, - "nullable": [ - false - ] - }, - "hash": "f6322f6cc0bcecd8cea95129cb5519fd7b8226d049952ad719935242d53e0725" -} diff --git a/backend/migrations/20240927070412_create_initial_as_tables.sql b/backend/migrations/20240927070412_create_initial_as_tables.sql index e6642bc1..97bae2a8 100644 --- a/backend/migrations/20240927070412_create_initial_as_tables.sql +++ b/backend/migrations/20240927070412_create_initial_as_tables.sql @@ -85,4 +85,4 @@ CREATE TABLE as_queues ( CREATE TABLE opaque_setup( id INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY, opaque_setup BYTEA NOT NULL -); \ No newline at end of file +); diff --git a/backend/src/auth_service/client_record.rs b/backend/src/auth_service/client_record.rs index 14ef7b4e..aae32595 100644 --- a/backend/src/auth_service/client_record.rs +++ b/backend/src/auth_service/client_record.rs @@ -57,7 +57,8 @@ impl ClientRecord { mod persistence { use phnxtypes::{ - codec::PhnxCodec, credentials::persistence::FlatClientCredential, + codec::persist::{BlobPersist, BlobPersisted}, + credentials::persistence::FlatClientCredential, identifiers::QualifiedUserName, }; use sqlx::{ @@ -72,18 +73,23 @@ mod persistence { &self, connection: &mut PgConnection, ) -> Result<(), StorageError> { - let queue_encryption_key_bytes = PhnxCodec::to_vec(&self.queue_encryption_key)?; - let ratchet = PhnxCodec::to_vec(&self.ratchet_key)?; - let activity_time = DateTime::<Utc>::from(self.activity_time); let client_credential = FlatClientCredential::from(self.credential.clone()); let client_id = self.credential.identity(); sqlx::query!( - "INSERT INTO as_client_records (client_id, user_name, queue_encryption_key, ratchet, activity_time, credential, remaining_tokens) VALUES ($1, $2, $3, $4, $5, $6, $7)", + "INSERT INTO as_client_records ( + client_id, + user_name, + queue_encryption_key, + ratchet, + activity_time, + credential, + remaining_tokens + ) VALUES ($1, $2, $3, $4, $5, $6, $7)", client_id.client_id(), client_id.user_name().to_string(), - queue_encryption_key_bytes, - ratchet, - activity_time, + self.queue_encryption_key.persist() as _, + self.ratchet_key.persist() as _, + self.activity_time as TimeStamp, client_credential as FlatClientCredential, self.token_allowance, ) @@ -96,17 +102,21 @@ mod persistence { &self, connection: impl PgExecutor<'_>, ) -> Result<(),
StorageError> { - let queue_encryption_key_bytes = PhnxCodec::to_vec(&self.queue_encryption_key)?; - let ratchet = PhnxCodec::to_vec(&self.ratchet_key)?; - let activity_time = DateTime::<Utc>::from(self.activity_time); let client_credential = FlatClientCredential::from(self.credential.clone()); let client_id = self.credential.identity(); sqlx::query!( - "UPDATE as_client_records SET queue_encryption_key = $1, ratchet = $2, activity_time = $3, credential = $4, remaining_tokens = $5 WHERE client_id = $6", - queue_encryption_key_bytes, - ratchet, - activity_time, - client_credential as FlatClientCredential, + r#"UPDATE as_client_records SET + queue_encryption_key = $1, + ratchet = $2, + activity_time = $3, + credential = $4, + remaining_tokens = $5 + WHERE client_id = $6 + "#, + self.queue_encryption_key.persist() as _, + self.ratchet_key.persist() as _, + self.activity_time as TimeStamp, + client_credential as _, self.token_allowance, client_id.client_id(), ) @@ -119,30 +129,45 @@ connection: impl PgExecutor<'_>, client_id: &AsClientId, ) -> Result<Option<ClientRecord>, StorageError> { - sqlx::query!( + struct SqlClientRecord { + queue_encryption_key: BlobPersisted<RatchetEncryptionKey>, + ratchet: + BlobPersisted<QueueRatchet<EncryptedAsQueueMessage, AsQueueMessagePayload>>, + activity_time: DateTime<Utc>, + credential: FlatClientCredential, + remaining_tokens: i32, + } + + sqlx::query_as!( + SqlClientRecord, r#"SELECT - queue_encryption_key, - ratchet, + queue_encryption_key AS "queue_encryption_key: _", + ratchet AS "ratchet: _", activity_time, - credential as "client_credential: FlatClientCredential", + credential AS "credential: _", remaining_tokens FROM as_client_records WHERE client_id = $1"#, client_id.client_id(), ) .fetch_optional(connection) .await? - .map(|record| { - let queue_encryption_key = PhnxCodec::from_slice(&record.queue_encryption_key)?; - let ratchet = PhnxCodec::from_slice(&record.ratchet)?; - let activity_time = record.activity_time.into(); - Ok(ClientRecord { - queue_encryption_key, - ratchet_key: ratchet, - activity_time, - credential: record.client_credential.into(), - token_allowance: record.remaining_tokens, - }) - }) + .map( + |SqlClientRecord { + queue_encryption_key: BlobPersisted(queue_encryption_key), + ratchet: BlobPersisted(ratchet_key), + activity_time, + credential, + remaining_tokens, + }: SqlClientRecord| { + Ok(ClientRecord { + queue_encryption_key, + ratchet_key, + activity_time: activity_time.into(), + credential: credential.into(), + token_allowance: remaining_tokens, + }) + }, + ) .transpose() } diff --git a/backend/src/auth_service/connection_package/mod.rs b/backend/src/auth_service/connection_package/mod.rs index c1ef9a53..6330e448 100644 --- a/backend/src/auth_service/connection_package/mod.rs +++ b/backend/src/auth_service/connection_package/mod.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: AGPL-3.0-or-later -use phnxtypes::messages::client_as::ConnectionPackage; +use phnxtypes::{codec::persist::BlobPersist, messages::client_as::ConnectionPackage}; use serde::{Deserialize, Serialize}; mod persistence; @@ -12,6 +12,8 @@ pub(in crate::auth_service) enum StorableConnectionPackage { CurrentVersion(ConnectionPackage), } +impl BlobPersist for StorableConnectionPackage {} + impl From<StorableConnectionPackage> for ConnectionPackage { fn from(connection_package: StorableConnectionPackage) -> Self { match connection_package { diff --git a/backend/src/auth_service/connection_package/persistence.rs b/backend/src/auth_service/connection_package/persistence.rs index 2e71edc9..859de5ea 100644 --- a/backend/src/auth_service/connection_package/persistence.rs +++
b/backend/src/auth_service/connection_package/persistence.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use phnxtypes::{ - codec::PhnxCodec, + codec::persist::{BlobPersist, BlobPersisted}, identifiers::{AsClientId, QualifiedUserName}, messages::client_as::ConnectionPackage, }; @@ -26,11 +26,10 @@ impl StorableConnectionPackage { for (i, connection_package) in connection_packages.into_iter().enumerate() { let connection_package: StorableConnectionPackage = connection_package.into(); - let connection_package_bytes = PhnxCodec::to_vec(&connection_package)?; // Add values to the query arguments. None of these should throw an error. query_args.add(client_id.client_id())?; - query_args.add(connection_package_bytes)?; + query_args.add(connection_package.persist())?; if i > 0 { query_string.push(','); @@ -51,36 +50,40 @@ impl StorableConnectionPackage { Ok(()) } - async fn load(connection: &mut PgConnection, client_id: Uuid) -> Result<Vec<u8>, StorageError> { + async fn load( + connection: &mut PgConnection, + client_id: Uuid, + ) -> Result<StorableConnectionPackage, StorageError> { let mut transaction = connection.begin().await?; // This is to ensure that counting and deletion happen atomically. If we // don't do this, two concurrent queries might both count 2 and delete, // leaving us with 0 packages. - let connection_package_bytes_record = sqlx::query!( - "WITH next_connection_package AS ( - SELECT id, connection_package - FROM connection_packages - WHERE client_id = $1 - LIMIT 1 + let BlobPersisted(connection_package) = sqlx::query_scalar!( + r#"WITH next_connection_package AS ( + SELECT id, connection_package + FROM connection_packages + WHERE client_id = $1 + LIMIT 1 FOR UPDATE -- make sure two concurrent queries don't return the same package SKIP LOCKED -- skip rows that are already locked by other processes - ), + ), remaining_packages AS ( - SELECT COUNT(*) as count - FROM connection_packages + SELECT COUNT(*) as count + FROM connection_packages WHERE client_id = $1 ), deleted_package AS ( - DELETE FROM connection_packages + DELETE FROM connection_packages WHERE id = ( - SELECT id + SELECT id FROM next_connection_package - ) + ) AND (SELECT count FROM remaining_packages) > 1 RETURNING connection_package ) - SELECT connection_package FROM next_connection_package", + SELECT connection_package AS "connection_package: _" + FROM next_connection_package"#, client_id, ) .fetch_one(&mut *transaction) @@ -88,7 +91,7 @@ impl StorableConnectionPackage { transaction.commit().await?; - Ok(connection_package_bytes_record.connection_package) + Ok(connection_package) } /// TODO: Last resort key package @@ -96,12 +99,9 @@ connection: &mut PgConnection, client_id: &AsClientId, ) -> Result<ConnectionPackage, StorageError> { - let connection_package_bytes = Self::load(connection, client_id.client_id()).await?; - - let connection_package: StorableConnectionPackage = - PhnxCodec::from_slice(&connection_package_bytes)?; - - Ok(connection_package.into()) + Self::load(connection, client_id.client_id()) + .await + .map(From::from) } /// Return a connection package for each client of a user referenced by a @@ -126,26 +126,16 @@ impl StorableConnectionPackage { .await?; // First fetch all connection package records from the DB.
- let mut connection_packages_bytes = Vec::new(); + let mut connection_packages: Vec<ConnectionPackage> = + Vec::with_capacity(client_ids_record.len()); for client_id in client_ids_record { - let connection_package_bytes = - Self::load(&mut transaction, client_id.client_id).await?; - connection_packages_bytes.push(connection_package_bytes); + let connection_package = Self::load(&mut transaction, client_id.client_id).await?; + connection_packages.push(connection_package.into()); } // End the transaction. transaction.commit().await?; - // Deserialize the connection packages. - let connection_packages = connection_packages_bytes - .into_iter() - .map(|connection_package_bytes| { - let storable: StorableConnectionPackage = - PhnxCodec::from_slice(&connection_package_bytes)?; - Ok(storable.into()) - }) - .collect::<Result<Vec<ConnectionPackage>, StorageError>>()?; - Ok(connection_packages) } } diff --git a/backend/src/auth_service/credentials/intermediate_signing_key.rs b/backend/src/auth_service/credentials/intermediate_signing_key.rs index 771f2518..ada52e89 100644 --- a/backend/src/auth_service/credentials/intermediate_signing_key.rs +++ b/backend/src/auth_service/credentials/intermediate_signing_key.rs @@ -6,6 +6,7 @@ use std::ops::Deref; use mls_assist::openmls::prelude::SignatureScheme; use phnxtypes::{ + codec::persist::BlobPersist, credentials::{ keys::AsIntermediateSigningKey, AsIntermediateCredential, AsIntermediateCredentialCsr, CredentialFingerprint, @@ -24,6 +25,8 @@ pub(in crate::auth_service) enum IntermediateSigningKey { V1(AsIntermediateSigningKey), } +impl BlobPersist for IntermediateSigningKey {} + impl From<IntermediateSigningKey> for AsIntermediateSigningKey { fn from(signing_key: IntermediateSigningKey) -> Self { match signing_key { @@ -97,7 +100,7 @@ impl IntermediateSigningKey { mod persistence { use phnxtypes::{ - codec::PhnxCodec, + codec::persist::{BlobPersist, BlobPersisted}, credentials::{keys::AsIntermediateSigningKey, AsIntermediateCredential}, }; use sqlx::PgExecutor; @@ -115,11 +118,11 @@ mod persistence { "INSERT INTO as_signing_keys (cred_type, credential_fingerprint, signing_key, currently_active) - VALUES + VALUES ($1, $2, $3, $4)", CredentialType::Intermediate as _, self.fingerprint().as_bytes(), - PhnxCodec::to_vec(&self)?, + self.persist() as _, false, ) .execute(connection) @@ -130,12 +133,15 @@ mod persistence { pub(in crate::auth_service) async fn load( connection: impl PgExecutor<'_>, ) -> Result<Option<AsIntermediateSigningKey>, StorageError> { - sqlx::query!("SELECT signing_key FROM as_signing_keys WHERE currently_active = true AND cred_type = 'intermediate'") - .fetch_optional(connection) - .await?.map(|record| { - let signing_key: IntermediateSigningKey = PhnxCodec::from_slice(&record.signing_key)?; - Ok(signing_key.into()) - }).transpose() + let value = sqlx::query_scalar!( + r#"SELECT signing_key AS "signing_key: _" + FROM as_signing_keys + WHERE currently_active = true AND cred_type = 'intermediate'"# + ) + .fetch_optional(connection) + .await?
+ .map(|BlobPersisted(value): BlobPersisted<IntermediateSigningKey>| value.into()); + Ok(value) } pub(super) async fn activate( @@ -161,21 +167,20 @@ mod persistence { pub(in crate::auth_service) async fn load_all( connection: impl PgExecutor<'_>, ) -> Result<Vec<AsIntermediateCredential>, StorageError> { - let records = sqlx::query!( - "SELECT signing_key FROM as_signing_keys WHERE cred_type = $1", + let credentials = sqlx::query_scalar!( + r#"SELECT signing_key AS "signing_key: _" + FROM as_signing_keys WHERE cred_type = $1"#, CredentialType::Intermediate as _, ) .fetch_all(connection) - .await?; - let credentials = records - .into_iter() - .map(|record| { - let signing_key: IntermediateSigningKey = - PhnxCodec::from_slice(&record.signing_key)?; - let as_signing_key = AsIntermediateSigningKey::from(signing_key); - Ok(as_signing_key.credential().clone()) - }) - .collect::<Result<Vec<AsIntermediateCredential>, StorageError>>()?; + .await? + .into_iter() + .map( + |BlobPersisted(value): BlobPersisted<IntermediateSigningKey>| { + AsIntermediateSigningKey::from(value).take_credential() + }, + ) + .collect(); Ok(credentials) } } diff --git a/backend/src/auth_service/credentials/signing_key.rs b/backend/src/auth_service/credentials/signing_key.rs index c61167c9..d9ff6a79 100644 --- a/backend/src/auth_service/credentials/signing_key.rs +++ b/backend/src/auth_service/credentials/signing_key.rs @@ -6,6 +6,7 @@ use std::ops::Deref; use mls_assist::openmls::prelude::SignatureScheme; use phnxtypes::{ + codec::persist::BlobPersist, credentials::{keys::AsSigningKey, AsCredential, CredentialFingerprint}, identifiers::Fqdn, }; @@ -19,6 +20,8 @@ pub(in crate::auth_service) enum StorableSigningKey { V1(AsSigningKey), } +impl BlobPersist for StorableSigningKey {} + impl From<StorableSigningKey> for AsSigningKey { fn from(signing_key: StorableSigningKey) -> Self { match signing_key { @@ -66,7 +69,7 @@ impl StorableSigningKey { } mod persistence { - use phnxtypes::codec::PhnxCodec; + use phnxtypes::codec::persist::BlobPersisted; use crate::{auth_service::credentials::CredentialType, errors::StorageError}; @@ -81,11 +84,11 @@ mod persistence { "INSERT INTO as_signing_keys (cred_type, credential_fingerprint, signing_key, currently_active) - VALUES + VALUES ($1, $2, $3, $4)", CredentialType::As as _, self.fingerprint().as_bytes(), - PhnxCodec::to_vec(&self)?, + self.persist() as _, false, ) .execute(connection) @@ -96,17 +99,16 @@ mod persistence { pub(in crate::auth_service) async fn load( connection: impl PgExecutor<'_>, ) -> Result<Option<AsSigningKey>, StorageError> { - sqlx::query!( - "SELECT signing_key FROM as_signing_keys WHERE currently_active = true AND cred_type = $1", + let value = sqlx::query_scalar!( + r#"SELECT signing_key AS "signing_key: _" + FROM as_signing_keys + WHERE currently_active = true AND cred_type = $1"#, CredentialType::As as _ ) .fetch_optional(connection) .await?
- .map(|record| { - let signing_key: StorableSigningKey = PhnxCodec::from_slice(&record.signing_key)?; - Ok(signing_key.into()) - }) - .transpose() + .map(|BlobPersisted(key): BlobPersisted<StorableSigningKey>| key.into()); + Ok(value) } pub(super) async fn activate( @@ -132,23 +134,19 @@ mod persistence { pub(in crate::auth_service) async fn load_all( connection: impl PgExecutor<'_>, ) -> Result<Vec<AsCredential>, StorageError> { - let records = sqlx::query!( - "SELECT signing_key FROM as_signing_keys WHERE cred_type = $1", + let credentials = sqlx::query_scalar!( + r#"SELECT signing_key AS "signing_key: _" + FROM as_signing_keys + WHERE cred_type = $1"#, CredentialType::As as _ ) .fetch_all(connection) - .await?; - - let credentials = records - .into_iter() - .map(|record| { - let signing_key: StorableSigningKey = - PhnxCodec::from_slice(&record.signing_key)?; - let as_signing_key = AsSigningKey::from(signing_key); - Ok(as_signing_key.credential().clone()) - }) - .collect::<Result<Vec<AsCredential>, StorageError>>()?; - + .await? + .into_iter() + .map(|BlobPersisted(key): BlobPersisted<StorableSigningKey>| { + AsSigningKey::from(key).take_credential() + }) + .collect(); Ok(credentials) } } diff --git a/backend/src/auth_service/opaque.rs b/backend/src/auth_service/opaque.rs index 3b940fcb..b00f086e 100644 --- a/backend/src/auth_service/opaque.rs +++ b/backend/src/auth_service/opaque.rs @@ -8,13 +8,17 @@ use opaque_ke::{ rand::{CryptoRng, RngCore}, ServerSetup, }; -use phnxtypes::crypto::OpaqueCiphersuite; +use phnxtypes::{codec::persist::BlobPersist, crypto::OpaqueCiphersuite}; +use serde::{Deserialize, Serialize}; use sqlx::PgExecutor; use crate::errors::StorageError; +#[derive(Serialize, Deserialize)] pub(super) struct OpaqueSetup(ServerSetup<OpaqueCiphersuite>); +impl BlobPersist for OpaqueSetup {} + impl Deref for OpaqueSetup { type Target = ServerSetup<OpaqueCiphersuite>; fn deref(&self) -> &Self::Target { &self.0 } } @@ -35,7 +39,7 @@ impl OpaqueSetup { } mod persistence { - use phnxtypes::codec::PhnxCodec; + use phnxtypes::codec::persist::BlobPersisted; use super::*; @@ -46,7 +50,7 @@ ) -> Result<(), StorageError> { sqlx::query!( "INSERT INTO opaque_setup (opaque_setup) VALUES ($1)", - PhnxCodec::to_vec(&self.0)? + self.persist() as _ ) .execute(connection) .await?; @@ -57,11 +61,12 @@ connection: impl PgExecutor<'_>, ) -> Result<ServerSetup<OpaqueCiphersuite>, StorageError> { // There is currently only one OPAQUE setup.
- let opaque_setup_record = sqlx::query!("SELECT opaque_setup FROM opaque_setup") - .fetch_one(connection) - .await?; - let opaque_setup = PhnxCodec::from_slice(&opaque_setup_record.opaque_setup)?; - Ok(opaque_setup) + let BlobPersisted(OpaqueSetup(value)) = sqlx::query_scalar!( + r#"SELECT opaque_setup AS "opaque_setup: _" FROM opaque_setup"# + ) + .fetch_one(connection) + .await?; + Ok(value) } } } diff --git a/backend/src/auth_service/privacy_pass.rs b/backend/src/auth_service/privacy_pass.rs index a4a7ef54..1e7881c6 100644 --- a/backend/src/auth_service/privacy_pass.rs +++ b/backend/src/auth_service/privacy_pass.rs @@ -3,12 +3,13 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use async_trait::async_trait; -use phnxtypes::codec::PhnxCodec; +use phnxtypes::codec::persist::{BlobPersist, BlobPersisted}; use privacypass::{ batched_tokens_ristretto255::server::BatchedKeyStore, private_tokens::{Ristretto255, VoprfServer}, TruncatedTokenKeyId, }; +use serde::{Deserialize, Serialize}; use sqlx::{Acquire, Postgres, Transaction}; use tokio::sync::Mutex; @@ -24,6 +25,11 @@ impl<'a, 'b> AuthServiceBatchedKeyStoreProvider<'a, 'b> { } } +#[derive(Debug, Serialize, Deserialize)] +struct StorableVoprfServer(VoprfServer<Ristretto255>); + +impl BlobPersist for StorableVoprfServer {} + #[async_trait] impl BatchedKeyStore for AuthServiceBatchedKeyStoreProvider<'_, '_> { /// Inserts a keypair with a given `truncated_token_key_id` into the key store. @@ -32,17 +38,15 @@ impl BatchedKeyStore for AuthServiceBatchedKeyStoreProvider<'_, '_> { truncated_token_key_id: TruncatedTokenKeyId, server: VoprfServer<Ristretto255>, ) { - let Ok(server_bytes) = PhnxCodec::to_vec(&server) else { - return; - }; let mut transaction = self.transaction_mutex.lock().await; let Ok(connection) = transaction.acquire().await else { return; }; + let server = StorableVoprfServer(server); let _ = sqlx::query!( "INSERT INTO as_batched_keys (token_key_id, voprf_server) VALUES ($1, $2)", - truncated_token_key_id as i16, - server_bytes, + i16::from(truncated_token_key_id), + server.persist() as _, ) .execute(connection) .await; @@ -55,13 +59,13 @@ ) -> Option<VoprfServer<Ristretto255>> { let mut transaction = self.transaction_mutex.lock().await; let connection = transaction.acquire().await.ok()?; - let server_bytes_record = sqlx::query!( - "SELECT voprf_server FROM as_batched_keys WHERE token_key_id = $1", + let server: Option<BlobPersisted<StorableVoprfServer>> = sqlx::query_scalar!( + r#"SELECT voprf_server AS "voprf_server: _" FROM as_batched_keys WHERE token_key_id = $1"#, *truncated_token_key_id as i16, ) .fetch_one(connection) .await .ok()?; - PhnxCodec::from_slice(&server_bytes_record.voprf_server).ok() + server.map(|record| record.into_inner().0) } } diff --git a/backend/src/auth_service/queue.rs b/backend/src/auth_service/queue.rs index 7b7d69e9..1cd1e140 100644 --- a/backend/src/auth_service/queue.rs +++ b/backend/src/auth_service/queue.rs @@ -27,8 +27,11 @@ impl Queue { } mod persistence { - use phnxtypes::{codec::PhnxCodec, messages::QueueMessage}; - use sqlx::{Connection, Row}; + use phnxtypes::{ + codec::persist::{BlobPersist, BlobPersisted}, + messages::QueueMessage, + }; + use sqlx::Connection; use uuid::Uuid; use crate::errors::QueueError; @@ -55,9 +58,6 @@ mod persistence { client_id: &AsClientId, message: QueueMessage, ) -> Result<(), QueueError> { - // Encode the message - let message_bytes = PhnxCodec::to_vec(&message).map_err(StorageError::Serde)?; - // Begin the transaction let mut transaction = connection.begin().await?; @@ -87,14 
diff --git a/backend/src/auth_service/queue.rs b/backend/src/auth_service/queue.rs
index 7b7d69e9..1cd1e140 100644
--- a/backend/src/auth_service/queue.rs
+++ b/backend/src/auth_service/queue.rs
@@ -27,8 +27,11 @@ impl Queue {
 }
 
 mod persistence {
-    use phnxtypes::{codec::PhnxCodec, messages::QueueMessage};
-    use sqlx::{Connection, Row};
+    use phnxtypes::{
+        codec::persist::{BlobPersist, BlobPersisted},
+        messages::QueueMessage,
+    };
+    use sqlx::Connection;
     use uuid::Uuid;
 
     use crate::errors::QueueError;
@@ -55,9 +58,6 @@ mod persistence {
         client_id: &AsClientId,
         message: QueueMessage,
     ) -> Result<(), QueueError> {
-        // Encode the message
-        let message_bytes = PhnxCodec::to_vec(&message).map_err(StorageError::Serde)?;
-
         // Begin the transaction
         let mut transaction = connection.begin().await?;
 
@@ -87,14 +87,15 @@ mod persistence {
         let message_id = Uuid::new_v4();
         // Store the message in the DB
         sqlx::query!(
-            "INSERT INTO as_queues (message_id, queue_id, sequence_number, message_bytes) VALUES ($1, $2, $3, $4)",
-            message_id,
-            client_id.client_id(),
-            sequence_number,
-            message_bytes,
-        )
-        .execute(&mut *transaction)
-        .await?;
+            "INSERT INTO as_queues (message_id, queue_id, sequence_number, message_bytes)
+            VALUES ($1, $2, $3, $4)",
+            message_id,
+            client_id.client_id(),
+            sequence_number,
+            message.persist() as _,
+        )
+        .execute(&mut *transaction)
+        .await?;
 
         let new_sequence_number = sequence_number + 1;
         // Increase the sequence number and store it.
@@ -128,7 +129,8 @@ mod persistence {
         let mut transaction = connection.begin().await?;
 
         // This query is idempotent, so there's no need to lock anything.
-        let query = "WITH deleted AS (
+        let rows = sqlx::query!(
+            r#"WITH deleted AS (
                 DELETE FROM as_queues
                 WHERE queue_id = $1 AND sequence_number < $2
             ),
@@ -144,32 +146,29 @@ mod persistence {
                 WHERE queue_id = $1 AND sequence_number >= $2
             )
             SELECT
-                fetched.message_bytes,
+                fetched.message_bytes AS "message: BlobPersisted<QueueMessage>",
                 remaining.count
-            FROM fetched, remaining";
-
-        let rows = sqlx::query(query)
-            .bind(client_id.client_id())
-            .bind(sequence_number as i64)
-            .bind(number_of_messages)
-            .fetch_all(&mut *transaction)
-            .await?;
+            FROM fetched, remaining"#,
+            client_id.client_id(),
+            sequence_number as i64,
+            number_of_messages,
+        )
+        .fetch_all(&mut *transaction)
+        .await?;
 
         transaction.commit().await?;
 
         // Convert the records to messages.
-        let messages = rows
-            .iter()
+        let mut remaining = None;
+        let messages: Vec<QueueMessage> = rows
+            .into_iter()
             .map(|row| {
-                let message_bytes: &[u8] = row.try_get("message_bytes")?;
-                let message =
-                    PhnxCodec::from_slice(message_bytes).map_err(StorageError::Serde)?;
-                Ok(message)
+                remaining.get_or_insert(row.count);
+                row.message.into_inner()
             })
-            .collect::<Result<Vec<_>, QueueError>>()?;
+            .collect();
 
-        let remaining_messages = if let Some(row) = rows.first() {
-            let remaining_count: i64 = row.try_get("count")?;
+        let remaining_messages = if let Some(remaining_count) = remaining.flatten() {
             // Subtract the number of messages we've read from the remaining
             // count to get the number of unread messages.
             remaining_count - messages.len() as i64
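The reworked fetch loop converts rows and captures the `remaining` count in a single pass instead of decoding in one pass and re-reading the first row afterwards; the conversion can no longer fail because decoding already happened inside the query macro. The same idea reduced to plain iterators, with a hypothetical `Row` type:

    struct Row {
        message: String,
        count: Option<i64>, // the CTE repeats the same count on every row
    }

    fn split(rows: Vec<Row>) -> (Vec<String>, i64) {
        let mut remaining = None;
        let messages: Vec<String> = rows
            .into_iter()
            .map(|row| {
                // Remember the count from the first row we see.
                remaining.get_or_insert(row.count);
                row.message
            })
            .collect();
        // `remaining` is still `None` when the queue returned no rows.
        let unread = remaining.flatten().unwrap_or_default() - messages.len() as i64;
        (messages, unread)
    }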
diff --git a/backend/src/auth_service/user_record.rs b/backend/src/auth_service/user_record.rs
index 12eed37d..538b172d 100644
--- a/backend/src/auth_service/user_record.rs
+++ b/backend/src/auth_service/user_record.rs
@@ -40,15 +40,23 @@ impl UserRecord {
 }
 
 mod persistence {
+    use std::borrow::Cow;
+
     use phnxtypes::{
-        codec::PhnxCodec,
-        identifiers::{QualifiedUserName, UserName},
+        codec::persist::{BlobPersist, BlobPersisted},
+        identifiers::QualifiedUserName,
     };
+    use serde::{Deserialize, Serialize};
     use sqlx::PgExecutor;
 
     use crate::errors::StorageError;
 
-    use super::UserRecord;
+    use super::*;
+
+    #[derive(Serialize, Deserialize)]
+    struct StorableServerRegistration<'a>(Cow<'a, ServerRegistration<OpaqueCiphersuite>>);
+
+    impl BlobPersist for StorableServerRegistration<'_> {}
 
     impl UserRecord {
         /// Loads the AsUserRecord for a given UserName. Returns None if no AsUserRecord
@@ -57,15 +65,19 @@ mod persistence {
             connection: impl PgExecutor<'_>,
             user_name: &QualifiedUserName,
         ) -> Result<Option<UserRecord>, StorageError> {
-            sqlx::query!(
-                r#"SELECT user_name as "user_name: UserName", password_file FROM as_user_records WHERE user_name = $1"#,
+            sqlx::query_scalar!(
+                r#"SELECT password_file AS "password_file: _"
+                FROM as_user_records
+                WHERE user_name = $1"#,
                 user_name.to_string(),
             )
             .fetch_optional(connection)
             .await?
-            .map(|record| {
-                let password_file = PhnxCodec::from_slice(&record.password_file)?;
-                Ok(UserRecord::new(user_name.clone(), password_file))
+            .map(|BlobPersisted(StorableServerRegistration(password_file))| {
+                Ok(UserRecord::new(
+                    user_name.clone(),
+                    password_file.into_owned(),
+                ))
             })
             .transpose()
         }
@@ -76,11 +88,11 @@ mod persistence {
             &self,
             connection: impl PgExecutor<'_>,
         ) -> Result<(), StorageError> {
-            let password_file_bytes = PhnxCodec::to_vec(&self.password_file)?;
+            let password_file = StorableServerRegistration(Cow::Borrowed(&self.password_file));
             sqlx::query!(
                 "INSERT INTO as_user_records (user_name, password_file) VALUES ($1, $2)",
                 self.user_name.to_string(),
-                password_file_bytes,
+                password_file.persist() as _,
             )
             .execute(connection)
             .await?;
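`StorableServerRegistration` wraps the password file in a `Cow` so that a single wrapper serves both directions: storing borrows the existing `ServerRegistration` without cloning it before encoding, while loading deserializes into an owned value that `into_owned` hands back for free. The pattern in isolation, with a hypothetical `Profile` type that is not part of this patch:

    use std::borrow::Cow;

    use phnxtypes::codec::persist::BlobPersist;
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Serialize, Deserialize)]
    struct Profile {
        display_name: String,
    }

    // Borrows on the write path, owns on the read path.
    #[derive(Serialize, Deserialize)]
    struct StorableProfile<'a>(Cow<'a, Profile>);

    impl BlobPersist for StorableProfile<'_> {}

    fn store(profile: &Profile) {
        // No clone: the wrapper borrows the profile, and `persist()` borrows
        // the wrapper; serialization happens inside the Encode/ToSql impls.
        let storable = StorableProfile(Cow::Borrowed(profile));
        let _param = storable.persist();
    }

    fn unwrap_loaded(storable: StorableProfile<'static>) -> Profile {
        // Deserialization always yields `Cow::Owned`, so this never clones.
        storable.0.into_owned()
    }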
diff --git a/backend/src/ds/group_state/mod.rs b/backend/src/ds/group_state/mod.rs
index 02e98005..a836cc9e 100644
--- a/backend/src/ds/group_state/mod.rs
+++ b/backend/src/ds/group_state/mod.rs
@@ -15,7 +15,7 @@ use mls_assist::{
     MlsAssistRustCrypto,
 };
 use phnxtypes::{
-    codec::PhnxCodec,
+    codec::{persist::BlobPersist, PhnxCodec},
     crypto::{
         ear::{
             keys::{EncryptedIdentityLinkKey, GroupStateEarKey},
@@ -244,6 +244,8 @@ pub(super) enum DsGroupStateDecryptionError {
 #[serde(transparent)]
 pub struct EncryptedDsGroupState(Ciphertext);
 
+impl BlobPersist for EncryptedDsGroupState {}
+
 #[derive(Debug)]
 pub(super) struct StorableDsGroupData {
     group_id: Uuid,
diff --git a/backend/src/ds/group_state/persistence.rs b/backend/src/ds/group_state/persistence.rs
index ff554e12..d455f14f 100644
--- a/backend/src/ds/group_state/persistence.rs
+++ b/backend/src/ds/group_state/persistence.rs
@@ -2,14 +2,17 @@
 //
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
-use phnxtypes::codec::PhnxCodec;
-use phnxtypes::identifiers::QualifiedGroupId;
+use phnxtypes::{
+    codec::persist::{BlobPersist, BlobPersisted},
+    identifiers::{QualifiedGroupId, SealedClientReference},
+};
 use sqlx::{
     types::chrono::{DateTime, Utc},
     PgExecutor,
 };
+use uuid::Uuid;
 
-use crate::errors::StorageError;
+use crate::{ds::group_state::EncryptedDsGroupState, errors::StorageError};
 
 use super::StorableDsGroupData;
 
@@ -23,9 +26,9 @@ impl StorableDsGroupData {
                 ($1, $2, $3, $4)
             ON CONFLICT (group_id) DO NOTHING",
             self.group_id,
-            PhnxCodec::to_vec(&self.encrypted_group_state)?,
+            self.encrypted_group_state.persist() as _,
             DateTime::<Utc>::from(self.last_used),
-            PhnxCodec::to_vec(&self.deleted_queues)?
+            self.deleted_queues.persist() as _,
         )
         .execute(connection)
         .await?;
@@ -36,27 +39,44 @@ impl StorableDsGroupData {
         connection: impl PgExecutor<'_>,
         qgid: &QualifiedGroupId,
     ) -> Result<Option<Self>, StorageError> {
-        let Some(group_data_record) = sqlx::query!(
-            "SELECT
-                group_id, ciphertext, last_used, deleted_queues
+        struct SqlStorableDsGroupData {
+            group_id: Uuid,
+            ciphertext: BlobPersisted<EncryptedDsGroupState>,
+            last_used: DateTime<Utc>,
+            deleted_queues: BlobPersisted<Vec<SealedClientReference>>,
+        }
+
+        let group_data = sqlx::query_as!(
+            SqlStorableDsGroupData,
+            r#"SELECT
+                group_id,
+                ciphertext AS "ciphertext: _",
+                last_used,
+                deleted_queues AS "deleted_queues: _"
             FROM
                 encrypted_groups
             WHERE
-                group_id = $1",
+                group_id = $1"#,
             qgid.group_uuid()
         )
         .fetch_optional(connection)
         .await?
-        else {
-            return Ok(None);
-        };
-        let storable_group_data = Self {
-            group_id: group_data_record.group_id,
-            encrypted_group_state: PhnxCodec::from_slice(&group_data_record.ciphertext)?,
-            last_used: group_data_record.last_used.into(),
-            deleted_queues: PhnxCodec::from_slice(&group_data_record.deleted_queues)?,
-        };
-        Ok(Some(storable_group_data))
+        .map(
+            |SqlStorableDsGroupData {
+                 group_id,
+                 ciphertext: BlobPersisted(encrypted_group_state),
+                 last_used,
+                 deleted_queues: BlobPersisted(deleted_queues),
+             }| {
+                Self {
+                    group_id,
+                    encrypted_group_state,
+                    last_used: last_used.into(),
+                    deleted_queues,
+                }
+            },
+        );
+        Ok(group_data)
     }
 
     pub(crate) async fn update(&self, connection: impl PgExecutor<'_>) -> Result<(), StorageError> {
@@ -64,13 +84,15 @@ impl StorableDsGroupData {
             "UPDATE
                 encrypted_groups
             SET
-                ciphertext = $2, last_used = $3, deleted_queues = $4
+                ciphertext = $2,
+                last_used = $3,
+                deleted_queues = $4
             WHERE
                 group_id = $1",
             self.group_id,
-            PhnxCodec::to_vec(&self.encrypted_group_state)?,
+            self.encrypted_group_state.persist() as _,
             DateTime::<Utc>::from(self.last_used),
-            PhnxCodec::to_vec(&self.deleted_queues)?
+            self.deleted_queues.persist() as _,
         )
         .execute(connection)
         .await?;
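The `load` above moves all decoding into `query_as!`: columns map onto a local helper struct whose blob fields are typed `BlobPersisted<T>`, so the subsequent `map` is pure destructuring with no fallible step left. A reduced sketch of the same shape, assuming a hypothetical `events` table and `Payload` type (the sqlx macros would check the query against the actual schema at compile time):

    use phnxtypes::codec::persist::{BlobPersist, BlobPersisted};
    use serde::{Deserialize, Serialize};
    use sqlx::PgExecutor;
    use uuid::Uuid;

    #[derive(Serialize, Deserialize)]
    struct Payload {
        data: Vec<u8>,
    }

    impl BlobPersist for Payload {}

    struct Event {
        id: Uuid,
        payload: Payload,
    }

    async fn load_event(
        connection: impl PgExecutor<'_>,
    ) -> Result<Option<Event>, sqlx::Error> {
        // The helper struct drives the macro's type overrides via `: _`.
        struct SqlEvent {
            id: Uuid,
            payload: BlobPersisted<Payload>,
        }

        let event = sqlx::query_as!(
            SqlEvent,
            r#"SELECT id, payload AS "payload: _" FROM events WHERE id = $1"#,
            Uuid::nil(),
        )
        .fetch_optional(connection)
        .await?
        .map(|SqlEvent { id, payload: BlobPersisted(payload) }| Event { id, payload });
        Ok(event)
    }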
diff --git a/backend/src/qs/client_record.rs b/backend/src/qs/client_record.rs
index 1a53c2b6..ded98d72 100644
--- a/backend/src/qs/client_record.rs
+++ b/backend/src/qs/client_record.rs
@@ -92,7 +92,7 @@ impl QsClientRecord {
 }
 
 mod persistence {
-    use phnxtypes::codec::PhnxCodec;
+    use phnxtypes::codec::persist::{BlobPersist, BlobPersisted};
     use sqlx::{PgConnection, PgExecutor};
 
     use super::*;
 
@@ -104,11 +104,6 @@ mod persistence {
         &self,
         connection: impl PgExecutor<'_>,
     ) -> Result<(), StorageError> {
-        // Create and store the client record.
-        let owner_public_key = PhnxCodec::to_vec(&self.queue_encryption_key)?;
-        let owner_signature_key = PhnxCodec::to_vec(&self.auth_key)?;
-        let ratchet = PhnxCodec::to_vec(&self.ratchet_key)?;
-
         sqlx::query!(
             "INSERT INTO
                 qs_client_records
@@ -118,9 +113,9 @@ mod persistence {
             &self.client_id as &QsClientId,
             &self.user_id as &QsUserId,
             self.encrypted_push_token.as_ref() as Option<&EncryptedPushToken>,
-            owner_public_key,
-            owner_signature_key,
-            ratchet,
+            self.queue_encryption_key.persist() as _,
+            self.auth_key.persist() as _,
+            self.ratchet_key.persist() as _,
            &self.activity_time as &TimeStamp,
         )
         .execute(connection)
@@ -133,15 +128,26 @@ mod persistence {
         connection: impl PgExecutor<'_>,
         client_id: &QsClientId,
     ) -> Result<Option<QsClientRecord>, StorageError> {
+        struct SqlQsClientRecord {
+            user_id: QsUserId,
+            encrypted_push_token: Option<EncryptedPushToken>,
+            queue_encryption_key: BlobPersisted<RatchetEncryptionKey>,
+            auth_key: BlobPersisted<QsClientVerifyingKey>,
+            ratchet:
+                BlobPersisted<QueueRatchet<EncryptedQsQueueMessage, QsQueueMessagePayload>>,
+            activity_time: TimeStamp,
+        }
+
         let client_id = client_id.as_uuid();
-        sqlx::query!(
+        sqlx::query_as!(
+            SqlQsClientRecord,
             r#"SELECT
                 user_id as "user_id: QsUserId",
                 encrypted_push_token as "encrypted_push_token: EncryptedPushToken",
-                owner_public_key,
-                owner_signature_key,
-                ratchet,
-                activity_time as "activity_time: TimeStamp"
+                owner_public_key AS "queue_encryption_key: _",
+                owner_signature_key AS "auth_key: _",
+                ratchet AS "ratchet: _",
+                activity_time AS "activity_time: TimeStamp"
             FROM
                 qs_client_records
             WHERE
@@ -150,21 +156,26 @@ mod persistence {
         )
         .fetch_optional(connection)
        .await?
-        .map(|record| {
-            let owner_public_key = PhnxCodec::from_slice(&record.owner_public_key)?;
-            let owner_signature_key = PhnxCodec::from_slice(&record.owner_signature_key)?;
-            let ratchet_key = PhnxCodec::from_slice(&record.ratchet)?;
-
-            Ok(QsClientRecord {
-                user_id: record.user_id,
-                client_id: (*client_id).into(),
-                encrypted_push_token: record.encrypted_push_token,
-                queue_encryption_key: owner_public_key,
-                auth_key: owner_signature_key,
-                ratchet_key,
-                activity_time: record.activity_time,
-            })
-        })
+        .map(
+            |SqlQsClientRecord {
+                 user_id,
+                 encrypted_push_token,
+                 queue_encryption_key: BlobPersisted(queue_encryption_key),
+                 auth_key: BlobPersisted(auth_key),
+                 ratchet: BlobPersisted(ratchet_key),
+                 activity_time,
+             }| {
+                Ok(QsClientRecord {
+                    user_id,
+                    client_id: (*client_id).into(),
+                    encrypted_push_token,
+                    queue_encryption_key,
+                    auth_key,
+                    ratchet_key,
+                    activity_time,
+                })
+            },
+        )
         .transpose()
     }
 
@@ -172,10 +183,6 @@ mod persistence {
         &self,
         connection: &mut PgConnection,
     ) -> Result<(), StorageError> {
-        let owner_public_key = PhnxCodec::to_vec(&self.queue_encryption_key)?;
-        let owner_signature_key = PhnxCodec::to_vec(&self.auth_key)?;
-        let ratchet = PhnxCodec::to_vec(&self.ratchet_key)?;
-
         sqlx::query!(
             "UPDATE qs_client_records
             SET
@@ -187,9 +194,9 @@ mod persistence {
             WHERE
                 client_id = $6",
             self.encrypted_push_token.as_ref() as Option<&EncryptedPushToken>,
-            owner_public_key,
-            owner_signature_key,
-            ratchet,
+            self.queue_encryption_key.persist() as _,
+            self.auth_key.persist() as _,
+            self.ratchet_key.persist() as _,
             &self.activity_time as &TimeStamp,
             &self.client_id as &QsClientId,
         )
diff --git a/backend/src/qs/queue.rs b/backend/src/qs/queue.rs
index 740452c7..6bc19541 100644
--- a/backend/src/qs/queue.rs
+++ b/backend/src/qs/queue.rs
@@ -2,7 +2,11 @@
 //
 // SPDX-License-Identifier: AGPL-3.0-or-later
 
-use phnxtypes::{codec::PhnxCodec, identifiers::QsClientId, messages::QueueMessage};
+use phnxtypes::{
+    codec::persist::{BlobPersist, BlobPersisted},
+    identifiers::QsClientId,
+    messages::QueueMessage,
+};
 use sqlx::{Connection, PgConnection, PgExecutor};
 
 use crate::errors::{QueueError, StorageError};
 
@@ -30,9 +34,6 @@ impl Queue {
         queue_id: &QsClientId,
         message: QueueMessage,
     ) -> Result<(), QueueError> {
-        // Encode the message
-        let message_bytes = PhnxCodec::to_vec(&message)?;
-
         // Begin the transaction
         let mut transaction = connection.begin().await?;
 
@@ -41,18 +42,18 @@ impl Queue {
             r#"
             WITH updated_sequence AS (
                 -- Step 1: Update and return the current sequence number.
-                UPDATE qs_queue_data 
-                SET sequence_number = sequence_number + 1 
-                WHERE queue_id = $1 
+                UPDATE qs_queue_data
+                SET sequence_number = sequence_number + 1
+                WHERE queue_id = $1
                 RETURNING sequence_number - 1 as sequence_number
             )
             -- Step 2: Insert the message with the new sequence number.
-            INSERT INTO qs_queues (queue_id, sequence_number, message_bytes) 
+            INSERT INTO qs_queues (queue_id, sequence_number, message_bytes)
             SELECT $1, sequence_number, $2 FROM updated_sequence
             RETURNING sequence_number
             "#,
             queue_id as &QsClientId,
-            message_bytes,
+            message.persist() as _,
         )
         .fetch_one(&mut *transaction)
         .await?;
@@ -87,7 +88,7 @@ impl Queue {
         let rows = sqlx::query!(
             r#"
             WITH deleted AS (
-                DELETE FROM qs_queues 
+                DELETE FROM qs_queues
                 WHERE queue_id = $1 AND sequence_number < $2
                 RETURNING *
             ),
@@ -98,12 +99,12 @@ impl Queue {
                 LIMIT $3
             ),
             remaining AS (
-                SELECT COALESCE(COUNT(*)) AS count 
+                SELECT COALESCE(COUNT(*)) AS count
                 FROM qs_queues
                 WHERE queue_id = $1 AND sequence_number >= $2
             )
-            SELECT 
-                fetched.message_bytes,
+            SELECT
+                fetched.message_bytes AS "message: BlobPersisted<QueueMessage>",
                 remaining.count
             FROM fetched, remaining
             "#,
@@ -117,16 +118,16 @@ impl Queue {
         transaction.commit().await?;
 
         // Convert the records to messages.
-        let messages = rows
-            .iter()
+        let mut remaining = None;
+        let messages: Vec<QueueMessage> = rows
+            .into_iter()
             .map(|row| {
-                let message = PhnxCodec::from_slice(&row.message_bytes)?;
-                Ok(message)
+                remaining.get_or_insert(row.count);
+                row.message.into_inner()
             })
-            .collect::<Result<Vec<_>, QueueError>>()?;
+            .collect();
 
-        let remaining_messages = if let Some(row) = rows.first() {
-            let remaining_count: i64 = row.count.unwrap_or_default();
+        let remaining_messages = if let Some(remaining_count) = remaining.flatten() {
             // Subtract the number of messages we've read from the remaining
             // count to get the number of unread messages.
             remaining_count - messages.len() as i64
@@ -147,10 +148,10 @@ mod persistence {
         connection: impl PgExecutor<'_>,
     ) -> Result<(), StorageError> {
         sqlx::query!(
-            "INSERT INTO 
-                qs_queue_data 
+            "INSERT INTO
+                qs_queue_data
                 (queue_id, sequence_number)
-            VALUES 
+            VALUES
                 ($1, $2)",
             &self.queue_id as &QsClientId,
             self.sequence_number,
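On the write side no helper struct is needed: `persist()` returns a `BlobPersisting` borrow whose `Encode` impl serializes straight into the argument buffer, and `as _` opts that parameter out of the macro's type check. A sketch, reusing the hypothetical `events` table and `Payload` type from the earlier example:

    use phnxtypes::codec::persist::BlobPersist;
    use sqlx::PgExecutor;
    use uuid::Uuid;

    async fn store_event(
        connection: impl PgExecutor<'_>,
        id: Uuid,
        payload: &Payload, // the hypothetical `BlobPersist` type from above
    ) -> Result<(), sqlx::Error> {
        sqlx::query!(
            "INSERT INTO events (id, payload) VALUES ($1, $2)",
            id,
            payload.persist() as _, // serialized lazily, inside the Encode impl
        )
        .execute(connection)
        .await?;
        Ok(())
    }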
diff --git a/coreclient/migrations/V2__add_timestamp_indexes.sql b/coreclient/migrations/V2__add_timestamp_indexes.sql
index 1f66a1db..f4eabde6 100644
--- a/coreclient/migrations/V2__add_timestamp_indexes.sql
+++ b/coreclient/migrations/V2__add_timestamp_indexes.sql
@@ -1,6 +1,6 @@
 -- SPDX-FileCopyrightText: 2024 Phoenix R&D GmbH
 --
 -- SPDX-License-Identifier: AGPL-3.0-or-later
 
-CREATE INDEX IF NOT EXISTS conversation_messages_timestamp_asc_idx ON conversation_messages (timestamp ASC);
+CREATE INDEX IF NOT EXISTS conversation_messages_timestamp_desc_idx ON conversation_messages (timestamp DESC);
 
diff --git a/types/src/codec/cbor.rs b/types/src/codec/cbor.rs
index c4f70db3..878af9c0 100644
--- a/types/src/codec/cbor.rs
+++ b/types/src/codec/cbor.rs
@@ -10,6 +10,15 @@ use super::Codec;
 #[derive(Debug)]
 pub(super) struct Cbor;
 
+impl Cbor {
+    pub(crate) fn to_writer(
+        value: &impl Serialize,
+        writer: &mut impl std::io::Write,
+    ) -> Result<(), ciborium::ser::Error<std::io::Error>> {
+        ciborium::into_writer(value, writer)
+    }
+}
+
 #[derive(Debug, Error)]
 pub enum CborError {
     #[error(transparent)]
diff --git a/types/src/codec/mod.rs b/types/src/codec/mod.rs
index af9ca07f..54c9e612 100644
--- a/types/src/codec/mod.rs
+++ b/types/src/codec/mod.rs
@@ -9,6 +9,7 @@ use serde::{de::DeserializeOwned, Serialize};
 
 mod cbor;
 mod error;
+pub mod persist;
 
 #[cfg(test)]
 mod tests;
@@ -54,6 +55,15 @@ impl PhnxCodec {
         Ok(buf)
     }
 
+    fn serialize_to_writer<T: Serialize>(
+        &self,
+        value: &T,
+        writer: &mut impl std::io::Write,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
+        Cbor::to_writer(value, writer)?;
+        Ok(())
+    }
+
     fn deserialize<T: DeserializeOwned>(
         &self,
         bytes: &[u8],
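`serialize_to_writer` exists so that the sqlx `Encode` impl in persist.rs (next file) can stream CBOR directly into the connection's argument buffer instead of allocating an intermediate `Vec<u8>` the way `to_vec` does. The underlying `ciborium` call accepts any `std::io::Write` sink; for instance:

    use serde::Serialize;

    #[derive(Serialize)]
    struct Sample {
        n: u32,
    }

    fn main() -> Result<(), ciborium::ser::Error<std::io::Error>> {
        // Any `std::io::Write` sink works; a plain Vec stands in here for
        // sqlx's PgArgumentBuffer.
        let mut buf: Vec<u8> = Vec::new();
        ciborium::into_writer(&Sample { n: 42 }, &mut buf)?;
        assert!(!buf.is_empty());
        Ok(())
    }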
diff --git a/types/src/codec/persist.rs b/types/src/codec/persist.rs
new file mode 100644
index 00000000..f7ebd85a
--- /dev/null
+++ b/types/src/codec/persist.rs
@@ -0,0 +1,100 @@
+#[cfg(feature = "sqlite")]
+use rusqlite::types::{FromSqlError, FromSqlResult, ToSqlOutput, ValueRef};
+use serde::{de::DeserializeOwned, Serialize};
+#[cfg(feature = "sqlx")]
+use sqlx::{encode::IsNull, error::BoxDynError, Database, Postgres};
+
+use super::PhnxCodec;
+
+/// A marker trait for types that can be persisted as a blob in the database.
+pub trait BlobPersist: Serialize + DeserializeOwned {
+    fn persist(&self) -> BlobPersisting<'_, Self> {
+        BlobPersisting(self)
+    }
+}
+
+impl<T: BlobPersist> BlobPersist for Vec<T> {}
+
+/// A wrapper type for persisting `T: BlobPersist` as a blob in the database.
+///
+/// Because of Rust's orphan rules, we can't implement SQL-related traits for
+/// `T: BlobPersist` directly.
+#[derive(Debug)]
+pub struct BlobPersisting<'a, T: BlobPersist>(pub &'a T);
+
+/// A wrapper type for retrieving `T: BlobPersist` as a blob from the database.
+///
+/// Because of Rust's orphan rules, we can't implement SQL-related traits for
+/// `T: BlobPersist` directly.
+#[derive(Debug)]
+pub struct BlobPersisted<T: BlobPersist>(pub T);
+
+impl<'a, T: BlobPersist> From<&'a T> for BlobPersisting<'a, T> {
+    fn from(value: &'a T) -> Self {
+        Self(value)
+    }
+}
+
+impl<T: BlobPersist> BlobPersisted<T> {
+    /// Returns the inner value.
+    ///
+    /// The value is moved out; no copy is made.
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+#[cfg(feature = "sqlx")]
+impl<T: BlobPersist> sqlx::Type<Postgres> for BlobPersisting<'_, T> {
+    fn type_info() -> <Postgres as Database>::TypeInfo {
+        <&[u8] as sqlx::Type<Postgres>>::type_info()
+    }
+}
+
+#[cfg(feature = "sqlx")]
+impl<'q, T: BlobPersist> sqlx::Encode<'q, Postgres> for BlobPersisting<'_, T> {
+    fn encode_by_ref(
+        &self,
+        buf: &mut <Postgres as Database>::ArgumentBuffer<'q>,
+    ) -> Result<IsNull, BoxDynError> {
+        PhnxCodec::V1.serialize_to_writer(&self.0, &mut **buf)?;
+        Ok(IsNull::No)
+    }
+}
+
+#[cfg(feature = "sqlx")]
+impl<T: BlobPersist> sqlx::Type<Postgres> for BlobPersisted<T> {
+    fn type_info() -> <Postgres as Database>::TypeInfo {
+        <&[u8] as sqlx::Type<Postgres>>::type_info()
+    }
+}
+
+#[cfg(feature = "sqlx")]
+impl<'r, T: BlobPersist> sqlx::Decode<'r, Postgres> for BlobPersisted<T> {
+    fn decode(value: <Postgres as Database>::ValueRef<'r>) -> Result<Self, BoxDynError> {
+        let bytes = value.as_bytes()?;
+        let value: T = PhnxCodec::V1.deserialize(bytes)?;
+        Ok(Self(value))
+    }
+}
+
+#[cfg(feature = "sqlite")]
+impl<T: BlobPersist> rusqlite::ToSql for BlobPersisting<'_, T> {
+    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
+        let bytes = PhnxCodec::V1
+            .serialize(&self.0)
+            .map_err(rusqlite::Error::ToSqlConversionFailure)?;
+        Ok(ToSqlOutput::Owned(bytes.into()))
+    }
+}
+
+#[cfg(feature = "sqlite")]
+impl<T: BlobPersist> rusqlite::types::FromSql for BlobPersisted<T> {
+    fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
+        let bytes = value.as_blob()?;
+        let value: T = PhnxCodec::V1
+            .deserialize(bytes)
+            .map_err(FromSqlError::Other)?;
+        Ok(Self(value))
+    }
+}
diff --git a/types/src/credentials/keys.rs b/types/src/credentials/keys.rs
index d5cff85a..00c9f385 100644
--- a/types/src/credentials/keys.rs
+++ b/types/src/credentials/keys.rs
@@ -70,6 +70,10 @@ impl AsIntermediateSigningKey {
         })
     }
 
+    pub fn take_credential(self) -> AsIntermediateCredential {
+        self.credential
+    }
+
     pub fn credential(&self) -> &AsIntermediateCredential {
         &self.credential
     }
@@ -104,6 +108,10 @@ impl AsSigningKey {
         }
     }
 
+    pub fn take_credential(self) -> AsCredential {
+        self.credential
+    }
+
     pub fn credential(&self) -> &AsCredential {
         &self.credential
     }
diff --git a/types/src/crypto/mod.rs b/types/src/crypto/mod.rs
index e85bef60..845be9ac 100644
--- a/types/src/crypto/mod.rs
+++ b/types/src/crypto/mod.rs
@@ -18,7 +18,9 @@ use sha2::Sha256;
 use thiserror::Error;
 use tls_codec::{TlsDeserializeBytes, TlsSerialize, TlsSize};
 
-use crate::{crypto::ear::EarEncryptable, messages::QueueMessage, LibraryError};
+use crate::{
+    codec::persist::BlobPersist, crypto::ear::EarEncryptable, messages::QueueMessage, LibraryError,
+};
 
 use self::{
     ear::{keys::RatchetKey, Ciphertext, EarDecryptable},
@@ -51,6 +53,8 @@ pub type RatchetKeyUpdate = Vec<u8>;
 #[cfg_attr(feature = "sqlx", derive(sqlx::Type), sqlx(transparent))]
 pub struct RatchetEncryptionKey(EncryptionPublicKey);
 
+impl BlobPersist for RatchetEncryptionKey {}
+
 #[derive(Clone, Serialize, Deserialize)]
 #[cfg_attr(feature = "sqlx", derive(sqlx::Type), sqlx(transparent))]
 pub struct RatchetDecryptionKey(DecryptionKey);
diff --git a/types/src/crypto/ratchet/mod.rs b/types/src/crypto/ratchet/mod.rs
index 1985f065..e60e6c4a 100644
--- a/types/src/crypto/ratchet/mod.rs
+++ b/types/src/crypto/ratchet/mod.rs
@@ -37,6 +37,11 @@ pub struct QueueRatchet<CiphertextType, PayloadType>
     key: RatchetKey,
 }
 
+impl<CiphertextType: AsRef<Ciphertext> + From<Ciphertext>, PayloadType> BlobPersist
+    for QueueRatchet<CiphertextType, PayloadType>
+{
+}
+
 impl<CiphertextType: AsRef<Ciphertext> + From<Ciphertext>, PayloadType> TryFrom<RatchetSecret>
     for QueueRatchet<CiphertextType, PayloadType>
 {
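With the `sqlite` feature, the same wrappers cover the client side: `BlobPersisting` implements `ToSql` and `BlobPersisted` implements `FromSql`, so a `BlobPersist` type round-trips through rusqlite without further glue. A sketch against an in-memory database, with a hypothetical `Note` type and `notes` table:

    use phnxtypes::codec::persist::{BlobPersist, BlobPersisted};
    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Note {
        text: String,
    }

    impl BlobPersist for Note {}

    fn main() -> rusqlite::Result<()> {
        let conn = rusqlite::Connection::open_in_memory()?;
        conn.execute("CREATE TABLE notes (body BLOB NOT NULL)", ())?;

        // Write: `persist()` borrows the note; `ToSql` runs PhnxCodec.
        let note = Note { text: "hello".into() };
        conn.execute(
            "INSERT INTO notes (body) VALUES (?1)",
            rusqlite::params![note.persist()],
        )?;

        // Read: `FromSql` decodes the blob through `BlobPersisted<Note>`.
        let BlobPersisted(loaded): BlobPersisted<Note> =
            conn.query_row("SELECT body FROM notes", (), |row| row.get(0))?;
        assert_eq!(loaded, note);
        Ok(())
    }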
diff --git a/types/src/crypto/signatures/keys.rs b/types/src/crypto/signatures/keys.rs
index 03c817b5..c43f802c 100644
--- a/types/src/crypto/signatures/keys.rs
+++ b/types/src/crypto/signatures/keys.rs
@@ -12,7 +12,7 @@ use tls_codec::{TlsDeserializeBytes, TlsSerialize, TlsSize, VLBytes};
 #[cfg(feature = "sqlite")]
 use crate::codec::PhnxCodec;
 
-use crate::crypto::errors::KeyGenerationError;
+use crate::{codec::persist::BlobPersist, crypto::errors::KeyGenerationError};
 
 use super::{
     private_keys::{SigningKey, VerifyingKey},
@@ -130,6 +130,8 @@ impl UserKeyHash {
 #[cfg_attr(feature = "sqlx", derive(sqlx::Type), sqlx(transparent))]
 pub struct QsClientVerifyingKey(VerifyingKey);
 
+impl BlobPersist for QsClientVerifyingKey {}
+
 impl AsRef<VerifyingKey> for QsClientVerifyingKey {
     fn as_ref(&self) -> &VerifyingKey {
         &self.0
diff --git a/types/src/identifiers/mod.rs b/types/src/identifiers/mod.rs
index 12a27b23..9b561cf6 100644
--- a/types/src/identifiers/mod.rs
+++ b/types/src/identifiers/mod.rs
@@ -16,10 +16,13 @@ use tracing::{debug, error};
 use url::Host;
 use uuid::Uuid;
 
-use crate::crypto::{
-    ear::keys::PushTokenEarKey,
-    errors::RandomnessError,
-    hpke::{ClientIdDecryptionKey, ClientIdEncryptionKey, HpkeDecryptable, HpkeEncryptable},
+use crate::{
+    codec::persist::BlobPersist,
+    crypto::{
+        ear::keys::PushTokenEarKey,
+        errors::RandomnessError,
+        hpke::{ClientIdDecryptionKey, ClientIdEncryptionKey, HpkeDecryptable, HpkeEncryptable},
+    },
 };
 
 use super::*;
@@ -452,6 +455,8 @@ pub struct SealedClientReference {
     pub(crate) ciphertext: HpkeCiphertext,
 }
 
+impl BlobPersist for SealedClientReference {}
+
 impl Hash for SealedClientReference {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.ciphertext.kem_output.hash(state);
diff --git a/types/src/messages/mod.rs b/types/src/messages/mod.rs
index 0e15e010..d278171e 100644
--- a/types/src/messages/mod.rs
+++ b/types/src/messages/mod.rs
@@ -9,7 +9,10 @@ use mls_assist::{
 use serde::{Deserialize, Serialize};
 use tls_codec::{TlsDeserializeBytes, TlsSerialize, TlsSize};
 
-use crate::crypto::{ear::Ciphertext, errors::RandomnessError};
+use crate::{
+    codec::persist::BlobPersist,
+    crypto::{ear::Ciphertext, errors::RandomnessError},
+};
 
 pub mod client_as;
 pub mod client_as_out;
@@ -83,6 +86,8 @@ pub struct QueueMessage {
     pub ciphertext: Ciphertext,
 }
 
+impl BlobPersist for QueueMessage {}
+
 #[derive(
     Clone, Debug, PartialEq, Serialize, Deserialize, TlsSerialize, TlsDeserializeBytes, TlsSize,
 )]
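Taken together, opting a type into blob persistence is now one serde derive plus one marker line; the wrapper impls supply all of the SQL plumbing. An end-to-end sketch on the Postgres side, with a hypothetical `DeviceToken` type and `device_tokens` table that are not part of this patch:

    use phnxtypes::codec::persist::{BlobPersist, BlobPersisted};
    use serde::{Deserialize, Serialize};
    use sqlx::PgExecutor;

    #[derive(Serialize, Deserialize)]
    pub struct DeviceToken {
        token: Vec<u8>,
    }

    // The only persistence-specific line the type needs.
    impl BlobPersist for DeviceToken {}

    pub async fn store(
        connection: impl PgExecutor<'_>,
        token: &DeviceToken,
    ) -> Result<(), sqlx::Error> {
        sqlx::query!(
            "INSERT INTO device_tokens (token) VALUES ($1)",
            token.persist() as _, // encoded via BlobPersisting
        )
        .execute(connection)
        .await?;
        Ok(())
    }

    pub async fn load(
        connection: impl PgExecutor<'_>,
    ) -> Result<Option<DeviceToken>, sqlx::Error> {
        let token = sqlx::query_scalar!(
            r#"SELECT token AS "token: _" FROM device_tokens LIMIT 1"#
        )
        .fetch_optional(connection)
        .await?
        .map(|BlobPersisted(token): BlobPersisted<DeviceToken>| token);
        Ok(token)
    }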