feat(sqlite): Run VACUUM operation after removing a room #4651
Changes from 2 commits
```diff
@@ -336,7 +336,7 @@ impl SqliteStateStore {
             // Defragment the DB and optimize its size on the filesystem.
             // This should have been run in the migration for version 7, to reduce the size
             // of the DB as we removed the media cache.
-            conn.execute_batch("VACUUM").await?;
+            conn.vacuum().await?;
             conn.set_kv("version", vec![12]).await?;
         }
```
```diff
@@ -1739,40 +1739,42 @@ impl StateStore for SqliteStateStore {
         let this = self.clone();
         let room_id = room_id.to_owned();

-        self.acquire()
-            .await?
-            .with_transaction(move |txn| {
-                let room_info_room_id = this.encode_key(keys::ROOM_INFO, &room_id);
-                txn.remove_room_info(&room_info_room_id)?;
+        let conn = self.acquire().await?;

-                let state_event_room_id = this.encode_key(keys::STATE_EVENT, &room_id);
-                txn.remove_room_state_events(&state_event_room_id, None)?;
+        conn.with_transaction(move |txn| -> Result<()> {
+            let room_info_room_id = this.encode_key(keys::ROOM_INFO, &room_id);
+            txn.remove_room_info(&room_info_room_id)?;

-                let member_room_id = this.encode_key(keys::MEMBER, &room_id);
-                txn.remove_room_members(&member_room_id, None)?;
+            let state_event_room_id = this.encode_key(keys::STATE_EVENT, &room_id);
+            txn.remove_room_state_events(&state_event_room_id, None)?;

-                let profile_room_id = this.encode_key(keys::PROFILE, &room_id);
-                txn.remove_room_profiles(&profile_room_id)?;
+            let member_room_id = this.encode_key(keys::MEMBER, &room_id);
+            txn.remove_room_members(&member_room_id, None)?;

-                let room_account_data_room_id = this.encode_key(keys::ROOM_ACCOUNT_DATA, &room_id);
-                txn.remove_room_account_data(&room_account_data_room_id)?;
+            let profile_room_id = this.encode_key(keys::PROFILE, &room_id);
+            txn.remove_room_profiles(&profile_room_id)?;

-                let receipt_room_id = this.encode_key(keys::RECEIPT, &room_id);
-                txn.remove_room_receipts(&receipt_room_id)?;
+            let room_account_data_room_id = this.encode_key(keys::ROOM_ACCOUNT_DATA, &room_id);
+            txn.remove_room_account_data(&room_account_data_room_id)?;

-                let display_name_room_id = this.encode_key(keys::DISPLAY_NAME, &room_id);
-                txn.remove_room_display_names(&display_name_room_id)?;
+            let receipt_room_id = this.encode_key(keys::RECEIPT, &room_id);
+            txn.remove_room_receipts(&receipt_room_id)?;

-                let send_queue_room_id = this.encode_key(keys::SEND_QUEUE, &room_id);
-                txn.remove_room_send_queue(&send_queue_room_id)?;
+            let display_name_room_id = this.encode_key(keys::DISPLAY_NAME, &room_id);
+            txn.remove_room_display_names(&display_name_room_id)?;

-                let dependent_send_queue_room_id =
-                    this.encode_key(keys::DEPENDENTS_SEND_QUEUE, &room_id);
-                txn.remove_room_dependent_send_queue(&dependent_send_queue_room_id)?;
+            let send_queue_room_id = this.encode_key(keys::SEND_QUEUE, &room_id);
+            txn.remove_room_send_queue(&send_queue_room_id)?;

-                Ok(())
-            })
-            .await
+            let dependent_send_queue_room_id =
+                this.encode_key(keys::DEPENDENTS_SEND_QUEUE, &room_id);
+            txn.remove_room_dependent_send_queue(&dependent_send_queue_room_id)?;
+
+            Ok(())
+        })
+        .await?;
+
+        conn.vacuum().await
```
Review comment on `conn.vacuum().await`:

Member: I'm wondering if we can vacuum as part of the transaction, or if it would lead to more trouble? It's not clear from the sqlite doc…

Author (Collaborator): No, it's not possible; it returns an error.
```diff
     }

     async fn save_send_queue_request(
```
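For reference, the error the author mentions can be reproduced with rusqlite directly. This is a standalone sketch, not part of this PR: SQLite refuses to run VACUUM while an explicit transaction is open, which is why the PR runs the vacuum after the transaction has committed.

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;

    // Start an explicit transaction, then try to VACUUM inside it.
    let tx = conn.transaction()?;
    let err = tx.execute_batch("VACUUM").unwrap_err();

    // SQLite rejects this with "cannot VACUUM from within a transaction".
    println!("{err}");
    Ok(())
}
```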
```diff
@@ -22,6 +22,8 @@ use matrix_sdk_store_encryption::StoreCipher;
 use ruma::time::SystemTime;
 use rusqlite::{limits::Limit, OptionalExtension, Params, Row, Statement, Transaction};
 use serde::{de::DeserializeOwned, Serialize};
+#[cfg(not(test))]
+use tracing::warn;

 use crate::{
     error::{Error, Result},
@@ -136,6 +138,24 @@ pub(crate) trait SqliteAsyncConnExt {
         self.execute_batch("PRAGMA journal_size_limit = 10000000;").await.map_err(Error::from)?;
         Ok(())
     }
+
+    /// Defragment the database and free space on the filesystem.
+    ///
+    /// Only returns an error in tests, otherwise the error is only logged.
+    async fn vacuum(&self) -> Result<()> {
+        if let Err(error) = self.execute_batch("VACUUM").await {
+            // Since this is an optimisation step, do not propagate the error
+            // but log it.
+            #[cfg(not(test))]
+            warn!("Failed to vacuum database: {error}");
+
+            // We want to know if there is an error with this step during tests.
+            #[cfg(test)]
+            return Err(error.into());
+        }
+
+        Ok(())
+    }
 }

 #[async_trait]
```
Review comment on `vacuum`:

Preexisting, but does it make sense to vacuum all the time? If only a single media file has been removed, it's probably not worth it, especially as vacuuming requires doubling the size of the database on disk, since it creates a copy of the database while it runs. Unfortunately that means heuristics to decide when to trigger it; maybe a threshold on the number of removed files or the total size on disk would be profitable and still simple enough to trigger often?
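One possible shape for such a heuristic, purely illustrative and not part of this PR: ask SQLite how much reclaimable free space the database currently holds and only vacuum above a threshold. The helper name `should_vacuum` and the 10 MiB threshold below are made up for the example.

```rust
use rusqlite::Connection;

/// Arbitrary example threshold: only vacuum once at least 10 MiB is reclaimable.
const VACUUM_THRESHOLD_BYTES: u64 = 10 * 1024 * 1024;

/// Hypothetical helper: decide whether a VACUUM is worth the extra disk I/O.
fn should_vacuum(conn: &Connection) -> rusqlite::Result<bool> {
    // Number of unused pages on the freelist, i.e. pages a VACUUM could reclaim.
    let freelist_count: u64 = conn.query_row("PRAGMA freelist_count", [], |row| row.get(0))?;
    // Size of a database page in bytes.
    let page_size: u64 = conn.query_row("PRAGMA page_size", [], |row| row.get(0))?;

    Ok(freelist_count * page_size >= VACUUM_THRESHOLD_BYTES)
}
```

A caller could then run `vacuum` only when such a check returns true, avoiding the temporary doubling of on-disk size for small cleanups.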