Skip to content

Commit c874f8e

Browse files
committed
Try to make importing faster.
1 parent 23890b9 commit c874f8e

File tree

7 files changed

+209
-414
lines changed

7 files changed

+209
-414
lines changed

Diff for: src/database/chunks.rs

+20-76
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
11
use std::borrow::Cow;
22
use std::future::Future;
33
use std::marker::PhantomData;
4-
4+
use std::sync::Arc;
55
use bincode::{Decode, Encode, config::standard};
66
use byteorder::LE;
77
use futures::channel::oneshot::{self, Canceled};
88
use heed::{types::U64, BytesDecode, BytesEncode, Env, MdbError};
9-
use tracing::{info, trace, warn};
9+
use moka::future::Cache;
10+
use tracing::{trace, warn};
1011

1112
use crate::{
1213
database::Database,
@@ -162,27 +163,22 @@ impl Database {
162163
let key = hash((chunk.dimension.as_ref().unwrap(), chunk.x_pos, chunk.z_pos));
163164

164165
// Insert chunk
165-
database.put(&mut rw_tx, &key, chunk)?
166-
// .map_err(|err| {
167-
// Error::DatabaseError(format!("Failed to insert or update chunk: {err}"))
168-
// })?;
166+
database.put(&mut rw_tx, &key, chunk)?;
169167
}
170-
171168
// Commit changes
172169
rw_tx.commit()?;
173-
// .map_err(|err| {
174-
// Error::DatabaseError(format!("Unable to commit changes to database: {err}"))
175-
// })?;
176170
Ok(())
177171
}
178172

179173
async fn load_into_cache(&self, key: u64) -> Result<(), Error> {
180-
let db = self.db.clone();
181-
let tsk_db = self.db.clone();
182-
let cache = self.cache.clone();
174+
Database::load_into_cache_standalone(self.db.clone(), self.cache.clone(), key).await
175+
}
176+
177+
async fn load_into_cache_standalone(db: Env, cache: Arc<Cache<u64, Chunk>>, key: u64) -> Result<(), Error> {
178+
let tsk_db = db.clone();
183179

184180
tokio::task::spawn(async move {
185-
181+
186182
// Check cache
187183
if cache.contains_key(&key) {
188184
trace!("Chunk already exists in cache: {:X}", key);
@@ -210,7 +206,6 @@ impl Database {
210206
.await?;
211207
Ok(())
212208
}
213-
214209
/// Insert a chunk into the database <br>
215210
/// This will also insert the chunk into the cache <br>
216211
/// If the chunk already exists, it will return an error
@@ -395,65 +390,6 @@ impl Database {
395390
///
396391
/// ```
397392
pub async fn batch_insert(&self, values: Vec<Chunk>) -> Result<(), Error> {
398-
/*
399-
400-
trace!("processing chunks (compressing and encoding)");
401-
// Process chunks in parallel
402-
let processed_chunks: Vec<(u64, Vec<u8>)> = values
403-
.par_iter()
404-
.map(|chunk| {
405-
let key = hash((
406-
chunk.dimension.as_ref().expect(&format!("Invalid chunk @ ({},{})", chunk.x_pos, chunk.z_pos)),
407-
chunk.x_pos,
408-
chunk.z_pos,
409-
));
410-
411-
let encoded_chunk = encode_to_vec(chunk, standard())
412-
.expect("Failed to encode chunk");
413-
let compressed = zstd_compress(&encoded_chunk, 3)
414-
.expect("Failed to compress chunk.")
415-
;
416-
417-
(key, compressed)
418-
})
419-
.collect();
420-
trace!("processed chunks");*/
421-
422-
// Insert into cache in parallel
423-
// TODO: re-enable this?
424-
/*values.par_iter().for_each(|chunk| {
425-
let key = hash((
426-
chunk.dimension.as_ref().expect(&format!("Invalid chunk @ ({},{})", chunk.x_pos, chunk.z_pos)),
427-
chunk.x_pos,
428-
chunk.z_pos,
429-
));
430-
431-
// tokio::spawn(self.load_into_cache(key));
432-
// if let Err(e) = self.cache.insert(key, chunk.clone()) {
433-
// warn!("Failed to insert chunk into cache: {:?}", e);
434-
// }
435-
});
436-
*/
437-
438-
/*trace!("Inserting chunks into database");
439-
// Perform batch insert into LMDB
440-
spawn_blocking(move || {
441-
let mut rw_tx = db.write_txn()?;
442-
let database = db
443-
.open_database::<U64<LE>, Bytes>(&rw_tx, Some("chunks"))?
444-
.expect("No table \"chunks\" found. The database should have been initialized");
445-
446-
for (key, compressed) in processed_chunks {
447-
database.put(&mut rw_tx, &key, &compressed)?;
448-
}
449-
450-
rw_tx.commit()?;
451-
Ok::<_, Error>(())
452-
})
453-
.await??;
454-
455-
Ok(())*/
456-
457393
// Clone database pointer
458394
let db = self.db.clone();
459395
let tsk_db = self.db.clone();
@@ -467,9 +403,17 @@ impl Database {
467403
// WARNING: The previous logic was to first insert in database and then insert in cache using load_into_cache fn.
468404
// This has been modified to avoid having to query database while we already have the data available.
469405
// First insert into cache
406+
470407
for (key, chunk) in keys.into_iter().zip(&values) {
471-
self.cache.insert(key, chunk.clone()).await;
472-
self.load_into_cache(key).await?;
408+
let cache = self.cache.clone();
409+
let db = self.db.clone();
410+
let chunk = chunk.clone();
411+
tokio::spawn(async move {
412+
cache.insert(key, chunk).await;
413+
if let Err(e) = Database::load_into_cache_standalone(db, cache, key).await {
414+
warn!("Error inserting chunk into database: {:?}", e);
415+
}
416+
});
473417
}
474418

475419
// Then insert into persistent database

Diff for: src/database/mod.rs

+3-4
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,8 @@ use crate::utils::error::Error;
1919
use crate::world::chunk_format::Chunk;
2020
pub mod chunks;
2121

22-
// MDBX constants
23-
const LMDB_MIN_PAGE_SIZE: usize = 2 * 1024usize.pow(2); // 100MiB
24-
const LMDB_PAGE_SIZE_INCREMENT: usize = 50*1024usize.pow(2); // 200MiB
22+
const LMDB_MIN_PAGE_SIZE: usize = 50 * 1024usize.pow(2); // 50MB
23+
const LMDB_PAGE_SIZE_INCREMENT: usize = 250*1024usize.pow(2); // 250MB
2524
const LMDB_MAX_DBS: u32 = 10;
2625

2726
// Database threadpool
@@ -85,7 +84,7 @@ pub async fn start_database() -> Result<Database, Error> {
8584

8685
// Open database (This operation is safe as we assume no other process touched the database)
8786
let lmdb = unsafe {
88-
opts.flags(EnvFlags::empty())
87+
opts.flags(EnvFlags::WRITE_MAP | EnvFlags::NO_SYNC)
8988
.open(&world_path)
9089
.expect("Unable to open LMDB environment located at {world_path:?}")
9190
};

Diff for: src/net/systems/chunk_sender.rs

+3-2
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ impl ChunkSender {
143143

144144
let chunk_radius = player_view_distance as i32;
145145

146-
for x in -chunk_radius..=chunk_radius {
146+
'x: for x in -chunk_radius..=chunk_radius {
147147
for z in -chunk_radius..=chunk_radius {
148148
let Ok(packet) = ChunkDataAndUpdateLight::new(
149149
state.clone(),
@@ -154,7 +154,8 @@ impl ChunkSender {
154154
};
155155
let conn_read = conn.read().await;
156156
if let Err(e) = conn_read.send_packet(packet).await {
157-
warn!("Failed to send chunk to player: {}", e);
157+
warn!("Failed to send chunk to player: {} ; Cancelling.", e);
158+
break 'x;
158159
}
159160
}
160161
}

Diff for: src/utils/error.rs

-3
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
use std::convert::Infallible;
22

3-
use crate::world::importing_v2::ImportingError;
43
use config::ConfigError;
54

65
#[derive(thiserror::Error, Debug)]
@@ -92,8 +91,6 @@ pub enum Error {
9291
#[error(transparent)]
9392
CompressionError(std::io::Error),
9493

95-
#[error(transparent)]
96-
ImportingError(#[from] ImportingError),
9794
#[error("Database error: {0}")]
9895
LmdbError(#[from] heed::Error),
9996
}

0 commit comments

Comments (0)