@@ -6,7 +6,10 @@ use bincode::{Decode, Encode, config::standard};
 use byteorder::LE;
 use futures::channel::oneshot::{self, Canceled};
 use heed::{types::U64, BytesDecode, BytesEncode, Env, MdbError};
+use heed::types::Bytes;
 use moka::future::Cache;
+use tokio::runtime::Handle;
+use tokio::task::block_in_place;
 use tracing::{trace, warn};

 use crate::{
@@ -15,7 +18,7 @@ use crate::{
     utils::hash::hash,
     world::chunk_format::Chunk
 };
-
+use crate::world::importing::SerializedChunk;
 use super::{LMDB_PAGE_SIZE, LMDB_PAGE_SIZE_INCREMENT, LMDB_READER_SYNC, LMDB_THREADPOOL};

 pub struct Zstd<T>(PhantomData<T>);
@@ -48,6 +51,30 @@ impl<'a, T: Decode + 'a> BytesDecode<'a> for Zstd<T> {
     }
 }

+pub struct ZstdCodec;
+
+impl ZstdCodec {
+    pub async fn compress_data<T: Encode + Send + 'static>(data: T) -> crate::Result<Vec<u8>> {
+        tokio::task::spawn_blocking(
+            move || {
+                let mut bytes = Vec::new();
+                let mut compressor = zstd::Encoder::new(&mut bytes, 3)?;
+                bincode::encode_into_std_write(&data, &mut compressor, standard())?;
+                compressor.finish()?;
+                Ok(bytes)
+            }
+        ).await?
+    }
+    pub async fn decompress_data<T: Decode + Send + 'static>(data: Vec<u8>) -> crate::Result<T> {
+        tokio::task::spawn_blocking(
+            move || {
+                let decoded = bincode::decode_from_std_read(&mut zstd::Decoder::new(data.as_slice())?, standard())?;
+                Ok(decoded)
+            }
+        ).await?
+    }
+}
+
 /// LMDB will follow a linear growth as opposed to MDBX which
 /// uses a geometric growth.
 pub(super) fn new_page_size(old_size: usize) -> usize {
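
Taken together, the codec gives an async round trip in which both the CPU-heavy zstd pass and the bincode pass run on tokio's blocking pool instead of inside an LMDB transaction. A minimal usage sketch (the function name is hypothetical; it assumes `Chunk: Encode + Decode + Send + 'static` and the crate-wide `crate::Result` alias used above):

```rust
// Hypothetical round trip through the ZstdCodec added above.
async fn roundtrip(chunk: Chunk) -> crate::Result<Chunk> {
    // bincode-encode + zstd-compress (level 3) on a blocking thread
    let bytes: Vec<u8> = ZstdCodec::compress_data(chunk).await?;
    // zstd-decompress + bincode-decode back into a Chunk
    ZstdCodec::decompress_data::<Chunk>(bytes).await
}
```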
@@ -114,11 +141,23 @@ impl Database {
         // Initialize read transaction and open chunks table
         let ro_tx = db.read_txn()?;
         let database = db
-            .open_database::<U64<LE>, Zstd<Chunk>>(&ro_tx, Some("chunks"))?
+            .open_database::<U64<LE>, Bytes>(&ro_tx, Some("chunks"))?
             .expect("No table \"chunks\" found. The database should have been initialized");

         // Attempt to fetch chunk from table
-        database.get(&ro_tx, key)
+        let data = database.get(&ro_tx, key)?;
+        let chunk = match data {
+            Some(data) => {
+                // let chunk = ZstdCodec::decompress_data::<Chunk>(data.to_vec()).expect("Failed to decompress chunk");
+                let chunk = Handle::current().block_on(async {
+                    ZstdCodec::decompress_data::<Chunk>(data.to_vec()).await.expect("Failed to decompress chunk")
+                });
+                Some(chunk)
+            }
+            None => None,
+        };
+
+        Ok(chunk)
         //.map_err(|err| Error::DatabaseError(format!("Failed to get chunk: {err}")))
     }

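One caveat with this pattern: `Handle::block_on` panics when called from within an async execution context, so `Handle::current().block_on(...)` here relies on these helpers running on dedicated blocking threads (they are driven through `spawn_blocking_db` further down in this file). The `block_in_place` import added at the top is the usual escape hatch if a caller on a runtime worker thread ever needs the same trick; a minimal sketch, assuming a multi-threaded runtime (the helper name is hypothetical):

```rust
use tokio::{runtime::Handle, task::block_in_place};

// Hypothetical helper: drive a future to completion from synchronous code
// that may be running on a tokio worker thread. block_in_place moves other
// tasks off the current worker before we block it with block_on; note that
// it panics on a current-thread runtime.
fn block_on_here<F: std::future::Future>(fut: F) -> F::Output {
    block_in_place(|| Handle::current().block_on(fut))
}
```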
@@ -127,14 +166,23 @@ impl Database {
         // Initialize write transaction and open chunks table
         let mut rw_tx = db.write_txn()?;
         let database = db
-            .open_database::<U64<LE>, Zstd<Chunk>>(&rw_tx, Some("chunks"))?
+            .open_database::<U64<LE>, Bytes>(&rw_tx, Some("chunks"))?
             .expect("No table \"chunks\" found. The database should have been initialized");

         // Calculate key
         let key = hash((chunk.dimension.as_ref().unwrap(), chunk.x_pos, chunk.z_pos));

+        // let chunk = Handle::current().block_on(ZstdCodec::compress_data(chunk)).expect("Failed to compress chunk");
+        let chunk = chunk.clone();
+        let chunk = Handle::current().block_on(async {
+            ZstdCodec::compress_data(chunk).await.expect("Failed to compress chunk")
+        });
+        /*
+        ZstdCodec::compress_data(chunk).await.expect("Failed to compress chunk")
+        });*/
+
         // Insert chunk
-        let res = database.put(&mut rw_tx, &key, chunk);
+        let res = database.put(&mut rw_tx, &key, chunk.as_slice());
         rw_tx.commit()?;
         // .map_err(|err| {
         //     Error::DatabaseError(format!("Unable to commit changes to database: {err}"))
@@ -152,20 +200,20 @@ impl Database {

     /// Insert multiple chunks into database
     /// TODO: Find better name/disambiguation
-    fn insert_chunks_into_database(db: &Env, chunks: &[Chunk]) -> Result<(), heed::Error> {
+    fn insert_chunks_into_database(db: &Env, chunks: &[SerializedChunk]) -> Result<(), heed::Error> {
         // Initialize write transaction and open chunks table
         let mut rw_tx = db.write_txn()?;
         let database = db
-            .open_database::<U64<LE>, Zstd<Chunk>>(&rw_tx, Some("chunks"))?
+            .open_database::<U64<LE>, Bytes>(&rw_tx, Some("chunks"))?
             .expect("No table \"chunks\" found. The database should have been initialized");

         // Update page
         for chunk in chunks {
             // Calculate key
-            let key = hash((chunk.dimension.as_ref().unwrap(), chunk.x_pos, chunk.z_pos));
+            // let key = hash((chunk.dimension.as_ref().unwrap(), chunk.x_pos, chunk.z_pos));

             // Insert chunk
-            database.put(&mut rw_tx, &key, chunk)?;
+            database.put(&mut rw_tx, &chunk.hash(), chunk.data())?;
         }
         // Commit changes
         rw_tx.commit()?;
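
`SerializedChunk` itself is defined in `crate::world::importing` and is not part of this diff. From the call sites here it only needs to carry the precomputed key and the compressed bytes; an assumed shape (field names and the constructor are guesses — only `hash()` and `data()` appear in the diff):

```rust
// Assumed shape of SerializedChunk, reconstructed from its call sites; the
// real definition lives in crate::world::importing.
pub struct SerializedChunk {
    hash: u64,     // hash((dimension, x_pos, z_pos)), computed at import time
    data: Vec<u8>, // bincode-encoded, zstd-compressed chunk bytes
}

impl SerializedChunk {
    pub fn new(hash: u64, data: Vec<u8>) -> Self {
        Self { hash, data }
    }

    /// Precomputed database key.
    pub fn hash(&self) -> u64 {
        self.hash
    }

    /// Compressed chunk bytes, ready for the Bytes codec.
    pub fn data(&self) -> &[u8] {
        &self.data
    }
}
```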
@@ -392,33 +440,38 @@ impl Database {
     /// }
     ///
     /// ```
-    pub async fn batch_insert(&self, values: Vec<Chunk>) -> Result<(), Error> {
+    pub async fn batch_insert(&self, values: Vec<SerializedChunk>) -> Result<(), Error> {
         // Clone database pointer
         let db = self.db.clone();
         let tsk_db = self.db.clone();

         // Calculate all keys
-        let keys = values
+        /* let keys = values
             .iter()
             .map(|v| hash((v.dimension.as_ref().unwrap_or_else(|| panic!("Invalid chunk @ ({},{})", v.x_pos, v.z_pos)), v.x_pos, v.z_pos)))
             .collect::<Vec<u64>>();
+        */
+        // let keys = values.iter().map(|v| v.hash()).collect::<Vec<u64>>();

         // WARNING: The previous logic was to first insert in database and then insert in cache using load_into_cache fn.
         // This has been modified to avoid having to query database while we already have the data available.
         // First insert into cache

-        for (key, chunk) in keys.into_iter().zip(&values) {
+        // TODO: Re-enable cache. Currently disabled because we only get the serialized bytes and the hash
+        // to save in the database.
+        /*for chunk in values.iter() {
             let cache = self.cache.clone();
             let db = self.db.clone();
-            let chunk = chunk.clone();
+            let key = chunk.hash();
+            let chunk = chunk.data().clone();
             tokio::spawn(async move {
                 cache.insert(key, chunk).await;
                 if let Err(e) = Database::load_into_cache_standalone(db, cache, key).await {
                     warn!("Error inserting chunk into database: {:?}", e);
                 }
             });
         }
-
+        */

         // Then insert into persistent database
         spawn_blocking_db(tsk_db, move || Self::insert_chunks_into_database(&db, &values))
             .await
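
A call site for the new signature might then look like this (a sketch only: `import_region` is hypothetical, `SerializedChunk::new` refers to the assumed constructor above, and error handling mirrors the `expect` style used elsewhere in this diff):

```rust
// Hypothetical caller: compress chunks up front, then hand the whole batch
// to the database in one blocking write transaction via batch_insert.
async fn import_region(db: &Database, chunks: Vec<Chunk>) -> Result<(), Error> {
    let mut batch = Vec::with_capacity(chunks.len());
    for chunk in chunks {
        // Same key derivation the save path uses
        let key = hash((chunk.dimension.as_ref().unwrap(), chunk.x_pos, chunk.z_pos));
        // Compress on the blocking pool before touching LMDB
        let data = ZstdCodec::compress_data(chunk)
            .await
            .expect("Failed to compress chunk");
        batch.push(SerializedChunk::new(key, data));
    }
    db.batch_insert(batch).await
}
```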