1
+ use std:: borrow:: Cow ;
2
+ use std:: future:: Future ;
3
+ use std:: marker:: PhantomData ;
4
+
5
+ use bincode:: { Decode , Encode , config:: standard} ;
1
6
use byteorder:: LE ;
2
- use zstd:: bulk:: compress as zstd_compress;
3
- use zstd:: bulk:: decompress as zstd_decompress;
4
- use heed:: types:: { Bytes , U64 } ;
5
- use heed:: Env ;
6
- use tokio:: task:: spawn_blocking;
7
+ use futures:: channel:: oneshot:: { self , Canceled } ;
8
+ use heed:: { BytesDecode , BytesEncode , types:: U64 , Env } ;
7
9
use tracing:: { trace, warn} ;
8
10
9
- use crate :: database:: Database ;
10
- use crate :: utils:: error:: Error ;
11
- use crate :: utils:: hash:: hash;
12
- use crate :: world:: chunk_format:: Chunk ;
11
+ use crate :: {
12
+ database:: Database ,
13
+ utils:: error:: Error ,
14
+ utils:: hash:: hash,
15
+ world:: chunk_format:: Chunk
16
+ } ;
17
+
18
+ use super :: LMDB_THREADPOOL ;
19
+
20
/// Zero-sized heed codec that stores values as zstd-compressed bincode.
///
/// `PhantomData<T>` ties the codec to the encoded/decoded type without
/// actually storing a `T`.
pub struct Zstd<T>(PhantomData<T>);
13
21
14
- // use crate::utils::binary_utils::{bzip_compress, bzip_decompress};
15
- use bincode:: config:: standard;
16
- use bincode:: { decode_from_slice, encode_to_vec} ;
22
+ impl < ' a , T : Encode + ' a > BytesEncode < ' a > for Zstd < T > {
23
+ type EItem = T ;
24
+
25
+ fn bytes_encode ( item : & ' a Self :: EItem ) -> Result < Cow < ' a , [ u8 ] > , heed:: BoxedError > {
26
+
27
+ // Compress
28
+ let mut bytes = Vec :: new ( ) ;
29
+ let mut compressor = zstd:: Encoder :: new ( & mut bytes, 6 ) ?;
30
+ bincode:: encode_into_std_write ( item, & mut compressor, standard ( ) ) ?;
31
+
32
+ Ok ( Cow :: Owned ( bytes) )
33
+ }
34
+ }
35
+
36
+ impl < ' a , T : Decode + ' a > BytesDecode < ' a > for Zstd < T > {
37
+ type DItem = T ;
38
+
39
+ fn bytes_decode ( bytes : & ' a [ u8 ] ) -> Result < Self :: DItem , heed:: BoxedError > {
40
+
41
+ let mut decompressor = zstd:: Decoder :: new ( bytes) ?;
42
+ let decoded = bincode:: decode_from_std_read ( & mut decompressor, standard ( ) ) ?;
43
+ Ok ( decoded)
44
+ }
45
+ }
46
+
47
+ // Will delegate a database operation to the database threadpool
48
+ pub ( super ) fn spawn_blocking_db < F , R > ( f : F ) -> impl Future < Output = Result < R , Canceled > >
49
+ where
50
+ F : FnOnce ( ) -> R + Send + ' static ,
51
+ R : Send + ' static ,
52
+ {
53
+ let ( tx, res) = oneshot:: channel :: < R > ( ) ;
54
+
55
+ let pool = LMDB_THREADPOOL . get ( ) . unwrap ( ) ;
56
+ pool. spawn ( move || {
57
+ if tx. send ( f ( ) ) . is_err ( ) {
58
+ tracing:: warn!( "A database task has been unable to send its result because the receiver at other end have closed." )
59
+ }
60
+ } ) ;
61
+
62
+ res
63
+ }
17
64
18
65
impl Database {
66
+
67
+ // Close the database
68
+ pub fn close ( self ) {
69
+ let token = self . db . prepare_for_closing ( ) ;
70
+ token. wait ( ) ;
71
+ }
72
+
19
73
/// Fetch chunk from database
20
74
fn get_chunk_from_database ( db : & Env , key : & u64 ) -> Result < Option < Chunk > , Error > {
21
75
// Initialize read transaction and open chunks table
22
76
let ro_tx = db. read_txn ( ) ?;
23
77
let database = db
24
- . open_database :: < U64 < LE > , Bytes > ( & ro_tx, Some ( "chunks" ) ) ?
78
+ . open_database :: < U64 < LE > , Zstd < Chunk > > ( & ro_tx, Some ( "chunks" ) ) ?
25
79
. expect ( "No table \" chunks\" found. The database should have been initialized" ) ;
26
80
27
81
// Attempt to fetch chunk from table
28
- if let Ok ( data) = database. get ( & ro_tx, key) {
29
- Ok ( data. map ( |encoded_chunk| {
30
- // let decompressed =
31
- // bzip_decompress(&encoded_chunk).expect("Failed to decompress chunk");
32
- let decompressed = zstd_decompress ( & encoded_chunk, 1024 * 1024 * 64 ) . expect ( "Failed to decompress chunk" ) ;
33
- let chunk: ( Chunk , usize ) = decode_from_slice ( & * decompressed, standard ( ) )
34
- . expect ( "Failed to decode chunk from database" ) ;
35
- chunk. 0
36
- } ) )
37
- } else {
38
- Err ( Error :: DatabaseError ( "Failed to get chunk" . into ( ) ) )
39
- }
82
+ database. get ( & ro_tx, key)
83
+ . map_err ( |err| Error :: DatabaseError ( format ! ( "Failed to get chunk: {err}" ) ) )
40
84
}
41
85
42
86
/// Insert a single chunk into database
43
87
fn insert_chunk_into_database ( db : & Env , chunk : & Chunk ) -> Result < ( ) , Error > {
44
88
// Initialize write transaction and open chunks table
45
89
let mut rw_tx = db. write_txn ( ) ?;
46
90
let database = db
47
- . open_database :: < U64 < LE > , Bytes > ( & rw_tx, Some ( "chunks" ) ) ?
91
+ . open_database :: < U64 < LE > , Zstd < Chunk > > ( & rw_tx, Some ( "chunks" ) ) ?
48
92
. expect ( "No table \" chunks\" found. The database should have been initialized" ) ;
49
93
50
- // Encode chunk
51
- let encoded_chunk = encode_to_vec ( chunk, standard ( ) ) . expect ( "Failed to encode chunk" ) ;
52
- // let compressed = bzip_compress(&encoded_chunk).expect("Failed to compress chunk");
53
- let compressed = zstd_compress ( & encoded_chunk, 3 ) . expect ( "Failed to compress chunk" ) ;
94
+ // Calculate key
54
95
let key = hash ( ( chunk. dimension . as_ref ( ) . unwrap ( ) , chunk. x_pos , chunk. z_pos ) ) ;
55
96
56
97
// Insert chunk
57
- let res = database. put ( & mut rw_tx, & key, & compressed ) ;
98
+ let res = database. put ( & mut rw_tx, & key, chunk ) ;
58
99
rw_tx. commit ( ) . map_err ( |err| {
59
100
Error :: DatabaseError ( format ! ( "Unable to commit changes to database: {err}" ) )
60
101
} ) ?;
@@ -74,20 +115,16 @@ impl Database {
74
115
// Initialize write transaction and open chunks table
75
116
let mut rw_tx = db. write_txn ( ) ?;
76
117
let database = db
77
- . open_database :: < U64 < LE > , Bytes > ( & rw_tx, Some ( "chunks" ) ) ?
118
+ . open_database :: < U64 < LE > , Zstd < Chunk > > ( & rw_tx, Some ( "chunks" ) ) ?
78
119
. expect ( "No table \" chunks\" found. The database should have been initialized" ) ;
79
120
80
121
// Update page
81
122
for chunk in chunks {
82
- // Encode chunk
83
- let encoded_chunk = encode_to_vec ( chunk, standard ( ) ) . expect ( "Failed to encode chunk" ) ;
84
-
85
- // let compressed = bzip_compress(&encoded_chunk).expect("Failed to compress chunk");
86
- let compressed = zstd_compress ( & encoded_chunk, 3 ) . expect ( "Failed to compress chunk" ) ;
123
+ // Calculate key
87
124
let key = hash ( ( chunk. dimension . as_ref ( ) . unwrap ( ) , chunk. x_pos , chunk. z_pos ) ) ;
88
125
89
126
// Insert chunk
90
- database. put ( & mut rw_tx, & key, & compressed ) . map_err ( |err| {
127
+ database. put ( & mut rw_tx, & key, chunk ) . map_err ( |err| {
91
128
Error :: DatabaseError ( format ! ( "Failed to insert or update chunk: {err}" ) )
92
129
} ) ?;
93
130
}
@@ -104,13 +141,14 @@ impl Database {
104
141
let cache = self . cache . clone ( ) ;
105
142
106
143
tokio:: task:: spawn ( async move {
144
+
107
145
// Check cache
108
146
if cache. contains_key ( & key) {
109
147
trace ! ( "Chunk already exists in cache: {:X}" , key) ;
110
148
}
111
149
// If not in cache then search in database
112
150
else if let Ok ( chunk) =
113
- spawn_blocking ( move || Self :: get_chunk_from_database ( & db, & key) )
151
+ spawn_blocking_db ( move || Self :: get_chunk_from_database ( & db, & key) )
114
152
. await
115
153
. unwrap ( )
116
154
{
@@ -158,7 +196,7 @@ impl Database {
158
196
// Insert chunk into persistent database
159
197
let chunk = value. clone ( ) ;
160
198
let db = self . db . clone ( ) ;
161
- spawn_blocking ( move || Self :: insert_chunk_into_database ( & db, & chunk) )
199
+ spawn_blocking_db ( move || Self :: insert_chunk_into_database ( & db, & chunk) )
162
200
. await
163
201
. unwrap ( ) ?;
164
202
@@ -202,7 +240,7 @@ impl Database {
202
240
Ok ( self . cache . get ( & key) . await )
203
241
}
204
242
// Attempt to get chunk from persistent database
205
- else if let Some ( chunk) = spawn_blocking ( move || Self :: get_chunk_from_database ( & db, & key) )
243
+ else if let Some ( chunk) = spawn_blocking_db ( move || Self :: get_chunk_from_database ( & db, & key) )
206
244
. await
207
245
. unwrap ( ) ?
208
246
{
@@ -243,7 +281,7 @@ impl Database {
243
281
Ok ( true )
244
282
// Else check persistent database and load it into cache
245
283
} else {
246
- let res = spawn_blocking ( move || Self :: get_chunk_from_database ( & db, & key) ) . await ? ;
284
+ let res = spawn_blocking_db ( move || Self :: get_chunk_from_database ( & db, & key) ) . await . unwrap ( ) ;
247
285
248
286
// WARNING: The previous logic was to order the chunk to be loaded into cache whether it existed or not.
249
287
// This has been replaced by directly loading the queried chunk into cache
@@ -286,7 +324,7 @@ impl Database {
286
324
// Insert new chunk state into persistent database
287
325
let chunk = value. clone ( ) ;
288
326
let db = self . db . clone ( ) ;
289
- spawn_blocking ( move || Self :: insert_chunk_into_database ( & db, & chunk) ) . await ? ?;
327
+ spawn_blocking_db ( move || Self :: insert_chunk_into_database ( & db, & chunk) ) . await . unwrap ( ) ?;
290
328
291
329
// Insert new chunk state into cache
292
330
self . cache . insert ( key, value) . await ;
@@ -389,7 +427,7 @@ impl Database {
389
427
}
390
428
391
429
// Then insert into persistent database
392
- spawn_blocking ( move || Self :: insert_chunks_into_database ( & db, & values) )
430
+ spawn_blocking_db ( move || Self :: insert_chunks_into_database ( & db, & values) )
393
431
. await
394
432
. unwrap ( ) ?;
395
433
Ok ( ( ) )
0 commit comments