  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,31 +22,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-struct region {
-	u32 block;
-	u32 len;
-	int bg;
-	struct region *next;
-	struct region *prev;
-};
-
-struct block_group_info {
-	u32 first_block;
-	int header_blocks;
-	int data_blocks_used;
-	int has_superblock;
-	u8 *bitmaps;
-	u8 *block_bitmap;
-	u8 *inode_bitmap;
-	u8 *inode_table;
-	u32 free_blocks;
-	u32 first_free_block;
-	u32 free_inodes;
-	u32 first_free_inode;
-	u16 flags;
-	u16 used_dirs;
-};
-
 struct xattr_list_element {
 	struct ext4_inode *inode;
 	struct ext4_xattr_header *header;
@@ -106,7 +81,7 @@ static void region_list_remove(struct region_list *list, struct region *reg)
 	reg->prev = NULL;
 }
 
-static void region_list_append(struct region_list *list, struct region *reg)
+void region_list_append(struct region_list *list, struct region *reg)
 {
 	if (list->first == NULL) {
 		list->first = reg;
@@ -141,15 +116,17 @@ static void dump_region_lists(struct block_allocation *alloc) {
 }
 #endif
 
-void print_blocks(FILE *f, struct block_allocation *alloc)
+void print_blocks(FILE *f, struct block_allocation *alloc, char separator)
 {
 	struct region *reg;
+	fputc(' ', f);
 	for (reg = alloc->list.first; reg; reg = reg->next) {
 		if (reg->len == 1) {
-			fprintf(f, " %d", reg->block);
+			fprintf(f, "%d", reg->block);
 		} else {
-			fprintf(f, " %d-%d", reg->block, reg->block + reg->len - 1);
+			fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
 		}
+		fputc(separator, f);
 	}
 	fputc('\n', f);
 }
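
The new signature prints a single leading space, then terminates every region with the caller-supplied separator instead of prefixing each one with a space. A self-contained sketch of the resulting format, using hypothetical region values rather than the allocator's own structures:

#include <stdio.h>

/* Toy reproduction of the new print_blocks() output format, using a plain
 * array instead of the allocator's region list (values are made up). */
int main(void)
{
	struct { int block; int len; } regions[] = { {10, 3}, {20, 1} };
	int n = sizeof(regions) / sizeof(regions[0]);
	char separator = ',';
	int i;

	fputc(' ', stdout);
	for (i = 0; i < n; i++) {
		if (regions[i].len == 1)
			printf("%d", regions[i].block);
		else
			printf("%d-%d", regions[i].block,
			       regions[i].block + regions[i].len - 1);
		fputc(separator, stdout);
	}
	fputc('\n', stdout);
	/* Prints " 10-12,20," followed by a newline. */
	return 0;
}
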
@@ -210,45 +187,38 @@ static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
 	unsigned int i = 0;
 
 	u32 block = start;
-	if (num > bg->free_blocks)
-		return -1;
-
 	for (i = 0; i < num && block % 8 != 0; i++, block++) {
 		if (bitmap_set_bit(bg->block_bitmap, block)) {
-			error("attempted to reserve already reserved block");
+			error("attempted to reserve already reserved block %d and num is %d", block, num);
 			return -1;
 		}
 	}
 
 	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
 		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
-			error("attempted to reserve already reserved block");
+			error("attempted to reserve already reserved block %d and num is %d", block, num);
 			return -1;
 		}
 	}
 
 	for (; i < num; i++, block++) {
 		if (bitmap_set_bit(bg->block_bitmap, block)) {
-			error("attempted to reserve already reserved block");
+			error("attempted to reserve already reserved block %d and num is %d", block, num);
 			return -1;
 		}
 	}
 
 	bg->free_blocks -= num;
-	if (start == bg->first_free_block)
-		bg->first_free_block = start + num;
 
 	return 0;
 }
 
-static void free_blocks(struct block_group_info *bg, u32 num_blocks)
+static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
 {
 	unsigned int i;
-	u32 block = bg->first_free_block - 1;
 	for (i = 0; i < num_blocks; i++, block--)
 		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
 	bg->free_blocks += num_blocks;
-	bg->first_free_block -= num_blocks;
 }
 
 /* Reduces an existing allocation by len blocks by returning the last blocks
@@ -258,14 +228,15 @@ void reduce_allocation(struct block_allocation *alloc, u32 len)
 {
 	while (len) {
 		struct region *last_reg = alloc->list.last;
+		struct block_group_info *bg = &aux_info.bgs[last_reg->bg];
 
 		if (last_reg->len > len) {
-			free_blocks(&aux_info.bgs[last_reg->bg], len);
+			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
 			last_reg->len -= len;
 			len = 0;
 		} else {
 			struct region *reg = alloc->list.last->prev;
-			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
+			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
 			len -= last_reg->len;
 			if (reg) {
 				reg->next = NULL;
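
free_blocks() now takes the group-relative index of the last block to clear and walks downward, which is why both call sites above pass last_reg->block + last_reg->len - bg->first_block - 1. A self-contained sketch with hypothetical numbers (a region of length 5 ending at absolute block 104, in a group whose first block is 1):

#include <stdio.h>
#include <stdint.h>

/* Toy version of the reworked free_blocks(): clear num_blocks bits in a
 * block bitmap, starting at group-relative index 'block' and walking down.
 * The bitmap layout and numbers below are hypothetical. */
static void toy_free_blocks(uint8_t *bitmap, uint32_t block, uint32_t num_blocks)
{
	uint32_t i;
	for (i = 0; i < num_blocks; i++, block--)
		bitmap[block / 8] &= ~(1 << (block % 8));
}

int main(void)
{
	uint8_t bitmap[16] = { [12] = 0xff, [13] = 0xff };  /* blocks 96..111 set */
	/* Region: absolute blocks 100..104, group first_block = 1, so the last
	 * group-relative block is 100 + 5 - 1 - 1 = 103; free 103 down to 99. */
	toy_free_blocks(bitmap, 103, 5);
	printf("%02x %02x\n", bitmap[12], bitmap[13]);  /* prints "07 ff" */
	return 0;
}
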
@@ -304,18 +275,28 @@ static void init_bg(struct block_group_info *bg, unsigned int i)
 
 	bg->data_blocks_used = 0;
 	bg->free_blocks = info.blocks_per_group;
-	bg->first_free_block = 0;
 	bg->free_inodes = info.inodes_per_group;
 	bg->first_free_inode = 1;
 	bg->flags = 0;
 
-	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
+	bg->chunk_count = 0;
+	bg->max_chunk_count = 1;
+	bg->chunks = (struct region *)calloc(bg->max_chunk_count, sizeof(struct region));
+
+	if (reserve_blocks(bg, 0, bg->header_blocks) < 0)
 		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
+	// Add empty starting delimiter chunk
+	reserve_bg_chunk(i, bg->header_blocks, 0);
 
 	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
 		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
 		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
+		// Add empty ending delimiter chunk
+		reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
+	} else {
+		reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
 	}
+
 }
 
 void block_allocator_init()
@@ -341,73 +322,79 @@ void block_allocator_free()
 	free(aux_info.bgs);
 }
 
-static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
-{
-	if (get_free_blocks(bg_num) < len)
-		return EXT4_ALLOCATE_FAILED;
-
-	u32 block = aux_info.bgs[bg_num].first_free_block;
-	struct block_group_info *bg = &aux_info.bgs[bg_num];
-	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
-		error("failed to reserve %u blocks in block group %u\n", len, bg_num);
-		return EXT4_ALLOCATE_FAILED;
-	}
-
-	aux_info.bgs[bg_num].data_blocks_used += len;
-
-	return bg->first_block + block;
-}
-
 /* Allocate a single block and return its block number */
 u32 allocate_block()
 {
-	unsigned int i;
-	for (i = 0; i < aux_info.groups; i++) {
-		u32 block = ext4_allocate_blocks_from_block_group(1, i);
-
-		if (block != EXT4_ALLOCATE_FAILED)
-			return block;
+	u32 block;
+	struct block_allocation *blk_alloc = allocate_blocks(1);
+	if (!blk_alloc) {
+		return EXT4_ALLOCATE_FAILED;
 	}
-
-	return EXT4_ALLOCATE_FAILED;
+	block = blk_alloc->list.first->block;
+	free_alloc(blk_alloc);
+	return block;
 }
 
 static struct region *ext4_allocate_best_fit_partial(u32 len)
 {
-	unsigned int i;
-	unsigned int found_bg = 0;
-	u32 found_bg_len = 0;
+	unsigned int i, j;
+	unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
+	u32 found_allocate_len = 0;
+	bool minimize = false;
+	struct block_group_info *bgs = aux_info.bgs;
+	struct region *reg;
 
 	for (i = 0; i < aux_info.groups; i++) {
-		u32 bg_len = aux_info.bgs[i].free_blocks;
-
-		if ((len <= bg_len && (found_bg_len == 0 || bg_len < found_bg_len)) ||
-		    (len > found_bg_len && bg_len > found_bg_len)) {
-			found_bg = i;
-			found_bg_len = bg_len;
+		for (j = 1; j < bgs[i].chunk_count; j++) {
+			u32 hole_start, hole_size;
+			hole_start = bgs[i].chunks[j - 1].block + bgs[i].chunks[j - 1].len;
+			hole_size = bgs[i].chunks[j].block - hole_start;
+			if (hole_size == len) {
+				// Perfect fit, i.e. right between 2 chunks; no need to keep searching
+				found_bg = i;
+				found_prev_chunk = j - 1;
+				found_block = hole_start;
+				found_allocate_len = hole_size;
+				goto done;
+			} else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
+				found_bg = i;
+				found_prev_chunk = j - 1;
+				found_block = hole_start;
+				found_allocate_len = hole_size;
+				minimize = true;
+			} else if (!minimize) {
+				if (found_allocate_len < hole_size) {
+					found_bg = i;
+					found_prev_chunk = j - 1;
+					found_block = hole_start;
+					found_allocate_len = hole_size;
+				}
+			}
 		}
 	}
 
-	if (found_bg_len) {
-		u32 allocate_len = min(len, found_bg_len);
-		struct region *reg;
-		u32 block = ext4_allocate_blocks_from_block_group(allocate_len, found_bg);
-		if (block == EXT4_ALLOCATE_FAILED) {
-			error("failed to allocate %d blocks in block group %d", allocate_len, found_bg);
-			return NULL;
-		}
-		reg = malloc(sizeof(struct region));
-		reg->block = block;
-		reg->len = allocate_len;
-		reg->next = NULL;
-		reg->prev = NULL;
-		reg->bg = found_bg;
-		return reg;
-	} else {
+	if (found_allocate_len == 0) {
 		error("failed to allocate %u blocks, out of space?", len);
+		return NULL;
 	}
-
-	return NULL;
+	if (found_allocate_len > len) found_allocate_len = len;
+done:
+	// reclaim allocated space in chunk
+	bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
+	if (reserve_blocks(&bgs[found_bg],
+			found_block,
+			found_allocate_len) < 0) {
+		error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
+		return NULL;
+	}
+	bgs[found_bg].data_blocks_used += found_allocate_len;
+	reg = malloc(sizeof(struct region));
+	reg->block = found_block + bgs[found_bg].first_block;
+	reg->len = found_allocate_len;
+	reg->next = NULL;
+	reg->prev = NULL;
+	reg->bg = found_bg;
+	return reg;
 }
 
 static struct region *ext4_allocate_best_fit(u32 len)
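
The loop above treats the gap between two consecutive chunks of a block group as a candidate hole: an exact fit is taken immediately, otherwise the smallest hole that still fits is preferred, and only when nothing fits is the largest hole kept so the caller can loop. A self-contained sketch of that selection rule on a toy chunk array (values are hypothetical and independent of the allocator's structures):

#include <stdio.h>
#include <stdbool.h>

struct chunk { unsigned block; unsigned len; };

/* Toy version of the hole search in ext4_allocate_best_fit_partial():
 * scan the gaps between consecutive chunks and pick an exact fit,
 * else the smallest gap that fits, else the largest gap seen. */
static unsigned find_hole(const struct chunk *chunks, unsigned count,
			  unsigned len, unsigned *hole_len)
{
	unsigned j, best_start = 0, best_len = 0;
	bool minimize = false;

	for (j = 1; j < count; j++) {
		unsigned start = chunks[j - 1].block + chunks[j - 1].len;
		unsigned size = chunks[j].block - start;
		if (size == len) {
			*hole_len = size;		/* perfect fit */
			return start;
		} else if (size > len && (best_len == 0 || size < best_len)) {
			best_start = start;		/* smallest hole that fits */
			best_len = size;
			minimize = true;
		} else if (!minimize && size > best_len) {
			best_start = start;		/* largest hole so far */
			best_len = size;
		}
	}
	*hole_len = best_len;
	return best_start;
}

int main(void)
{
	/* Chunks at 0..9, 30..39, 100..109 leave holes of 20 and 60 blocks. */
	struct chunk chunks[] = { {0, 10}, {30, 10}, {100, 10} };
	unsigned hole_len;
	unsigned start = find_hole(chunks, 3, 15, &hole_len);
	printf("hole at %u, len %u\n", start, hole_len);  /* hole at 10, len 20 */
	return 0;
}
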
@@ -439,9 +426,9 @@ static struct region *ext4_allocate_best_fit(u32 len)
 /* Allocate len blocks. The blocks may be spread across multiple block groups,
    and are returned in a linked list of the blocks in each block group. The
    allocation algorithm is:
-   1. If the remaining allocation is larger than any available contiguous region,
-      allocate the largest contiguous region and loop
-   2. Otherwise, allocate the smallest contiguous region that it fits in
+   1. If the remaining allocation is larger than any available contiguous region,
+      allocate the largest contiguous region and loop
+   2. Otherwise, allocate the smallest contiguous region that it fits in
 */
 struct block_allocation *allocate_blocks(u32 len)
 {
@@ -452,6 +439,8 @@ struct block_allocation *allocate_blocks(u32 len)
 
 	struct block_allocation *alloc = create_allocation();
 	alloc->list.first = reg;
+	while (reg->next != NULL)
+		reg = reg->next;
 	alloc->list.last = reg;
 	alloc->list.iter = alloc->list.first;
 	alloc->list.partial_iter = 0;
@@ -779,3 +768,34 @@ void free_alloc(struct block_allocation *alloc)
 
 	free(alloc);
 }
+
+void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
+	struct block_group_info *bgs = aux_info.bgs;
+	int chunk_count;
+	if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
+		bgs[bg].max_chunk_count *= 2;
+		bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
+		if (!bgs[bg].chunks)
+			critical_error("realloc failed");
+	}
+	chunk_count = bgs[bg].chunk_count;
+	bgs[bg].chunks[chunk_count].block = start_block;
+	bgs[bg].chunks[chunk_count].len = size;
+	bgs[bg].chunks[chunk_count].bg = bg;
+	bgs[bg].chunk_count++;
+}
+
+int reserve_blocks_for_allocation(struct block_allocation *alloc) {
+	struct region *reg;
+	struct block_group_info *bgs = aux_info.bgs;
+
+	if (!alloc) return 0;
+	reg = alloc->list.first;
+	while (reg != NULL) {
+		if (reserve_blocks(&bgs[reg->bg], reg->block - bgs[reg->bg].first_block, reg->len) < 0)
+			return -1;
+		reg = reg->next;
+	}
+	return 0;
+}
+
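
Together with the now non-static region_list_append(), these helpers let a caller replay a pre-determined layout: build a block_allocation by hand and then mark its regions as in use. A minimal sketch, assuming it is compiled into ext4_utils next to this file; the helper name and block values are hypothetical:

/* Sketch: reserve a known, pre-determined region instead of letting the
 * best-fit allocator pick one. Assumes the surrounding ext4_utils code
 * (create_allocation, region_list_append, free_alloc, aux_info) is linked
 * in; the function name and block numbers are made up for illustration. */
static int reserve_known_region(u32 block, u32 len, int bg)
{
	struct block_allocation *alloc = create_allocation();
	struct region *reg = malloc(sizeof(struct region));
	int ret;

	reg->block = block;	/* absolute block number */
	reg->len = len;
	reg->bg = bg;
	reg->next = NULL;
	reg->prev = NULL;
	region_list_append(&alloc->list, reg);

	ret = reserve_blocks_for_allocation(alloc);	/* marks the blocks used */
	free_alloc(alloc);
	return ret;
}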