Skip to content

Commit 9579198

Browse files
committed
Redesign make_ext4fs to incrementally generate ext4 images
Allows passing a base fs mapping file through -d, which preserves the location of those mappings in existing files. Internal Design Doc: go/incremental-ext4 BUG: 26839493 Change-Id: I05e296693429d39466d257d1d0a3daf00510dc26 Signed-off-by: Mohamad Ayyash <[email protected]>
1 parent 18785a8 commit 9579198

File tree

8 files changed

+419
-130
lines changed

8 files changed

+419
-130
lines changed

ext4_utils/allocate.c

Lines changed: 120 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
* you may not use this file except in compliance with the License.
66
* You may obtain a copy of the License at
77
*
8-
* http://www.apache.org/licenses/LICENSE-2.0
8+
* http://www.apache.org/licenses/LICENSE-2.0
99
*
1010
* Unless required by applicable law or agreed to in writing, software
1111
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,31 +22,6 @@
2222
#include <stdio.h>
2323
#include <stdlib.h>
2424

25-
struct region {
26-
u32 block;
27-
u32 len;
28-
int bg;
29-
struct region *next;
30-
struct region *prev;
31-
};
32-
33-
struct block_group_info {
34-
u32 first_block;
35-
int header_blocks;
36-
int data_blocks_used;
37-
int has_superblock;
38-
u8 *bitmaps;
39-
u8 *block_bitmap;
40-
u8 *inode_bitmap;
41-
u8 *inode_table;
42-
u32 free_blocks;
43-
u32 first_free_block;
44-
u32 free_inodes;
45-
u32 first_free_inode;
46-
u16 flags;
47-
u16 used_dirs;
48-
};
49-
5025
struct xattr_list_element {
5126
struct ext4_inode *inode;
5227
struct ext4_xattr_header *header;
@@ -106,7 +81,7 @@ static void region_list_remove(struct region_list *list, struct region *reg)
10681
reg->prev = NULL;
10782
}
10883

109-
static void region_list_append(struct region_list *list, struct region *reg)
84+
void region_list_append(struct region_list *list, struct region *reg)
11085
{
11186
if (list->first == NULL) {
11287
list->first = reg;
@@ -141,15 +116,17 @@ static void dump_region_lists(struct block_allocation *alloc) {
141116
}
142117
#endif
143118

144-
void print_blocks(FILE* f, struct block_allocation *alloc)
119+
void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
145120
{
146121
struct region *reg;
122+
fputc(' ', f);
147123
for (reg = alloc->list.first; reg; reg = reg->next) {
148124
if (reg->len == 1) {
149-
fprintf(f, " %d", reg->block);
125+
fprintf(f, "%d", reg->block);
150126
} else {
151-
fprintf(f, " %d-%d", reg->block, reg->block + reg->len - 1);
127+
fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
152128
}
129+
fputc(separator, f);
153130
}
154131
fputc('\n', f);
155132
}
@@ -210,45 +187,38 @@ static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
210187
unsigned int i = 0;
211188

212189
u32 block = start;
213-
if (num > bg->free_blocks)
214-
return -1;
215-
216190
for (i = 0; i < num && block % 8 != 0; i++, block++) {
217191
if (bitmap_set_bit(bg->block_bitmap, block)) {
218-
error("attempted to reserve already reserved block");
192+
error("attempted to reserve already reserved block %d and num is %d", block, num);
219193
return -1;
220194
}
221195
}
222196

223197
for (; i + 8 <= (num & ~7); i += 8, block += 8) {
224198
if (bitmap_set_8_bits(bg->block_bitmap, block)) {
225-
error("attempted to reserve already reserved block");
199+
error("attempted to reserve already reserved block %d and num is %d", block, num);
226200
return -1;
227201
}
228202
}
229203

230204
for (; i < num; i++, block++) {
231205
if (bitmap_set_bit(bg->block_bitmap, block)) {
232-
error("attempted to reserve already reserved block");
206+
error("attempted to reserve already reserved block %d and num is %d", block, num);
233207
return -1;
234208
}
235209
}
236210

237211
bg->free_blocks -= num;
238-
if (start == bg->first_free_block)
239-
bg->first_free_block = start + num;
240212

241213
return 0;
242214
}
243215

244-
static void free_blocks(struct block_group_info *bg, u32 num_blocks)
216+
static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
245217
{
246218
unsigned int i;
247-
u32 block = bg->first_free_block - 1;
248219
for (i = 0; i < num_blocks; i++, block--)
249220
bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
250221
bg->free_blocks += num_blocks;
251-
bg->first_free_block -= num_blocks;
252222
}
253223

254224
/* Reduces an existing allocation by len blocks by returning the last blocks
@@ -258,14 +228,15 @@ void reduce_allocation(struct block_allocation *alloc, u32 len)
258228
{
259229
while (len) {
260230
struct region *last_reg = alloc->list.last;
231+
struct block_group_info *bg = &aux_info.bgs[last_reg->bg];
261232

262233
if (last_reg->len > len) {
263-
free_blocks(&aux_info.bgs[last_reg->bg], len);
234+
free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
264235
last_reg->len -= len;
265236
len = 0;
266237
} else {
267238
struct region *reg = alloc->list.last->prev;
268-
free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
239+
free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
269240
len -= last_reg->len;
270241
if (reg) {
271242
reg->next = NULL;
@@ -304,18 +275,28 @@ static void init_bg(struct block_group_info *bg, unsigned int i)
304275

305276
bg->data_blocks_used = 0;
306277
bg->free_blocks = info.blocks_per_group;
307-
bg->first_free_block = 0;
308278
bg->free_inodes = info.inodes_per_group;
309279
bg->first_free_inode = 1;
310280
bg->flags = 0;
311281

312-
if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
282+
bg->chunk_count = 0;
283+
bg->max_chunk_count = 1;
284+
bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));
285+
286+
if (reserve_blocks(bg, 0, bg->header_blocks) < 0)
313287
error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
288+
// Add empty starting delimiter chunk
289+
reserve_bg_chunk(i, bg->header_blocks, 0);
314290

315291
if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
316292
u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
317293
reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
294+
// Add empty ending delimiter chunk
295+
reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
296+
} else {
297+
reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
318298
}
299+
319300
}
320301

321302
void block_allocator_init()
@@ -341,73 +322,79 @@ void block_allocator_free()
341322
free(aux_info.bgs);
342323
}
343324

344-
static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
345-
{
346-
if (get_free_blocks(bg_num) < len)
347-
return EXT4_ALLOCATE_FAILED;
348-
349-
u32 block = aux_info.bgs[bg_num].first_free_block;
350-
struct block_group_info *bg = &aux_info.bgs[bg_num];
351-
if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
352-
error("failed to reserve %u blocks in block group %u\n", len, bg_num);
353-
return EXT4_ALLOCATE_FAILED;
354-
}
355-
356-
aux_info.bgs[bg_num].data_blocks_used += len;
357-
358-
return bg->first_block + block;
359-
}
360-
361325
/* Allocate a single block and return its block number */
362326
u32 allocate_block()
363327
{
364-
unsigned int i;
365-
for (i = 0; i < aux_info.groups; i++) {
366-
u32 block = ext4_allocate_blocks_from_block_group(1, i);
367-
368-
if (block != EXT4_ALLOCATE_FAILED)
369-
return block;
328+
u32 block;
329+
struct block_allocation *blk_alloc = allocate_blocks(1);
330+
if (!blk_alloc) {
331+
return EXT4_ALLOCATE_FAILED;
370332
}
371-
372-
return EXT4_ALLOCATE_FAILED;
333+
block = blk_alloc->list.first->block;
334+
free_alloc(blk_alloc);
335+
return block;
373336
}
374337

375338
static struct region *ext4_allocate_best_fit_partial(u32 len)
376339
{
377-
unsigned int i;
378-
unsigned int found_bg = 0;
379-
u32 found_bg_len = 0;
340+
unsigned int i, j;
341+
unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
342+
u32 found_allocate_len = 0;
343+
bool minimize = false;
344+
struct block_group_info *bgs = aux_info.bgs;
345+
struct region *reg;
380346

381347
for (i = 0; i < aux_info.groups; i++) {
382-
u32 bg_len = aux_info.bgs[i].free_blocks;
383-
384-
if ((len <= bg_len && (found_bg_len == 0 || bg_len < found_bg_len)) ||
385-
(len > found_bg_len && bg_len > found_bg_len)) {
386-
found_bg = i;
387-
found_bg_len = bg_len;
348+
for (j = 1; j < bgs[i].chunk_count; j++) {
349+
u32 hole_start, hole_size;
350+
hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
351+
hole_size = bgs[i].chunks[j].block - hole_start;
352+
if (hole_size == len) {
353+
// Perfect fit, i.e. exactly between 2 chunks; no need to keep searching
354+
found_bg = i;
355+
found_prev_chunk = j - 1;
356+
found_block = hole_start;
357+
found_allocate_len = hole_size;
358+
goto done;
359+
} else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
360+
found_bg = i;
361+
found_prev_chunk = j - 1;
362+
found_block = hole_start;
363+
found_allocate_len = hole_size;
364+
minimize = true;
365+
} else if (!minimize) {
366+
if (found_allocate_len < hole_size) {
367+
found_bg = i;
368+
found_prev_chunk = j - 1;
369+
found_block = hole_start;
370+
found_allocate_len = hole_size;
371+
}
372+
}
388373
}
389374
}
390375

391-
if (found_bg_len) {
392-
u32 allocate_len = min(len, found_bg_len);
393-
struct region *reg;
394-
u32 block = ext4_allocate_blocks_from_block_group(allocate_len, found_bg);
395-
if (block == EXT4_ALLOCATE_FAILED) {
396-
error("failed to allocate %d blocks in block group %d", allocate_len, found_bg);
397-
return NULL;
398-
}
399-
reg = malloc(sizeof(struct region));
400-
reg->block = block;
401-
reg->len = allocate_len;
402-
reg->next = NULL;
403-
reg->prev = NULL;
404-
reg->bg = found_bg;
405-
return reg;
406-
} else {
376+
if (found_allocate_len == 0) {
407377
error("failed to allocate %u blocks, out of space?", len);
378+
return NULL;
408379
}
409-
410-
return NULL;
380+
if (found_allocate_len > len) found_allocate_len = len;
381+
done:
382+
// reclaim allocated space in chunk
383+
bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
384+
if (reserve_blocks(&bgs[found_bg],
385+
found_block,
386+
found_allocate_len) < 0) {
387+
error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
388+
return NULL;
389+
}
390+
bgs[found_bg].data_blocks_used += found_allocate_len;
391+
reg = malloc(sizeof(struct region));
392+
reg->block = found_block + bgs[found_bg].first_block;
393+
reg->len = found_allocate_len;
394+
reg->next = NULL;
395+
reg->prev = NULL;
396+
reg->bg = found_bg;
397+
return reg;
411398
}
412399

413400
static struct region *ext4_allocate_best_fit(u32 len)
@@ -439,9 +426,9 @@ static struct region *ext4_allocate_best_fit(u32 len)
439426
/* Allocate len blocks. The blocks may be spread across multiple block groups,
440427
and are returned in a linked list of the blocks in each block group. The
441428
allocation algorithm is:
442-
1. If the remaining allocation is larger than any available contiguous region,
443-
allocate the largest contiguous region and loop
444-
2. Otherwise, allocate the smallest contiguous region that it fits in
429+
1. If the remaining allocation is larger than any available contiguous region,
430+
allocate the largest contiguous region and loop
431+
2. Otherwise, allocate the smallest contiguous region that it fits in
445432
*/
446433
struct block_allocation *allocate_blocks(u32 len)
447434
{
@@ -452,6 +439,8 @@ struct block_allocation *allocate_blocks(u32 len)
452439

453440
struct block_allocation *alloc = create_allocation();
454441
alloc->list.first = reg;
442+
while (reg->next != NULL)
443+
reg = reg->next;
455444
alloc->list.last = reg;
456445
alloc->list.iter = alloc->list.first;
457446
alloc->list.partial_iter = 0;
@@ -779,3 +768,34 @@ void free_alloc(struct block_allocation *alloc)
779768

780769
free(alloc);
781770
}
771+
772+
void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
773+
struct block_group_info *bgs = aux_info.bgs;
774+
int chunk_count;
775+
if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
776+
bgs[bg].max_chunk_count *= 2;
777+
bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
778+
if (!bgs[bg].chunks)
779+
critical_error("realloc failed");
780+
}
781+
chunk_count = bgs[bg].chunk_count;
782+
bgs[bg].chunks[chunk_count].block = start_block;
783+
bgs[bg].chunks[chunk_count].len = size;
784+
bgs[bg].chunks[chunk_count].bg = bg;
785+
bgs[bg].chunk_count++;
786+
}
787+
788+
int reserve_blocks_for_allocation(struct block_allocation *alloc) {
789+
struct region *reg;
790+
struct block_group_info *bgs = aux_info.bgs;
791+
792+
if (!alloc) return 0;
793+
reg = alloc->list.first;
794+
while (reg != NULL) {
795+
if (reserve_blocks(&bgs[reg->bg], reg->block - bgs[reg->bg].first_block, reg->len) < 0)
796+
return -1;
797+
reg = reg->next;
798+
}
799+
return 0;
800+
}
801+

0 commit comments

Comments
 (0)