Skip to content

Commit 52322a5

Browse files
bzuzo
authored and jordankanter committed
finetune : fix ggml_allocr lifetimes (tmp workaround) (ggml-org#5033)
* Fix issue with alloc causing max_compute_size to be calculated
* remove ggml_allocr_free as suggested in issue ggml-org#4791
1 parent 141e4f0 commit 52322a5

File tree

1 file changed

+0
-4
lines changed

1 file changed

+0
-4
lines changed

examples/train-text-from-scratch/train-text-from-scratch.cpp

-4
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,6 @@ static void init_model(struct my_llama_model * model) {
263263
model->data.resize(size + tensor_alignment);
264264
alloc = ggml_allocr_new(model->data.data(), model->data.size(), tensor_alignment);
265265
alloc_model(alloc, model);
266-
ggml_allocr_free(alloc);
267266
}
268267

269268
static void randomize_model(struct my_llama_model * model, int seed, float mean, float std, float min, float max) {
@@ -1102,7 +1101,6 @@ int main(int argc, char ** argv) {
11021101
alloc = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
11031102
ggml_allocr_alloc(alloc, tokens_input);
11041103
ggml_allocr_alloc(alloc, target_probs);
1105-
ggml_allocr_free(alloc);
11061104

11071105
// context for compute tensors without their data
11081106
const size_t estimated_compute_size_wo_data = (
@@ -1149,7 +1147,6 @@ int main(int argc, char ** argv) {
11491147
best_compute_size = max_compute_size;
11501148
best_order = gf->order;
11511149
}
1152-
ggml_allocr_free(alloc);
11531150
ggml_free(ctx_compute);
11541151
}
11551152
size_t max_compute_size = best_compute_size;
@@ -1177,7 +1174,6 @@ int main(int argc, char ** argv) {
11771174
params.common.use_flash,
11781175
params.common.use_checkpointing
11791176
);
1180-
ggml_allocr_free(alloc);
11811177

11821178
std::vector<llama_token> train_tokens;
11831179
std::vector<size_t> train_samples_begin;

0 commit comments

Comments
 (0)