Commit 8125e6c

server : don't overfill the batch during infill (#10018)
ggml-ci
1 parent 8841ce3 commit 8125e6c

File tree

2 files changed: +5 -2 lines changed


examples/server/server.cpp (+1)

@@ -1880,6 +1880,7 @@ struct server_context {
     if (slot.state == SLOT_STATE_STARTED) {
         slot.t_start_process_prompt = ggml_time_us();
         slot.t_start_generation = 0;
+
         slot.n_past = 0;
         slot.n_prompt_tokens = prompt_tokens.size();
         slot.state = SLOT_STATE_PROCESSING_PROMPT;

examples/server/utils.hpp (+4 -2)

@@ -266,8 +266,10 @@ static llama_tokens format_infill(
     }

     // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
-    const int n_suffix_take = std::min<int>(tokens_suffix.size(), (n_batch/4));
-    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
+    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
+    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));
+
+    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

     // fill the rest of the context with extra chunks
     const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());
