From f7449e06ce2343902b639da699d53721396e4a9c Mon Sep 17 00:00:00 2001
From: Minsoo Cheong <54794500+mscheong01@users.noreply.github.com>
Date: Tue, 26 Mar 2024 18:11:46 +0900
Subject: [PATCH] embedding : adjust `n_ubatch` value (#6296)

* embedding: assign `n_ubatch` value, print error on `n_batch` overflow

* Update examples/embedding/embedding.cpp

Co-authored-by: Xuan Son Nguyen

* use %ld instead of %lld

* Revert "use %ld instead of %lld"

This reverts commit ea753ede90a86a0699f65878cc8e2020ff5eabb8.

---------

Co-authored-by: Xuan Son Nguyen
---
 examples/embedding/embedding.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index cbf9aa2b560dd..9aede7fadfe31 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -61,6 +61,8 @@ int main(int argc, char ** argv) {
     }
 
     params.embedding = true;
+    // For non-causal models, batch size must be equal to ubatch size
+    params.n_ubatch = params.n_batch;
 
     print_build_info();
 
@@ -114,7 +116,9 @@ int main(int argc, char ** argv) {
     for (const auto & prompt : prompts) {
         auto inp = ::llama_tokenize(ctx, prompt, true, false);
         if (inp.size() > n_batch) {
-            inp.resize(n_batch);
+            fprintf(stderr, "%s: error: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
+                    __func__, (long long int) inp.size(), (long long int) n_batch);
+            return 1;
         }
         inputs.push_back(inp);
     }
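
Note (not part of the patch): below is a minimal, self-contained C++ sketch of the rule this change enforces, namely that for non-causal embedding models the micro-batch must cover the whole logical batch, so n_ubatch is forced to n_batch and any prompt longer than n_batch is rejected with an error instead of being silently truncated. The Params struct and check_prompt() helper are hypothetical stand-ins for illustration only, not part of the llama.cpp API.

// Sketch of the batching rule added by this patch (hypothetical names).
#include <cstdio>
#include <vector>

struct Params {
    int n_batch  = 2048; // logical batch size
    int n_ubatch = 512;  // physical (micro) batch size
};

// Returns false (and prints an error) when the tokenized prompt cannot fit
// into a single batch, mirroring the behaviour the patch adds.
static bool check_prompt(const std::vector<int> & tokens, int n_batch) {
    if ((long long) tokens.size() > (long long) n_batch) {
        fprintf(stderr, "error: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                (long long) tokens.size(), (long long) n_batch);
        return false;
    }
    return true;
}

int main() {
    Params params;
    // For non-causal models, batch size must be equal to ubatch size.
    params.n_ubatch = params.n_batch;

    std::vector<int> prompt_tokens(3000, 1); // pretend this prompt tokenized to 3000 tokens
    if (!check_prompt(prompt_tokens, params.n_batch)) {
        return 1; // fail loudly rather than truncating the embedding input
    }
    return 0;
}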