From 928480ef5b7b03d7a07e98286aebe3d8b24457d9 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 22 Mar 2023 07:45:00 +0200
Subject: [PATCH] Init llama_context_params properly from CLI (#370)

---
 llama.cpp | 4 ++++
 main.cpp  | 5 ++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 08dfcb3..fde4d25 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1398,6 +1398,10 @@ struct llama_context * llama_init_from_file(
 
     llama_context * ctx = new llama_context;
 
+    if (params.seed <= 0) {
+        params.seed = time(NULL);
+    }
+
     ctx->rng = std::mt19937(params.seed);
     ctx->logits_all = params.logits_all;
 
diff --git a/main.cpp b/main.cpp
index 7db3df7..b98c9e2 100644
--- a/main.cpp
+++ b/main.cpp
@@ -194,7 +194,10 @@ int main(int argc, char ** argv) {
     {
         auto lparams = llama_context_default_params();
 
-        lparams.f16_kv = params.memory_f16;
+        lparams.n_ctx = params.n_ctx;
+        lparams.n_parts = params.n_parts;
+        lparams.seed = params.seed;
+        lparams.f16_kv = params.memory_f16;
         lparams.logits_all = params.perplexity;
 
         ctx = llama_init_from_file(params.model.c_str(), lparams);