llama : use the same threshold for OpenBLAS and ggml thread limiting (#577)

Author: Maël Kerbiriou
Commit: 41318d708e (parent a6956b25a1)

@@ -856,7 +856,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
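For context, the new threshold mirrors ggml's own BLAS dispatch rule: ggml only routes a matrix multiplication to BLAS when every dimension involved is at least 32, so a prompt shorter than 32 tokens never takes the BLAS path. The sketch below paraphrases that rule and the thread-limit logic from this commit; the function and variable names here are illustrative, not ggml's actual API.

#include <stdbool.h>
#include <stdio.h>

// Illustrative paraphrase of ggml's BLAS dispatch rule (the real check lives
// in ggml's mul_mat code path; these names are not ggml's API): BLAS is used
// only when every dimension of the matrix product is at least 32.
static bool mul_mat_would_use_blas(int ne0, int ne1, int ne10) {
    return ne0 >= 32 && ne1 >= 32 && ne10 >= 32;
}

// The rule from this commit: once the prompt is large enough that ggml will
// hand the big matmuls to BLAS (N >= 32 tokens), run the graph with a single
// thread so the remaining threads do not spin-wait on the BLAS calls.
static int effective_threads(int N, bool has_blas, int n_threads) {
    return (N >= 32 && has_blas) ? 1 : n_threads;
}

int main(void) {
    const int n_embd = 4096; // model width; any value >= 32 behaves the same
    for (int N = 16; N <= 256; N *= 4) {
        printf("N=%3d  blas=%d  threads=%d\n",
               N,
               mul_mat_would_use_blas(n_embd, N, n_embd),
               effective_threads(N, true, 8));
    }
    return 0;
}

With the old N > 255 cutoff, prompts between 32 and 255 tokens took the BLAS path while still spinning all n_threads workers; aligning both rules at 32 means the single-thread mode engages exactly when BLAS takes over.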
