diff --git a/llama.cpp b/llama.cpp
index 653558b..6d8b706 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
         case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
         case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
-        default: LLAMA_ASSERT(false);
+        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
+                                      return "mostly Q4_1, some F16";
+        default:                      return "unknown, may not work";
     }
 }
 
diff --git a/llama.h b/llama.h
index 8a0d50f..7a258a1 100644
--- a/llama.h
+++ b/llama.h
@@ -71,6 +71,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_F16  = 1, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
     };
 
     LLAMA_API struct llama_context_params llama_context_default_params();