From d40fded93e1a533e969768e1e335c15c61c296ce Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 21 Apr 2023 10:23:36 +0300
Subject: [PATCH] llama : fix comment for "output.weight" tensor

---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 4a646eb..33ee4fb 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1618,8 +1618,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // quantize only 2D tensors
         quantize &= (tensor.ne.size() == 2);

-        // GG: uncomment this to keep the output layer in FP16
-        //if (tensor.name.rfind("output")) {
+        // uncomment this to keep the output layer in FP16
+        //if (tensor.name == "output.weight") {
         //    quantize = false;
         //}