From 492e16891d517b6f9df03ebe1a794a0057b64c82 Mon Sep 17 00:00:00 2001 From: Taksh Date: Sun, 19 Apr 2026 10:40:59 +0530 Subject: [PATCH] Fix load_compress_model tokenizer retry: use logical `not` instead of bitwise `~` on bool --- fastchat/model/compression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fastchat/model/compression.py b/fastchat/model/compression.py index 7329cfe0c..2f14daf5f 100644 --- a/fastchat/model/compression.py +++ b/fastchat/model/compression.py @@ -115,7 +115,7 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="mai ) except TypeError: tokenizer = AutoTokenizer.from_pretrained( - model_path, use_fast=~use_fast, revision=revision, trust_remote_code=True + model_path, use_fast=not use_fast, revision=revision, trust_remote_code=True ) with init_empty_weights(): # `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel