From 5751bc4a4a1fa5683607151c6e5756ffccd8e5a7 Mon Sep 17 00:00:00 2001
From: Taksh
Date: Mon, 6 Apr 2026 17:14:28 +0530
Subject: [PATCH] Fix batch embedding averaging for batch_size > 1

token_num was computed as a single scalar summing all tokens across the
entire batch, then used to divide each per-sequence embedding. This
caused incorrect averaging when batch_size > 1, as every sequence was
divided by the total token count instead of its own.

Change token_num to a per-sequence tensor via
attention_mask.sum(dim=1, keepdim=True) so each sequence is divided by
its own token count.

Fixes #3785

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 fastchat/serve/model_worker.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fastchat/serve/model_worker.py b/fastchat/serve/model_worker.py
index 683a78556..277c01f30 100644
--- a/fastchat/serve/model_worker.py
+++ b/fastchat/serve/model_worker.py
@@ -171,7 +171,7 @@ def __process_embed_chunk(self, input_ids, attention_mask, **model_type_dict):
         mask = attention_mask.unsqueeze(-1).expand(data.size()).float()
         masked_embeddings = data * mask
         sum_embeddings = torch.sum(masked_embeddings, dim=1)
-        token_num = torch.sum(attention_mask).item()
+        token_num = attention_mask.sum(dim=1, keepdim=True)
 
         return sum_embeddings, token_num
 
@@ -224,7 +224,7 @@ def get_embeddings(self, params):
                 )
                 embedding = embedding / token_num
                 normalized_embeddings = F.normalize(embedding, p=2, dim=1)
-                ret["token_num"] = token_num
+                ret["token_num"] = token_num.sum().item()
             else:
                 all_embeddings = []
                 all_token_num = 0
@@ -273,7 +273,7 @@ def get_embeddings(self, params):
 
                 embedding = torch.sum(all_embeddings_tensor, dim=0) / all_token_num
                 normalized_embeddings = F.normalize(embedding, p=2, dim=1)
-                ret["token_num"] = all_token_num
+                ret["token_num"] = all_token_num.sum().item()
 
             if base64_encode == "base64":
                 out_embeddings = self.__encode_base64(normalized_embeddings)
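
Note (illustration only, not part of the patch): a minimal sketch of the
averaging behavior before and after the change, using hypothetical toy
values and the same masked-sum pooling the worker performs:

    import torch

    # Toy batch of 2 sequences: 2 real tokens vs. 4 real tokens.
    hidden = torch.ones(2, 4, 3)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 0, 0],
                                   [1, 1, 1, 1]]).float()

    mask = attention_mask.unsqueeze(-1).expand(hidden.size())
    sum_embeddings = torch.sum(hidden * mask, dim=1)  # (batch, hidden)

    # Before the fix: one scalar (6 here) divides every row.
    old = sum_embeddings / torch.sum(attention_mask).item()

    # After the fix: shape (batch, 1) broadcasts row-wise over hidden.
    token_num = attention_mask.sum(dim=1, keepdim=True)
    new = sum_embeddings / token_num

    print(old)  # rows ~0.333 and ~0.667: each skewed by the other sequence
    print(new)  # both rows exactly 1.0: each row's own per-token mean

The (batch, 1) shape is what makes ret["token_num"] need the
token_num.sum().item() conversion in the patch, since the API field
expects a single integer total rather than a tensor.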