From b595652166186fb84ceac681f9cd260770e052f3 Mon Sep 17 00:00:00 2001
From: wz501469
Date: Fri, 12 Dec 2025 14:50:39 +0800
Subject: [PATCH] Add support for the LoRA (Low-Rank Adaptation) PEFT method
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 CGE/utils/vllm_codefuse_cge_large.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/CGE/utils/vllm_codefuse_cge_large.py b/CGE/utils/vllm_codefuse_cge_large.py
index 49c58e0..ab96331 100644
--- a/CGE/utils/vllm_codefuse_cge_large.py
+++ b/CGE/utils/vllm_codefuse_cge_large.py
@@ -496,7 +496,12 @@ def __init__(
         self.config = config
         self.lora_config = lora_config
         self.quant_config = quant_config
-        self.plm_model = Qwen2ForCausalLM(config, cache_config, quant_config)
+        self.plm_model = Qwen2ForCausalLM(
+            config=config,
+            cache_config=cache_config,
+            quant_config=quant_config,
+            lora_config=lora_config  # add lora_config so the inner model picks up LoRA adapters
+        )
         self.embedding_method = config.embedding_method
         self.inf_seq_length = config.inf_seq_length
         self.padding_side = config.padding_side
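
Usage note (not part of the patch): a minimal sketch of how the forwarded
lora_config is expected to be exercised, assuming the model is loaded through
vLLM's standard LLM entry point with its generic LoRA support enabled. The
model id, adapter path, and rank below are placeholders, not values taken
from this change.

    from vllm import LLM
    from vllm.lora.request import LoRARequest

    # enable_lora makes vLLM build a LoRAConfig and pass it to the model;
    # with this patch the wrapper forwards it into the inner Qwen2ForCausalLM.
    llm = LLM(
        model="codefuse-ai/CodeFuse-CGE-Large",  # placeholder model id
        trust_remote_code=True,
        enable_lora=True,
        max_lora_rank=64,                        # placeholder rank
    )

    # Attach a trained LoRA adapter per request (placeholder adapter path).
    outputs = llm.generate(
        ["def quick_sort(arr):"],
        lora_request=LoRARequest("cge-lora", 1, "/path/to/lora_adapter"),
    )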