提示要下载这个库,但是下载了也还报错。ImportError: Loading GPTQ quantized model requires optimum library : `pip install optimum` and auto-gptq library 'pip install auto-gptq'
ImportError Traceback (most recent call last)
/tmp/ipython-input-3987283645.py in <cell line: 0>()
5 # load the tokenizer and the model
6 tokenizer = AutoTokenizer.from_pretrained(model_name)
----> 7 model = AutoModelForCausalLM.from_pretrained(
8 model_name,
9 torch_dtype="auto",
5 frames
/usr/local/lib/python3.12/dist-packages/transformers/models/auto/auto_factory.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
602 if model_class.config_class == config.sub_configs.get("text_config", None):
603 config = config.get_text_config()
--> 604 return model_class.from_pretrained(
605 pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
606 )
/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py in _wrapper(*args, **kwargs)
286 old_dtype = torch.get_default_dtype()
287 try:
--> 288 return func(*args, **kwargs)
289 finally:
290 torch.set_default_dtype(old_dtype)
/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)
5009 )
5010
-> 5011 hf_quantizer, config, dtype, device_map = get_hf_quantizer(
5012 config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
5013 )
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/auto.py in get_hf_quantizer(config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent)
309 config.quantization_config = quantization_config
310
--> 311 hf_quantizer = AutoHfQuantizer.from_config(
312 config.quantization_config,
313 pre_quantized=pre_quantized,
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/auto.py in from_config(cls, quantization_config, **kwargs)
183
184 target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
--> 185 return target_cls(quantization_config, **kwargs)
186
187 @classmethod
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/quantizer_gptq.py in __init__(self, quantization_config, **kwargs)
47
48 if not is_optimum_available():
---> 49 raise ImportError("Loading a GPTQ quantized model requires optimum (pip install optimum)")
50 from optimum.gptq import GPTQQuantizer
51
ImportError: Loading a GPTQ quantized model requires optimum (pip install optimum)
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
解决方法：执行 `pip install optimum gptqmodel` 后重启运行时（Runtime → Restart）再重新加载模型。从回溯可以看到，新版 transformers 的 GPTQ 加载路径依赖 optimum（`optimum.gptq`）配合 gptqmodel，而不再是旧的 auto-gptq，所以只装 auto-gptq 仍会报这个 ImportError。