Spaces:
Runtime error
Runtime error
Removing bitsandbytes 8-bit quantization
Browse files- model_translation.py +2 -2
model_translation.py
CHANGED
|
@@ -209,8 +209,8 @@ class ModelM2M100(metaclass=Singleton):
 209             self._model_name,
 210             device_map="auto",
 211             torch_dtype=torch.float16,
-212             low_cpu_mem_usage=True
-213             quantization_config=quantization_config
+212             low_cpu_mem_usage=True
+213             #quantization_config=quantization_config
 214         )
 215         self._model = torch.compile(self._model)
 216