Commit
·
055c8bd
1
Parent(s):
455746d
Add support for Ascend NPU
Browse files
Ascend NPU is already supported by Transformers. This PR allows users to easily use ChatGLM2 on NPU :-)
- modeling_chatglm.py +1 -1
modeling_chatglm.py
CHANGED
|
@@ -27,7 +27,7 @@ from .configuration_chatglm import ChatGLMConfig
|
|
| 27 |
|
| 28 |
# flags required to enable jit fusion kernels
|
| 29 |
|
| 30 |
-
if sys.platform != 'darwin':
|
| 31 |
torch._C._jit_set_profiling_mode(False)
|
| 32 |
torch._C._jit_set_profiling_executor(False)
|
| 33 |
torch._C._jit_override_can_fuse_on_cpu(True)
|
|
|
|
| 27 |
|
| 28 |
# flags required to enable jit fusion kernels
|
| 29 |
|
| 30 |
+
if sys.platform != 'darwin' and torch.cuda.is_available():
|
| 31 |
torch._C._jit_set_profiling_mode(False)
|
| 32 |
torch._C._jit_set_profiling_executor(False)
|
| 33 |
torch._C._jit_override_can_fuse_on_cpu(True)
|