mix_ascend_bf16_infer_err (#6794)

zhoufan2956
2025-02-12 19:48:11 +08:00
committed by GitHub
parent ab888e1e0b
commit 35740259de


@@ -1082,6 +1082,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
     if is_intel_xpu():
         return True
 
+    if is_ascend_npu():
+        return True
+
     props = torch.cuda.get_device_properties(device)
     if props.major >= 8:
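
For context, here is a minimal, self-contained sketch of the kind of device-capability check this diff extends: prefer bf16 on an Ascend NPU, otherwise fall back to the CUDA compute-capability test. The is_ascend_npu detection via the torch_npu extension is an assumed strategy for illustration, not ComfyUI's actual model_management implementation.

import torch


def is_ascend_npu() -> bool:
    # Assumed detection: Ascend support is provided by the torch_npu
    # extension, which registers torch.npu when imported. Treat its
    # absence as "no NPU present".
    try:
        import torch_npu  # noqa: F401
        return torch.npu.is_available()
    except ImportError:
        return False


def should_use_bf16(device=None) -> bool:
    # Ascend NPUs handle bf16 natively, so prefer it there; this is the
    # branch the commit adds before the CUDA property lookup.
    if is_ascend_npu():
        return True
    # On CUDA, bf16 is only well supported from compute capability 8.0
    # (Ampere) onward.
    if torch.cuda.is_available():
        if device is None:
            device = torch.cuda.current_device()
        props = torch.cuda.get_device_properties(device)
        return props.major >= 8
    return False


if __name__ == "__main__":
    print("use bf16:", should_use_bf16())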