Skip to content

Commit

Permalink
PR #22368: [XLA:GPU] Replace "gpu_b100" with "gpu_b200" test backend name
Browse files Browse the repository at this point in the history

Imported from GitHub PR #22368

B200 is the name of the released chip.
https://www.nvidia.com/en-us/data-center/dgx-b200/
Copybara import of the project:

--
124885a by Sergey Kozub <[email protected]>:

[XLA:GPU] Replace "gpu_b100" with "gpu_b200" test backend name

Merging this change closes #22368

FUTURE_COPYBARA_INTEGRATE_REVIEW=#22368 from openxla:skozub/b200 124885a
PiperOrigin-RevId: 724403930
  • Loading branch information
sergey-kozub authored and Google-ML-Automation committed Feb 8, 2025
1 parent 686154f commit 9cd6884
Show file tree
Hide file tree
Showing 11 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion build_tools/lint/tags.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@
"xla_gpu_v100": "Runs on a v100.",
"xla_gpu_a100": "Runs on an a100.",
"xla_gpu_h100": "Runs on an h100.",
"xla_gpu_b100": "Runs on an b100.",
"xla_gpu_b200": "Runs on a b200.",
# Below tags are consumed by `xla_test`.
"test_xla_cpu_no_thunks": (
"Internally, `xla_test` sets `--xla_cpu_use_thunk_runtime` to false."
Expand Down
2 changes: 1 addition & 1 deletion xla/backends/gpu/codegen/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ xla_test(
srcs = if_cuda_is_configured(["cudnn_test.cc"]),
backends = [
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
deps = [
"//xla:comparison_util",
Expand Down
14 changes: 7 additions & 7 deletions xla/backends/gpu/codegen/triton/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -625,7 +625,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
shard_count = 20,
Expand Down Expand Up @@ -680,7 +680,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
tags = [
Expand Down Expand Up @@ -718,7 +718,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
shard_count = 30,
Expand Down Expand Up @@ -763,7 +763,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
tags = [
Expand Down Expand Up @@ -908,7 +908,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
tags = [
Expand Down Expand Up @@ -936,7 +936,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
shard_count = 10,
Expand Down Expand Up @@ -1023,7 +1023,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
tags = ["no_mac"],
Expand Down
4 changes: 2 additions & 2 deletions xla/backends/gpu/runtime/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_v100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured(["TENSORFLOW_USE_ROCM=1"]),
Expand Down Expand Up @@ -343,7 +343,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_v100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
deps = [
Expand Down
8 changes: 4 additions & 4 deletions xla/service/gpu/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -1871,7 +1871,7 @@ xla_test(
backends = [
"gpu_v100",
"gpu_a100",
"gpu_b100",
"gpu_b200",
],
tags = [
"cuda-only",
Expand Down Expand Up @@ -2620,7 +2620,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
deps = [
":variant_visitor",
Expand Down Expand Up @@ -2867,7 +2867,7 @@ xla_test(
"gpu_v100",
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
tags = [
Expand Down Expand Up @@ -2951,7 +2951,7 @@ xla_test(
srcs = ["determinism_test.cc"],
backends = [
"gpu_a100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
deps = [
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/autotuning/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
tags = [
"cuda-only",
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/model/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ xla_test(
"gpu_v100",
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
deps = [
":analytical_latency_estimator",
Expand Down
8 changes: 4 additions & 4 deletions xla/service/gpu/tests/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_v100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
deps = [
Expand Down Expand Up @@ -685,7 +685,7 @@ xla_test(
srcs = ["tensor_float_32_global_var_test.cc"],
backends = [
"gpu_a100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
] + if_oss([
"gpu_any",
Expand All @@ -704,7 +704,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
tags = ["cuda-only"],
deps = if_cuda_is_configured(
Expand Down Expand Up @@ -748,7 +748,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
shard_count = 2,
deps = [
Expand Down
6 changes: 3 additions & 3 deletions xla/service/gpu/transforms/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -475,7 +475,7 @@ xla_cc_test(
xla_test(
name = "block_scaling_rewriter_cudnn_test",
srcs = ["block_scaling_rewriter_cudnn_test.cc"],
backends = ["gpu_b100"],
backends = ["gpu_b200"],
deps = [
":block_scaling_rewriter",
"//xla/tests:hlo_test_base",
Expand Down Expand Up @@ -1410,7 +1410,7 @@ xla_test(
"gpu_a100",
"gpu_p100",
"gpu_v100",
"gpu_b100",
"gpu_b200",
"gpu_amd_any",
],
deps = if_gpu_is_configured(
Expand Down Expand Up @@ -3226,7 +3226,7 @@ xla_test(
backends = [
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
deps = [
":triton_fusion_numerics_verifier",
Expand Down
2 changes: 1 addition & 1 deletion xla/tests/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -1671,7 +1671,7 @@ xla_test(
"gpu_v100",
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
],
data = ["data/cudnn_reproducer.hlo"],
deps = [
Expand Down
6 changes: 3 additions & 3 deletions xla/tests/build_defs.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,15 @@ NVIDIA_GPU_BACKENDS = [
"gpu_v100",
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
]

# The generic "gpu" backend includes the actual backends in this list.
NVIDIA_GPU_DEFAULT_BACKENDS = [
"gpu_any",
"gpu_a100",
"gpu_h100",
"gpu_b100",
"gpu_b200",
]

AMD_GPU_DEFAULT_BACKENDS = ["gpu_amd_any"]
Expand Down Expand Up @@ -65,7 +65,7 @@ def prepare_nvidia_gpu_backend_data(backends, disabled_backends, backend_tags, b
"gpu_v100": (7, 0),
"gpu_a100": (8, 0),
"gpu_h100": (9, 0),
"gpu_b100": (10, 0),
"gpu_b200": (10, 0),
}
for gpu_backend in NVIDIA_GPU_BACKENDS:
all_tags = new_backend_tags[gpu_backend]
Expand Down

0 comments on commit 9cd6884

Please sign in to comment.