diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 8ca71ba4d0..90eb4e92d4 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -2,7 +2,7 @@ name: Lint
 
-on: 
+on:
   pull_request:
     paths-ignore: ['**.md', 'docs/**']
 
@@ -19,3 +19,15 @@ jobs:
       run: |
         git fetch --no-tags --prune --depth=1 origin "${GITHUB_BASE_REF?}:${GITHUB_BASE_REF?}"
         ./build_tools/github_actions/lint_clang_format.sh -b "${GITHUB_BASE_REF}"
+
+  whitespace-checks:
+    # This job can only be run on pull_request since GITHUB_BASE_REF is only set on PR.
+    if: "github.event_name == 'pull_request'"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checking out repository
+        uses: actions/checkout@v2
+
+      - name: Running lint_whitespace_checks.sh on source files
+        run: |
+          ./build_tools/github_actions/lint_whitespace_checks.sh
diff --git a/build_tools/github_actions/lint_whitespace_checks.sh b/build_tools/github_actions/lint_whitespace_checks.sh
new file mode 100755
index 0000000000..3531023215
--- /dev/null
+++ b/build_tools/github_actions/lint_whitespace_checks.sh
@@ -0,0 +1,75 @@
+# Copyright 2022 The StableHLO Authors.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+print_usage() {
+  echo "Usage: $0 [-f]"
+  echo "  -f  Auto-fix whitespace issues."
+}
+
+FORMAT_MODE='validate'
+while getopts 'f' flag; do
+  case "${flag}" in
+    f) FORMAT_MODE="fix" ;;
+    *) print_usage
+       exit 1 ;;
+  esac
+done
+shift $(( OPTIND - 1 ))
+
+if [[ $# -ne 0 ]] ; then
+  print_usage
+  exit 1
+fi
+
+get_source_files() {
+  find . -iname '*.h' -o -iname '*.cpp' -o -iname '*.td' -o -iname '*.md' -o -iname '*.txt' -o -iname '*.mlir' -o -iname '*.yml'
+}
+
+files_without_eof_newline() {
+  get_source_files | xargs -L1 bash -c 'test "$(tail -c 1 "$0")" && echo "$0"'
+}
+
+files_with_trailing_whitespace() {
+  get_source_files | xargs grep -lP '[ \t]+$'
+}
+
+fix_files_without_eof_newline() {
+  echo $1 | xargs --no-run-if-empty sed -i -e '$a\'
+}
+
+fix_files_with_trailing_whitespace() {
+  echo $1 | xargs --no-run-if-empty sed -i 's/[ \t]*$//'
+}
+
+EOF_NL=$(files_without_eof_newline)
+TRAIL_WS=$(files_with_trailing_whitespace)
+
+if [[ $FORMAT_MODE == 'fix' ]]; then
+  echo "Fixing EOF newlines..."
+  fix_files_without_eof_newline "$EOF_NL"
+  echo "Fixing trailing whitespaces..."
+  fix_files_with_trailing_whitespace "$TRAIL_WS"
+else
+  if [ ! -z "$EOF_NL$TRAIL_WS" ]; then
+    echo "Missing newline at EOF:"
+    echo $EOF_NL
+    echo "Has trailing whitespace:"
+    echo $TRAIL_WS
+    echo
+    echo "Auto-fix using:"
+    echo "  $ lint_whitespace_checks.sh -f"
+    exit 1
+  else
+    echo "No whitespace issues found."
+ fi +fi diff --git a/docs/bytecode.md b/docs/bytecode.md index 26a35c1c85..fc1f320841 100644 --- a/docs/bytecode.md +++ b/docs/bytecode.md @@ -88,8 +88,8 @@ $ stablehlo-opt -emit-bytecode stablehlo/tests/print_stablehlo.mlir | stablehlo- Since attributes and types that don't get encoded are instead stored as strings, the `strings` command can be used to see what attributes were missed: -_Note: Currently all types/attrs are implemented and log only shows -the dialect name `stablehlo` and the unregistered `stablehlo.frontend_attributes` +_Note: Currently all types/attrs are implemented and log only shows +the dialect name `stablehlo` and the unregistered `stablehlo.frontend_attributes` and `stablehlo.sharding` attributes._ ``` @@ -123,9 +123,9 @@ Called: readRngAlgorithmAttr(mlir::DialectBytecodeReader &) const ### Adding Bytecode for a New Type / Attribute -Adding bytecode for a new type or attribute is simple. In the file +Adding bytecode for a new type or attribute is simple. In the file `StablehloBytecode.cpp` or `ChloBytecode.cpp` search for the term `TO ADD ATTRIBUTE` or `TO ADD TYPE` -depending on the change. Ensure that each location tagged with `TO ADD` +depending on the change. Ensure that each location tagged with `TO ADD` instructions is addressed. If so, bytecode for the attr/type should be generated on next call to `stablehlo-opt -emit-bytecode`. This can be verified using the proper bytecode trace. diff --git a/docs/type_inference.md b/docs/type_inference.md index 1afd952b08..0892fc67a3 100644 --- a/docs/type_inference.md +++ b/docs/type_inference.md @@ -1,6 +1,6 @@ # Type Inference -StableHLO has been originally bootstrapped from [the MHLO dialect](https://github.com/tensorflow/mlir-hlo#meta-hlo-dialect-mhlo), including inheriting the implementation of type inference. The implementation progress is tracked in [status.md](https://github.com/openxla/stablehlo/blob/main/docs/status.md). +StableHLO has been originally bootstrapped from [the MHLO dialect](https://github.com/tensorflow/mlir-hlo#meta-hlo-dialect-mhlo), including inheriting the implementation of type inference. The implementation progress is tracked in [status.md](https://github.com/openxla/stablehlo/blob/main/docs/status.md). To implement high-quality verifiers and shape functions for StableHLO ops, these guidelines are proposed below to follow: @@ -8,7 +8,7 @@ To implement high-quality verifiers and shape functions for StableHLO ops, these These proposals apply to both revisiting existing implementations, and achieving new ops until a comprehensive coverage. -## (P1) Use the StableHLO spec as the source of truth. +## (P1) Use the StableHLO spec as the source of truth. The [spec](https://github.com/openxla/stablehlo/blob/main/docs/spec_draft.md) is the source of truth for all verifiers and shape functions of the StableHLO ops. The existing verifiers and shape functions of every op need revisited to be fully aligned with the specification. Note that the specification document keeps evolving, in cases that the spec for an op is not available, the XLA implementation should be used as the source of truth instead: including [xla/service/shape\_inference.cc](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/shape_inference.cc) and [xla/service/hlo\_verifier.cc](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/hlo_verifier.cc). 
 The XLA implementation doesn't cover unbounded dynamism, so for unbounded dynamism we'll apply common sense until the dynamism RFC is available.
 
@@ -22,14 +22,14 @@
 Do we need to add tests for the constraints from the ODS? Please see “Establish the bar for testing” below.
 
 ## (P3) Maintain verification code in verifiers and shape functions
 
-Both 
-- **verifiers**: implemented by `Op::verify()`, and 
-- **shape functions**: implemented by `InferTypeOpInterface` like `Op::inferReturnTypes()` or `Op::inferReturnTypeComponents` 
+Both
+- **verifiers**: implemented by `Op::verify()`, and
+- **shape functions**: implemented by `InferTypeOpInterface` like `Op::inferReturnTypes()` or `Op::inferReturnTypeComponents()`
 
 may have verification code to check operands/attributes/results. An ideal split would be: let the verifiers check the operands/attributes, then let the shape functions calculate the inferred result types and check their compatibility against the real result types. However, in reality this split has a few problems:
 
 1. Duplicated code: for example, in verifiers we do some processing on the operands and then verify some intermediate results; in shape functions these intermediate results are needed to infer the final results, so they have to be calculated twice.
-2. Maintenance burden: as verifications of an op are contained in two different methods. 
+2. Maintenance burden: the verifications of an op are contained in two different methods.
 
 One solution is to discard verifiers totally and move all the verification code into the shape functions ([example](https://github.com/openxla/stablehlo/pull/135)). However, there are use cases where the op is created before all its components are in place; for [example](https://github.com/tensorflow/mlir-hlo/blob/master/lib/Dialect/mhlo/transforms/mhlo_canonicalize_reduction.cc#L222), the ReduceOp is created without regions, and the shape functions are used soon after. Involving verification in shape functions would break use cases like this. Thus, the most practical solution is to include as much verification as possible in verifiers and leave the shape functions as thin as possible.
 
@@ -39,7 +39,7 @@
 
 We do not. The tests should focus on the verifiers and shape functions, while changes to the ODS require revisiting the op.
 
-But stay careful about the missing pieces: for example, if the op contains the trait `SameOperandsAndResultShape` which checks only shapes but not element type, then the verification for element types of operands/results still need tests. 
+But stay careful about the missing pieces: for example, if the op contains the trait `SameOperandsAndResultShape`, which checks only shapes and not element types, then the verification of the element types of operands/results still needs tests.
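To make the (P3) split concrete, here is a minimal C++ sketch using a hypothetical `FooOp`. The op, its accessors, and the specific checks are illustrative only, not actual StableHLO code; only the structure matters: the verifier owns the checks, and the shape function stays thin by assuming they already passed.

```c++
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"

using namespace mlir;

// Verifier: owns all operand/attribute checks, per (P3).
LogicalResult FooOp::verify() {
  auto type = getOperand().getType().dyn_cast<RankedTensorType>();
  if (!type) return success();  // nothing to check for an unranked operand
  int64_t dim = getDimension();
  if (dim < 0 || dim >= type.getRank())
    return emitOpError("dimension ") << dim << " is out of range for rank "
                                     << type.getRank();
  return success();
}

// Shape function: kept thin. It may assume the verifier's checks hold, so no
// intermediate results are recomputed and no verification is duplicated here.
LogicalResult FooOp::inferReturnTypes(
    MLIRContext *context, Optional<Location> location, ValueRange operands,
    DictionaryAttr attributes, RegionRange regions,
    SmallVectorImpl<Type> &inferredReturnTypes) {
  inferredReturnTypes.push_back(operands.front().getType());
  return success();
}
```

With this structure, the duplicated-code and maintenance problems listed above disappear: each check lives in exactly one method, and the shape function only computes types.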
 
 **Where do we put tests for verifiers and type inference?**
 
diff --git a/stablehlo/tests/infer_stablehlo.mlir b/stablehlo/tests/infer_stablehlo.mlir
index 65d26fc622..2d61d291e0 100644
--- a/stablehlo/tests/infer_stablehlo.mlir
+++ b/stablehlo/tests/infer_stablehlo.mlir
@@ -413,7 +413,7 @@ func.func @convolution(%arg0 : tensor<100x26x26x32xf32>, %arg1 : tensor<3x3x1x32
   // CHECK: (tensor<100x28x28x1xf32>) -> tensor<100x28x28x1xindex>
   %1 = "hlo_test_infer.get_return_type_components"(%result) : (tensor<100x28x28x1xf32>) -> tensor<100x28x28x1xindex>
-  func.return %1 : tensor<100x28x28x1xindex> 
+  func.return %1 : tensor<100x28x28x1xindex>
 }
 
 // -----
@@ -750,13 +750,13 @@ func.func @pad_with_negative_inferred_bounds(%arg0: tensor<3x?x?xf16, #stablehlo
 
 // CHECK-LABEL: @concat_bounds_c0
 func.func @concat_bounds_c0(
-  %arg0: tensor<5x1xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x1xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x2xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x1xi32, #stablehlo.type_extensions>, 
+    tensor<5x1xi32, #stablehlo.type_extensions>,
     tensor<5x2xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x3xi32>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
   func.return %1 : tensor<*xindex>
 }
 
@@ -764,19 +764,19 @@ func.func @concat_bounds_c0(
 
 // CHECK-LABEL: @concat_bounds_c1
 func.func @concat_bounds_c1(
-  %arg0: tensor<5x2xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x2xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x2xi32, #stablehlo.type_extensions>, 
+    tensor<5x2xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
 
   %result_swap = "stablehlo.concatenate"(%arg1, %arg0) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x2xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32>
-  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex> 
+  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex>
 
   func.return %1 : tensor<*xindex>
 }
 
@@ -785,19 +785,19 @@ func.func @concat_bounds_c1(
 
 // CHECK-LABEL: @concat_bounds_c2
 func.func @concat_bounds_c2(
-  %arg0: tensor<5x2xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x2xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x2xi32, #stablehlo.type_extensions>, 
+    tensor<5x2xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32, #stablehlo.type_extensions>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
 
   %result_swap = "stablehlo.concatenate"(%arg1, %arg0) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x2xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32, #stablehlo.type_extensions>
-  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex> 
+  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex>
 
   func.return %1 : tensor<*xindex>
 }
 
@@ -806,13 +806,13 @@ func.func @concat_bounds_c2(
 
 // CHECK-LABEL: @concat_bounds_c3
 func.func @concat_bounds_c3(
-  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
   func.return %1 : tensor<*xindex>
 }
 
@@ -820,19 +820,19 @@ func.func @concat_bounds_c3(
 
 // CHECK-LABEL: @concat_bounds_c4
 func.func @concat_bounds_c4(
-  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
 
   %result_swap = "stablehlo.concatenate"(%arg1, %arg0) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32>
-  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex> 
+  %2 = "hlo_test_infer.get_return_types"(%result_swap) : (tensor) -> tensor<*xindex>
 
   func.return %1 : tensor<*xindex>
 }
 
@@ -841,13 +841,13 @@ func.func @concat_bounds_c4(
 
 // CHECK-LABEL: @concat_bounds_c5
 func.func @concat_bounds_c5(
-  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>, 
+  %arg0: tensor<5x?xi32, #stablehlo.type_extensions>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : (
-    tensor<5x?xi32, #stablehlo.type_extensions>, 
+    tensor<5x?xi32, #stablehlo.type_extensions>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor
   // CHECK: types0 = tensor<5x?xi32, #stablehlo.type_extensions>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex> 
+  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor) -> tensor<*xindex>
   func.return %1 : tensor<*xindex>
 }
 
@@ -860,13 +860,13 @@ func.func @concat_bounds_c5(
 //
 // CHECK-LABEL: @concat_bounds_unranked_c0
 func.func @concat_bounds_unranked_c0(
-  %arg0: tensor<*xi32>, 
+  %arg0: tensor<*xi32>,
   %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> {
   %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 0 : i64 } : (
-    tensor<*xi32>, 
+    tensor<*xi32>,
     tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<5x?xi32>
   // CHECK: types0 = tensor>
-  %1 = "hlo_test_infer.get_return_types"(%result) : (tensor<5x?xi32>) -> tensor<*xindex> 
"hlo_test_infer.get_return_types"(%result) : (tensor<5x?xi32>) -> tensor<*xindex> + %1 = "hlo_test_infer.get_return_types"(%result) : (tensor<5x?xi32>) -> tensor<*xindex> func.return %1 : tensor<*xindex> } @@ -874,12 +874,12 @@ func.func @concat_bounds_unranked_c0( // CHECK-LABEL: @concat_bounds_unranked_c1 func.func @concat_bounds_unranked_c1( - %arg0: tensor<*xi32>, + %arg0: tensor<*xi32>, %arg1: tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<*xindex> { %result = "stablehlo.concatenate"(%arg0, %arg1) { dimension = 1 : i64 } : ( - tensor<*xi32>, + tensor<*xi32>, tensor<5x?xi32, #stablehlo.type_extensions>) -> tensor<5x?xi32> // CHECK: types0 = tensor<5x?xi32> - %1 = "hlo_test_infer.get_return_types"(%result) : (tensor<5x?xi32>) -> tensor<*xindex> + %1 = "hlo_test_infer.get_return_types"(%result) : (tensor<5x?xi32>) -> tensor<*xindex> func.return %1 : tensor<*xindex> } diff --git a/stablehlo/tests/ops_stablehlo_roundtrip.mlir b/stablehlo/tests/ops_stablehlo_roundtrip.mlir index 6fbc4b9183..ddeeaaf10b 100644 --- a/stablehlo/tests/ops_stablehlo_roundtrip.mlir +++ b/stablehlo/tests/ops_stablehlo_roundtrip.mlir @@ -7,7 +7,7 @@ // trip of the a bytecoded version of this file. If the outputs do not match, // the test will fail. // -// Additionally this test will fail if any ops are not implemented on read or +// Additionally this test will fail if any ops are not implemented on read or // write. This is accomplished by calling `stablehlo-opt` with the // `-debug-only=stablehlo-bytecode` trace enabled. If any type or attr is not // implemented, a message '***Not Implemented' is logged. If there are no logs diff --git a/stablehlo/tests/print_stablehlo.mlir b/stablehlo/tests/print_stablehlo.mlir index d512992fc1..039c8bea01 100644 --- a/stablehlo/tests/print_stablehlo.mlir +++ b/stablehlo/tests/print_stablehlo.mlir @@ -123,7 +123,7 @@ func.func @type_convert_ops(%arg0 : tensor<2xf32>) -> () { // CHECK-NEXT: %2 = stablehlo.bitcast_convert %arg0 : (tensor<2xf32>) -> tensor<2xi32> %0 = "stablehlo.convert"(%arg0) : (tensor<2xf32>) -> tensor<2xf64> %1 = "stablehlo.reshape"(%arg0) : (tensor<2xf32>) -> tensor<1x2xf32> - %2 = "stablehlo.bitcast_convert"(%arg0) : (tensor<2xf32>) -> tensor<2xi32> + %2 = "stablehlo.bitcast_convert"(%arg0) : (tensor<2xf32>) -> tensor<2xi32> "stablehlo.return"() : () -> () } diff --git a/stablehlo/tests/print_types_invalid.mlir b/stablehlo/tests/print_types_invalid.mlir index e8ee6b2ea6..3e4ffb3360 100644 --- a/stablehlo/tests/print_types_invalid.mlir +++ b/stablehlo/tests/print_types_invalid.mlir @@ -238,7 +238,7 @@ func.func @reduce_precision_overflow_int32_m(%arg0: tensor<3x4xf32>) -> (tensor< func.func @variadic_with_attr_no_comma(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> () { // expected-error @+1 {{expected ','}} - %0 = stablehlo.concatenate %arg0, %arg1 dim = 1 : (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32> + %0 = stablehlo.concatenate %arg0, %arg1 dim = 1 : (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32> func.return } @@ -246,7 +246,7 @@ func.func @variadic_with_attr_no_comma(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2 func.func @variadic_with_attr_no_comma_no_dim(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> () { // expected-error @+1 {{expected ','}} - %0 = stablehlo.concatenate %arg0, %arg1: (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32> + %0 = stablehlo.concatenate %arg0, %arg1: (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32> func.return } @@ -254,7 +254,7 @@ func.func @variadic_with_attr_no_comma_no_dim(%arg0: 
 
 func.func @variadic_with_attr_no_dim(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> (tensor<3x4xf32>) {
   // expected-error @+1 {{custom op 'stablehlo.concatenate' expected 'dim'}}
-  %0 = stablehlo.concatenate %arg0, %arg1, : (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32> 
+  %0 = stablehlo.concatenate %arg0, %arg1, : (tensor<4x1xf32>, tensor<4x2xf32>) -> tensor<4x3xf32>
   func.return
 }
 
diff --git a/stablehlo/tests/verify_conv.mlir b/stablehlo/tests/verify_conv.mlir
index e26bf4913b..db6a242826 100644
--- a/stablehlo/tests/verify_conv.mlir
+++ b/stablehlo/tests/verify_conv.mlir
@@ -790,7 +790,7 @@ func.func @check_inferred_type_with_dynamic_input_dims(%arg0: tensor<1x8x8x207xf
 //===----------------------------------------------------------------------===//
 
 // This is a positive test in MLIR-HLO:
-// https://github.com/tensorflow/mlir-hlo/blob/master/tests/Dialect/mhlo/ops.mlir#L3829 
+// https://github.com/tensorflow/mlir-hlo/blob/master/tests/Dialect/mhlo/ops.mlir#L3829
 // but negative here: stablehlo.convolution does not support unknown dimensions
 // dim_numbers = [b, 0, 1, ?, f]x[0, 1, ?, i, o]->[?, b, 0, 1, f]
 // window = {stride = [1, 1], pad = [[1, 1], [1, 1]], lhs_dilate = [1, 1], rhs_dilate = [1, 1]}
@@ -1000,4 +1000,4 @@ func.func @convolution(%arg0: tensor<2x2x3x4xf32>, %arg1: tensor<3x5x5x3xf32>) -
        { batch_group_count = 1 : i64, feature_group_count = 1 : i64} :
        (tensor<2x2x3x4xf32>, tensor<3x5x5x3xf32>) -> tensor<3x5x5x4xf32>
   func.return %0 : tensor<3x5x5x4xf32>
-}
\ No newline at end of file
+}
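For reference, the `\ No newline at end of file` marker removed in the last hunk is exactly the class of issue the new `lint_whitespace_checks.sh` catches. A local run would look roughly like this, based on the script's echo statements; the reported paths are illustrative, not from an actual run. In validate mode the script exits with status 1 when issues are found, which is what fails the CI job:

```
$ ./build_tools/github_actions/lint_whitespace_checks.sh
Missing newline at EOF:
./stablehlo/tests/verify_conv.mlir
Has trailing whitespace:
./docs/bytecode.md ./docs/type_inference.md

Auto-fix using:
  $ lint_whitespace_checks.sh -f

$ ./build_tools/github_actions/lint_whitespace_checks.sh -f
Fixing EOF newlines...
Fixing trailing whitespaces...
```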