
[mlir] -tosa-optional-decompositions crashes #118452

@Anonymous7i0em3yd

Description

git version: d097070

system: Ubuntu 18.04.6 LTS

reproduce with: mlir-opt -tosa-optional-decompositions a.mlir

a.mlir:

func.func @test_transpose_conv2D_pad_left(%arg0: tensor<1x32x32x16xf32>, %arg1: tensor<16x2x16x16xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array<i64: 0, 0, 8193, 0>, out_shape = array<i64: 1, 32, 8193, 16>, stride = array<i64: 1, 2>} :
              (tensor<1x32x32x16xf32>, tensor<16x2x16x16xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}

stack trace:

<unknown>:0: error: invalid tensor dimension size
mlir-opt: /data/szy/MLIR/llvm-release/llvm-project/mlir/include/mlir/IR/StorageUniquerSupport.h:180: static ConcreteT mlir::detail::StorageUserBase<mlir::RankedTensorType, mlir::TensorType, mlir::detail::RankedTensorTypeStorage, mlir::detail::TypeUniquer, mlir::ShapedType::Trait, mlir::ValueSemantics>::get(MLIRContext *, Args &&...) [ConcreteT = mlir::RankedTensorType, BaseT = mlir::TensorType, StorageT = mlir::detail::RankedTensorTypeStorage, UniquerT = mlir::detail::TypeUniquer, Traits = <mlir::ShapedType::Trait, mlir::ValueSemantics>, Args = <llvm::ArrayRef<long> &, mlir::Type &, mlir::Attribute &>]: Assertion `succeeded( ConcreteT::verifyInvariants(getDefaultDiagnosticEmitFn(ctx), args...))' failed.
PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
Stack dump:
0.      Program arguments: /data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt -tosa-optional-decompositions /data/szy/MLIR/seed/seed4/tmp.xkcd0pMA92.mlir
 #0 0x000055d5911b6f88 llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x1185f88)
 #1 0x000055d5911b4a9e llvm::sys::RunSignalHandlers() (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x1183a9e)
 #2 0x000055d5911b791d SignalHandler(int) Signals.cpp:0:0
 #3 0x00007f93bf621420 __restore_rt (/lib/x86_64-linux-gnu/libpthread.so.0+0x14420)
 #4 0x00007f93bec5e00b raise /build/glibc-LcI20x/glibc-2.31/signal/../sysdeps/unix/sysv/linux/raise.c:51:1
 #5 0x00007f93bec3d859 abort /build/glibc-LcI20x/glibc-2.31/stdlib/abort.c:81:7
 #6 0x00007f93bec3d729 get_sysdep_segment_value /build/glibc-LcI20x/glibc-2.31/intl/loadmsgcat.c:509:8
 #7 0x00007f93bec3d729 _nl_load_domain /build/glibc-LcI20x/glibc-2.31/intl/loadmsgcat.c:970:34
 #8 0x00007f93bec4efd6 (/lib/x86_64-linux-gnu/libc.so.6+0x33fd6)
 #9 0x000055d59471e703 (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x46ed703)
#10 0x000055d59471e628 mlir::RankedTensorType::get(llvm::ArrayRef<long>, mlir::Type, mlir::Attribute) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x46ed628)
#11 0x000055d5938425d5 mlir::tosa::SliceOp mlir::tosa::CreateOpAndInferShape<mlir::tosa::SliceOp, mlir::Value&, mlir::detail::DenseArrayAttrImpl<long>&, mlir::detail::DenseArrayAttrImpl<long>&>(mlir::ImplicitLocOpBuilder&, mlir::Type, mlir::Value&, mlir::detail::DenseArrayAttrImpl<long>&, mlir::detail::DenseArrayAttrImpl<long>&) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x38115d5)
#12 0x000055d59383dbe6 (anonymous namespace)::TransposeConvStridedConverter::matchAndRewrite(mlir::tosa::TransposeConv2DOp, mlir::PatternRewriter&) const TosaDecomposeTransposeConv.cpp:0:0
#13 0x000055d597110a21 void llvm::function_ref<void ()>::callback_fn<mlir::PatternApplicator::matchAndRewrite(mlir::Operation*, mlir::PatternRewriter&, llvm::function_ref<bool (mlir::Pattern const&)>, llvm::function_ref<void (mlir::Pattern const&)>, llvm::function_ref<llvm::LogicalResult (mlir::Pattern const&)>)::$_0>(long) PatternApplicator.cpp:0:0
#14 0x000055d59710d6eb mlir::PatternApplicator::matchAndRewrite(mlir::Operation*, mlir::PatternRewriter&, llvm::function_ref<bool (mlir::Pattern const&)>, llvm::function_ref<void (mlir::Pattern const&)>, llvm::function_ref<llvm::LogicalResult (mlir::Pattern const&)>) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x70dc6eb)
#15 0x000055d59466c11f (anonymous namespace)::GreedyPatternRewriteDriver::processWorklist() GreedyPatternRewriteDriver.cpp:0:0
#16 0x000055d594668aaf mlir::applyPatternsAndFoldGreedily(mlir::Region&, mlir::FrozenRewritePatternSet const&, mlir::GreedyRewriteConfig, bool*) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x4637aaf)
#17 0x000055d59382579b (anonymous namespace)::TosaOptionalDecompositions::runOnOperation() TosaOptionalDecompositions.cpp:0:0
#18 0x000055d5945f1936 mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c0936)
#19 0x000055d5945f2260 mlir::detail::OpToOpPassAdaptor::runPipeline(mlir::OpPassManager&, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int, mlir::PassInstrumentor*, mlir::PassInstrumentation::PipelineParentInfo const*) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c1260)
#20 0x000055d5945f760e auto void mlir::parallelForEach<__gnu_cxx::__normal_iterator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo*, std::vector<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo, std::allocator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo>>>, mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::$_0>(mlir::MLIRContext*, __gnu_cxx::__normal_iterator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo*, std::vector<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo, std::allocator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo>>>, __gnu_cxx::__normal_iterator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo*, std::vector<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo, std::allocator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo>>>, mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::$_0&&)::'lambda'(__gnu_cxx::__normal_iterator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo*, std::vector<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo, std::allocator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo>>>&&)::operator()<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo&>(__gnu_cxx::__normal_iterator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo*, std::vector<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo, std::allocator<mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool)::OpPMInfo>>>&&) const Pass.cpp:0:0
#21 0x000055d5945f38bb mlir::detail::OpToOpPassAdaptor::runOnOperationAsyncImpl(bool) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c28bb)
#22 0x000055d5945f1a8f mlir::detail::OpToOpPassAdaptor::run(mlir::Pass*, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c0a8f)
#23 0x000055d5945f2260 mlir::detail::OpToOpPassAdaptor::runPipeline(mlir::OpPassManager&, mlir::Operation*, mlir::AnalysisManager, bool, unsigned int, mlir::PassInstrumentor*, mlir::PassInstrumentation::PipelineParentInfo const*) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c1260)
#24 0x000055d5945f4832 mlir::PassManager::run(mlir::Operation*) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45c3832)
#25 0x000055d5945ed0da performActions(llvm::raw_ostream&, std::shared_ptr<llvm::SourceMgr> const&, mlir::MLIRContext*, mlir::MlirOptMainConfig const&) MlirOptMain.cpp:0:0
#26 0x000055d5945ecd2d llvm::LogicalResult llvm::function_ref<llvm::LogicalResult (std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>::callback_fn<mlir::MlirOptMain(llvm::raw_ostream&, std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&)::$_0>(long, std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&) MlirOptMain.cpp:0:0
#27 0x000055d594698fa5 mlir::splitAndProcessBuffer(std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, llvm::function_ref<llvm::LogicalResult (std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, llvm::raw_ostream&)>, llvm::raw_ostream&, llvm::StringRef, llvm::StringRef) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x4667fa5)
#28 0x000055d5945e6d15 mlir::MlirOptMain(llvm::raw_ostream&, std::unique_ptr<llvm::MemoryBuffer, std::default_delete<llvm::MemoryBuffer>>, mlir::DialectRegistry&, mlir::MlirOptMainConfig const&) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45b5d15)
#29 0x000055d5945e6fbf mlir::MlirOptMain(int, char**, llvm::StringRef, llvm::StringRef, mlir::DialectRegistry&) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45b5fbf)
#30 0x000055d5945e72ee mlir::MlirOptMain(int, char**, llvm::StringRef, mlir::DialectRegistry&) (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x45b62ee)
#31 0x000055d591197a77 main (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x1166a77)
#32 0x00007f93bec3f083 __libc_start_main /build/glibc-LcI20x/glibc-2.31/csu/../csu/libc-start.c:342:3
#33 0x000055d5911975ee _start (/data/szy/MLIR/llvm-release/llvm-project/build/bin/mlir-opt+0x11665ee)
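
For context on the assertion: RankedTensorType::get verifies the requested shape when the type is uniqued, and an out-of-range static dimension (a negative extent is what "invalid tensor dimension size" reports) trips the verifyInvariants assertion in StorageUniquerSupport.h rather than producing a recoverable error. A minimal standalone C++ sketch of the same failure mode, assuming an assert-enabled MLIR build (the -46 extent is an arbitrary illustrative value, not the one computed by the decomposition):

#include <cstdint>

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

int main() {
  mlir::MLIRContext ctx;
  mlir::Type f32 = mlir::Float32Type::get(&ctx);
  // RankedTensorType::verifyInvariants rejects negative static extents with
  // "invalid tensor dimension size"; with no diagnostic handler attached, an
  // assert-enabled build aborts via the StorageUniquerSupport.h assertion
  // shown in the trace above.
  int64_t shape[] = {1, 32, -46, 16}; // arbitrary bad extent for illustration
  auto bad = mlir::RankedTensorType::get(shape, f32);
  (void)bad;
  return 0;
}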

Activity

crash label added on Dec 3, 2024

swote-git commented on Apr 30, 2025

I have checked and reviewed your issue.

There are two problems:

  1. Your MLIR function is invalid.
    1-1. Missing operands
    - In your code, %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) does not satisfy the MLIR TOSA spec: it passes 3 operands (input, weight, bias), but tosa.transpose_conv2d requires 5 (input, weight, bias, input_zp, weight_zp).
    - See: https://mlir.llvm.org/docs/Dialects/TOSA/#tosatranspose_conv2d-mlirtosatransposeconv2dop

    1-2. out_shape = array<i64: 1, 32, 8193, 16> is incorrect.
    - OH == (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH: should be 33, but is 32.
    - OW == (IW - 1) * stride_x + out_pad_left + out_pad_right + KW: should be 8271, but is 8193.
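
Plugging the reproducer's values into these formulas (IH = IW = 32, KH = 2, KW = 16, stride_y = 1, stride_x = 2, out_pad = [top, bottom, left, right] = [0, 0, 8193, 0]) gives the 1x33x8271x16 result used below; a quick standalone check in C++ (an illustrative sketch, not code from the pass):

#include <cstdint>
#include <cstdio>

int main() {
  // Values taken from the reproducer above.
  const int64_t IH = 32, IW = 32;            // input height/width
  const int64_t KH = 2, KW = 16;             // kernel height/width (weights are 16x2x16x16)
  const int64_t stride_y = 1, stride_x = 2;
  const int64_t out_pad_top = 0, out_pad_bottom = 0;
  const int64_t out_pad_left = 8193, out_pad_right = 0;

  // Output extents per the transpose_conv2d shape relations quoted above.
  const int64_t OH = (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH; // 33
  const int64_t OW = (IW - 1) * stride_x + out_pad_left + out_pad_right + KW; // 8271

  std::printf("OH = %lld, OW = %lld\n", (long long)OH, (long long)OW);
  return 0;
}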

The corrected MLIR function should be:

func.func @test_transpose_conv2D_pad_left(%arg0: tensor<1x32x32x16xf32>, %arg1: tensor<16x2x16x16xf32>, %arg2: tensor<16xf32>) -> tensor<1x33x8271x16xf32> {
    %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
    %weight_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
    
    %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {
        acc_type = f32, 
        out_pad = array<i64: 0, 0, 8193, 0>, 
        out_shape = array<i64: 1, 33, 8271, 16>, 
        stride = array<i64: 1, 2>
    } : (tensor<1x32x32x16xf32>, tensor<16x2x16x16xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x33x8271x16xf32>
    
    return %0 : tensor<1x33x8271x16xf32>
}
  2. Even if your MLIR function were valid, you would still get errors because validation fails.

In llvm/llvm-project/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp, lines 403~428, there is a level check on the kernel sizes KH and KW.

It explicitly checks that each padding value p satisfies the same constraints as the kernel dimensions, i.e. pad <= MAX_KERNEL. This prevents a serious waste of memory and compute caused by very large padding.
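
For illustration, a minimal sketch of that kind of level check, assuming the TOSA 8K level limit MAX_KERNEL = 8192 (simplified; not the literal code from TosaValidation.cpp):

#include <cstdint>
#include <initializer_list>

// TOSA "8K" level limit that kernel sizes and pad values are checked against
// (assumption: MAX_KERNEL = 8192 as in the TOSA 8K level).
constexpr int64_t MAX_KERNEL = 8192;

// Simplified stand-in for the check described above: every out_pad value must
// not exceed MAX_KERNEL, the same bound applied to KH and KW.
bool padWithinLevel(std::initializer_list<int64_t> outPad) {
  for (int64_t p : outPad)
    if (p > MAX_KERNEL)
      return false;
  return true;
}

// For the reproducer, padWithinLevel({0, 0, 8193, 0}) is false because
// 8193 > 8192, so the TOSA validation pass would flag this out_pad.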

lhutton1 (Contributor) commented on Jul 4, 2025

I tried with @swote-git's rewritten example on a newer checkout, but could not reproduce the issue:

$ mlir-opt test.mlir --tosa-optional-decompositions
module {
  func.func @test_transpose_conv2D_pad_left(%arg0: tensor<1x32x32x16xf32>, %arg1: tensor<16x2x16x16xf32>, %arg2: tensor<16xf32>) -> tensor<1x33x8271x16xf32> {
    %0 = tosa.const_shape  {values = dense<[1, 1, 1, 16]> : tensor<4xindex>} : () -> !tosa.shape<4>
    %1 = tosa.const_shape  {values = dense<[0, 0, 0, 0, 8193, 0, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
    %2 = tosa.const_shape  {values = dense<[1, 33, 78, 16]> : tensor<4xindex>} : () -> !tosa.shape<4>
    %3 = tosa.const_shape  {values = dense<[1, 33, 39, 1, 2, 16]> : tensor<6xindex>} : () -> !tosa.shape<6>
    %4 = "tosa.const"() <{values = dense<0.000000e+00> : tensor<32xf32>}> : () -> tensor<32xf32>
    %5 = tosa.const_shape  {values = dense<[0, 0, 1, 1, 7, 7, 0, 0]> : tensor<8xindex>} : () -> !tosa.shape<8>
    %6 = tosa.const_shape  {values = dense<[32, 2, 8, 16]> : tensor<4xindex>} : () -> !tosa.shape<4>
    %7 = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
    %8 = tosa.const_shape  {values = dense<[16, 2, 1, 8, 2, 16]> : tensor<6xindex>} : () -> !tosa.shape<6>
    %9 = tosa.reshape %arg1, %8 : (tensor<16x2x16x16xf32>, !tosa.shape<6>) -> tensor<16x2x1x8x2x16xf32>
    %10 = tosa.transpose %9 {perms = array<i32: 2, 4, 0, 1, 3, 5>} : (tensor<16x2x1x8x2x16xf32>) -> tensor<1x2x16x2x8x16xf32>
    %11 = tosa.reshape %10, %6 : (tensor<1x2x16x2x8x16xf32>, !tosa.shape<4>) -> tensor<32x2x8x16xf32>
    %12 = tosa.reverse %11 {axis = 1 : i32} : (tensor<32x2x8x16xf32>) -> tensor<32x2x8x16xf32>
    %13 = tosa.reverse %12 {axis = 2 : i32} : (tensor<32x2x8x16xf32>) -> tensor<32x2x8x16xf32>
    %14 = tosa.pad %arg0, %5, %7 : (tensor<1x32x32x16xf32>, !tosa.shape<8>, tensor<1xf32>) -> tensor<1x34x46x16xf32>
    %15 = tosa.conv2d %14, %13, %4, %7, %7 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<1x34x46x16xf32>, tensor<32x2x8x16xf32>, tensor<32xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x33x39x32xf32>
    %16 = tosa.reshape %15, %3 : (tensor<1x33x39x32xf32>, !tosa.shape<6>) -> tensor<1x33x39x1x2x16xf32>
    %17 = tosa.transpose %16 {perms = array<i32: 0, 1, 3, 2, 4, 5>} : (tensor<1x33x39x1x2x16xf32>) -> tensor<1x33x1x39x2x16xf32>
    %18 = tosa.reshape %17, %2 : (tensor<1x33x1x39x2x16xf32>, !tosa.shape<4>) -> tensor<1x33x78x16xf32>
    %19 = tosa.pad %18, %1, %7 : (tensor<1x33x78x16xf32>, !tosa.shape<8>, tensor<1xf32>) -> tensor<1x33x8271x16xf32>
    %20 = tosa.reshape %arg2, %0 : (tensor<16xf32>, !tosa.shape<4>) -> tensor<1x1x1x16xf32>
    %21 = tosa.add %19, %20 : (tensor<1x33x8271x16xf32>, tensor<1x1x1x16xf32>) -> tensor<1x33x8271x16xf32>
    return %21 : tensor<1x33x8271x16xf32>
  }
}

Shall we close it?

psunn (Contributor) commented on Jul 16, 2025

@Anonymous7i0em3yd Please reopen the issue if the problem persists.


Metadata

Assignees: No one assigned

Labels: crash (Prefer [crash-on-valid] or [crash-on-invalid]), mlir:tosa

Projects: No projects

Milestone: No milestone

Relationships: None yet

Development: No branches or pull requests

Participants: @EugeneZelenko, @lhutton1, @psunn, @Anonymous7i0em3yd, @swote-git