@@ -65,15 +65,15 @@ func.func @test_quantizelinear_f8(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.
// -----
// CHECK-LABEL: @test_qlinearconv_nobias
- func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
- %0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8>
+ func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[],f32>, %arg5: !torch.vtensor<[],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+ %0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8>
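// The weight scale (%arg4) and zero point (%arg5) are rank-0 ([]-shaped) here, so the
// lowering takes the per-tensor path and quantizes the weight with
// torch.aten._make_per_tensor_quantized_tensor, as the CHECK lines below verify.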
// CHECK: %[[aZp:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],ui8> -> !torch.int
- // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[1],ui8> -> !torch.int
// CHECK: %[[cZp:.+]] = torch.aten.item %arg7 : !torch.vtensor<[],ui8> -> !torch.int
// CHECK: %[[aScale:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
- // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
// CHECK: %[[cScale:.+]] = torch.aten.item %arg6 : !torch.vtensor<[],f32> -> !torch.float
// CHECK: %[[A:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg0, %[[aScale]], %[[aZp]] : !torch.vtensor<[1,1,7,7],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,7,7],!torch.quint8>
+ // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[],f32> -> !torch.float
+ // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[],ui8> -> !torch.int
// CHECK: %[[B:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg3, %[[bScale]], %[[bZp]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
// CHECK: %[[INT0_0:.+]] = torch.constant.int 0
// CHECK: %[[INT0_1:.+]] = torch.constant.int 0
@@ -103,17 +103,17 @@ func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1:
// -----
- // CHECK-LABEL: @test_qlinearconv_bias
- func.func @test_qlinearconv_bias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>, %arg8: !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+ // CHECK-LABEL: @test_qlinearconv_bias_weight_per_channel
+ func.func @test_qlinearconv_bias_weight_per_channel(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>, %arg8: !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
%0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8>
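// The weight scale (%arg4) and zero point (%arg5) are [1]-shaped here, so the lowering
// takes the per-channel path and quantizes the weight along axis 0 with
// torch.aten._make_per_channel_quantized_tensor, as the CHECK lines below verify.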
// CHECK: %[[aZp:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],ui8> -> !torch.int
- // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[1],ui8> -> !torch.int
// CHECK: %[[cZp:.+]] = torch.aten.item %arg7 : !torch.vtensor<[],ui8> -> !torch.int
// CHECK: %[[aScale:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
- // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
// CHECK: %[[cScale:.+]] = torch.aten.item %arg6 : !torch.vtensor<[],f32> -> !torch.float
// CHECK: %[[A:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg0, %[[aScale]], %[[aZp]] : !torch.vtensor<[1,1,7,7],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,7,7],!torch.quint8>
- // CHECK: %[[B:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg3, %[[bScale]], %[[bZp]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
+ // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
+ // CHECK: %[[INT0:.+]] = torch.constant.int 0
+ // CHECK: %[[B:.+]] = torch.aten._make_per_channel_quantized_tensor %arg3, %arg4, %arg5, %[[INT0]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
// CHECK: %[[INT0_0:.+]] = torch.constant.int 0
// CHECK: %[[INT0_1:.+]] = torch.constant.int 0
// CHECK: %[[PAD:.+]] = torch.prim.ListConstruct %[[INT0_0]], %[[INT0_1]]