Skip to content

Commit 59534ac

Browse files
oscarandersson8218 and digantdesai
authored and committed
Arm backend: Update VelaIO handling
Arm backend: Update VelaIO handling

VelaIO is always 6D.
- Update AOT handling of metadata from Vela.
- Add a unit test to trigger 5D cases.
- Update EthosUBackend to read IO as 6D arrays.

Signed-off-by: Oscar Andersson <[email protected]>
Change-Id: I8d7d3a44ac84e5bb14fa27e7b7765c3b7a8ee483
1 parent 3124a6b commit 59534ac

File tree

8 files changed

+52
-11
lines changed

8 files changed

+52
-11
lines changed

backends/arm/arm_vela.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,17 +25,19 @@
2525
# per-io structs to simplify runtime use.
2626
def vela_bin_pack_io(prefix, data):
2727
vela_input_shapes = data[prefix + "_shape"]
28+
# Vela input/output shape is fixed to 6D
29+
vela_io_shape_dims = 6
2830

2931
ios = struct.pack("<i", len(vela_input_shapes))
3032
for i in range(len(vela_input_shapes)):
3133
io_shape = vela_input_shapes[i]
3234
io_elem_size = data[prefix + "_elem_size"][i]
3335
io_offset = data[prefix + "_offset"][i]
3436
io_region = data[prefix + "_region"][i]
35-
assert len(io_shape) <= 4
36-
inp_pad = io_shape.tolist() + [0] * (4 - len(io_shape))
37+
assert len(io_shape) == vela_io_shape_dims
38+
inp_pad = io_shape.tolist()
3739
io_struct = struct.pack(
38-
"<iiiiiii", *inp_pad, io_elem_size, io_offset, io_region
40+
"<iiiiiiiii", *inp_pad, io_elem_size, io_offset, io_region
3941
)
4042
ios += io_struct
4143
return ios

backends/arm/requirements-arm-ethos-u.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,4 @@
33
# This source code is licensed under the BSD-style license found in the
44
# LICENSE file in the root directory of this source tree.
55

6-
ethos-u-vela @ git+https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela@d37febc1715edf0d236c2ff555739a8a9aadcf9a
6+
ethos-u-vela @ git+https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela@9a43a1bf26bfc7588358d7e6e6bb2613b4981a34

backends/arm/runtime/EthosUBackend.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -383,8 +383,8 @@ class EthosUBackend final : public ::executorch::runtime::BackendInterface {
383383
*tensor_count = *tensor_count * tensor.size(i);
384384
}
385385

386-
// The VelaIO type has a shape of fixed size 4
387-
for (int i = 0; i < 4; i++) {
386+
// The VelaIO type has a shape of fixed size 6
387+
for (int i = 0; i < shapeDim; i++) {
388388
*io_count = *io_count * io->shape[i];
389389
}
390390
}

backends/arm/runtime/VelaBinStream.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,11 @@ typedef struct {
3434
char data[]; // block.name specific format data
3535
} VelaBinBlock;
3636

37+
constexpr int shapeDim = 6; // Number of dimensions in VelaIO
38+
3739
// A Vela input or output descriptor in the binary stream
3840
typedef struct {
39-
int shape[4]; // Up to 4D shape of input or output
41+
int shape[shapeDim]; // Shape of input or output
4042
int elem_size; // Element sizeof in bytes
4143
int offset; // Offset in bytes within SRAM working data
4244
int region; // Scratch region this belongs to

backends/arm/test/ops/test_squeeze.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ class SqueezeDim(torch.nn.Module):
2929
"squeeze3d_dim_neg_2": lambda: (torch.randn(1, 1, 5), -2),
3030
"squeeze4d_dim_pos_3": lambda: (torch.randn(1, 2, 3, 1), 3),
3131
"squeeze4d_dim_neg_2": lambda: (torch.randn(1, 5, 1, 5), -2),
32+
"squeeze5d_dim_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), -2),
3233
}
3334

3435
def forward(self, x: torch.Tensor, dim: int):
@@ -40,6 +41,7 @@ class SqueezeDims(torch.nn.Module):
4041
"squeeze3d_dims_0_1": lambda: (torch.randn(1, 1, 5), (0, 1)),
4142
"squeeze4d_dims_0_neg_1": lambda: (torch.randn(1, 5, 5, 1), (0, -1)),
4243
"squeeze4d_dims_0_neg_2": lambda: (torch.randn(1, 5, 1, 5), (0, -2)),
44+
"squeeze5d_dims_0_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), (0, -2)),
4345
}
4446

4547
def forward(self, x: torch.Tensor, dims: tuple[int]):
@@ -51,6 +53,7 @@ class Squeeze(torch.nn.Module):
5153
"squeeze3d": lambda: (torch.randn(1, 1, 5),),
5254
"squeeze4d_dims": lambda: (torch.randn(1, 5, 5, 1),),
5355
"squeeze3d_dims_mix": lambda: (torch.randn(1, 5, 1, 5),),
56+
"squeeze4d_dims_mix": lambda: (torch.randn(1, 1, 5, 1, 5),),
5457
}
5558

5659
def forward(self, x: torch.Tensor):

backends/arm/test/ops/test_unflatten.py

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@
99
import torch
1010
from executorch.backends.arm.test import common
1111
from executorch.backends.arm.test.tester.test_pipeline import (
12+
EthosU55PipelineINT,
13+
EthosU85PipelineINT,
1214
TosaPipelineFP,
1315
TosaPipelineINT,
1416
VgfPipeline,
@@ -30,8 +32,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
3032
return torch.unflatten(x, self.dim, self.sizes)
3133

3234
test_data: dict[str, test_data_t] = {
33-
"randn_4d": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
34-
"rand_3d": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
35+
"rand_3d_batch3": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
36+
"rand_3d_batch1": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(1, 4, 4),))),
37+
"randn_4d_dim1": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
38+
"randn_4d_dim3": (lambda: (Unflatten(3, (2, 2)), (torch.randn(1, 1, 5, 4),))),
3539
}
3640

3741

@@ -49,7 +53,33 @@ def test_unflatten_int_tosa_FP(test_data: test_data_t):
4953
@common.parametrize("test_data", Unflatten.test_data)
5054
def test_unflatten_int_tosa_INT(test_data: test_data_t):
5155
module, inputs = test_data()
52-
pipeline = TosaPipelineINT[input_t](
56+
pipeline = TosaPipelineINT[input_t](module, inputs, Unflatten.aten_op)
57+
pipeline.run()
58+
59+
60+
xfails = {
61+
"rand_3d_batch3": "Batch size > 1 currently not supported for FVP tests",
62+
"randn_4d_dim1": "Batch size > 1 currently not supported for FVP tests",
63+
}
64+
65+
66+
@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
67+
@common.XfailIfNoCorstone300
68+
def test_unflatten_int_u55_INT(test_data: test_data_t):
69+
module, inputs = test_data()
70+
pipeline = EthosU55PipelineINT[input_t](
71+
module,
72+
inputs,
73+
Unflatten.aten_op,
74+
)
75+
pipeline.run()
76+
77+
78+
@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
79+
@common.XfailIfNoCorstone320
80+
def test_unflatten_int_u85_INT(test_data: test_data_t):
81+
module, inputs = test_data()
82+
pipeline = EthosU85PipelineINT[input_t](
5383
module,
5484
inputs,
5585
Unflatten.aten_op,

backends/arm/test/ops/test_unsqueeze.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525

2626

2727
class Unsqueeze(torch.nn.Module):
28-
shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3)]
28+
shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3), (1, 5, 4, 3)]
2929
test_parameters = {}
3030
for n in shapes:
3131
test_parameters[f"rand_{n}"] = (torch.randn(n),)

backends/arm/test/ops/test_view.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,10 @@ class View(torch.nn.Module):
5151
"rand_4d_4_3": lambda: (torch.rand(5, 10, 1, 1), (1, 25, 2)),
5252
"rand_4d_4_2": lambda: (torch.rand(2, 50, 1, 1), (1, 100)),
5353
"rand_4d_2_4_same": lambda: (torch.rand(2, 3, 2, 3), (2, 3, 3, 2)),
54+
"rand_4d_5d": lambda: (torch.rand(1, 3, 4, 5), (1, 1, 4, 5, -1)),
55+
"rand_5d_5d": lambda: (torch.rand(1, 1, 4, 5, 6), (1, 1, 4, -1, 6)),
56+
"rand_5d_3d": lambda: (torch.rand(1, 1, 4, 5, 6), (2, 3, -1)),
57+
"rand_3d_5d": lambda: (torch.rand(4, 5, 6), (1, 1, 2, -1, 3)),
5458
}
5559

5660
rank_product_too_large = {

0 commit comments

Comments (0)