@@ -170058,12 +170058,9 @@ fn airTrunc(self: *CodeGen, inst: Air.Inst.Index) !void {
});
const splat_abi_size: u32 = @intCast(splat_ty.abiSize(zcu));

- const splat_val = try pt.intern(.{ .aggregate = .{
- .ty = splat_ty.ip_index,
- .storage = .{ .repeated_elem = mask_val.ip_index },
- } });
+ const splat_val = try pt.aggregateSplatValue(splat_ty, mask_val);

- const splat_mcv = try self.lowerValue(.fromInterned(splat_val));
+ const splat_mcv = try self.lowerValue(splat_val);
const splat_addr_mcv: MCValue = switch (splat_mcv) {
.memory, .indirect, .load_frame => splat_mcv.address(),
else => .{ .register = try self.copyToTmpRegister(.usize, splat_mcv.address()) },
@@ -171693,12 +171690,12 @@ fn airShlShrBinOp(self: *CodeGen, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(shift_lock);

const mask_ty = try pt.vectorType(.{ .len = 16, .child = .u8_type });
- const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = mask_ty.toIntern(),
- .storage = .{ .elems = &([1]InternPool.Index{
+ const mask_mcv = try self.lowerValue(try pt.aggregateValue(
+ mask_ty,
+ &([1]InternPool.Index{
(try rhs_ty.childType(zcu).maxIntScalar(pt, .u8)).toIntern(),
- } ++ [1]InternPool.Index{.zero_u8} ** 15) },
- } })));
+ } ++ [1]InternPool.Index{.zero_u8} ** 15),
+ ));
const mask_addr_reg = try self.copyToTmpRegister(.usize, mask_mcv.address());
const mask_addr_lock = self.register_manager.lockRegAssumeUnused(mask_addr_reg);
defer self.register_manager.unlockReg(mask_addr_lock);
@@ -181139,10 +181136,7 @@ fn genSetReg(
.child = .u8_type,
});
try self.genSetReg(dst_reg, full_ty, try self.lowerValue(
- .fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = full_ty.toIntern(),
- .storage = .{ .repeated_elem = (try pt.intValue(.u8, 0xaa)).toIntern() },
- } })),
+ try pt.aggregateSplatValue(full_ty, try pt.intValue(.u8, 0xaa)),
), opts);
},
.x87 => try self.genSetReg(dst_reg, .f80, try self.lowerValue(
@@ -183565,10 +183559,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
mask_elem_ty,
@as(u8, 1) << @truncate(bit),
)).toIntern();
- const mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = mask_ty.toIntern(),
- .storage = .{ .elems = mask_elems },
- } })));
+ const mask_mcv = try self.lowerValue(try pt.aggregateValue(mask_ty, mask_elems));
const mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, mask_mcv.address()) },
.mod = .{ .rm = .{ .size = self.memSize(ty) } },
@@ -184296,10 +184287,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
else
try select_mask_elem_ty.minIntScalar(pt, select_mask_elem_ty)).toIntern();
}
- const select_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = select_mask_ty.toIntern(),
- .storage = .{ .elems = select_mask_elems[0..mask_elems.len] },
- } })));
+ const select_mask_mcv = try self.lowerValue(
+ try pt.aggregateValue(select_mask_ty, select_mask_elems[0..mask_elems.len]),
+ );

if (self.hasFeature(.sse4_1)) {
const mir_tag: Mir.Inst.FixedTag = .{
@@ -184441,10 +184431,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
})).toIntern();
}
const lhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
- const lhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = lhs_mask_ty.toIntern(),
- .storage = .{ .elems = lhs_mask_elems[0..max_abi_size] },
- } })));
+ const lhs_mask_mcv = try self.lowerValue(
+ try pt.aggregateValue(lhs_mask_ty, lhs_mask_elems[0..max_abi_size]),
+ );
const lhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, lhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@@ -184472,10 +184461,9 @@ fn airShuffle(self: *CodeGen, inst: Air.Inst.Index) !void {
})).toIntern();
}
const rhs_mask_ty = try pt.vectorType(.{ .len = max_abi_size, .child = .u8_type });
- const rhs_mask_mcv = try self.lowerValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = rhs_mask_ty.toIntern(),
- .storage = .{ .elems = rhs_mask_elems[0..max_abi_size] },
- } })));
+ const rhs_mask_mcv = try self.lowerValue(
+ try pt.aggregateValue(rhs_mask_ty, rhs_mask_elems[0..max_abi_size]),
+ );
const rhs_mask_mem: Memory = .{
.base = .{ .reg = try self.copyToTmpRegister(.usize, rhs_mask_mcv.address()) },
.mod = .{ .rm = .{ .size = .fromSize(@max(max_abi_size, 16)) } },
@@ -192924,36 +192912,30 @@ const Select = struct {
break :res_scalar .{ res_scalar_ty, try pt.intValue_big(res_scalar_ty, res_big_int.toConst()) };
},
};
- const res_val: Value = if (res_vector_len) |len| .fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = (try pt.vectorType(.{
- .len = len,
- .child = res_scalar_ty.toIntern(),
- })).toIntern(),
- .storage = .{ .repeated_elem = res_scalar_val.toIntern() },
- } })) else res_scalar_val;
+ const res_val = if (res_vector_len) |len| try pt.aggregateSplatValue(try pt.vectorType(.{
+ .len = len,
+ .child = res_scalar_ty.toIntern(),
+ }), res_scalar_val) else res_scalar_val;
return .{ try cg.tempMemFromValue(res_val), true };
},
- .f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = (try pt.vectorType(.{ .len = 2, .child = .f64_type })).toIntern(),
- .storage = .{ .elems = &.{
+ .f64_0x1p52_0x1p84_mem => .{ try cg.tempMemFromValue(
+ try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f64_type }), &.{
(try pt.floatValue(.f64, @as(f64, 0x1p52))).toIntern(),
(try pt.floatValue(.f64, @as(f64, 0x1p84))).toIntern(),
- } },
- } }))), true },
- .u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = (try pt.vectorType(.{ .len = 4, .child = .u32_type })).toIntern(),
- .storage = .{ .elems = &(.{
+ }),
+ ), true },
+ .u32_0x1p52_hi_0x1p84_hi_0_0_mem => .{ try cg.tempMemFromValue(
+ try pt.aggregateValue(try pt.vectorType(.{ .len = 4, .child = .u32_type }), &(.{
(try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p52))) >> 32)).toIntern(),
(try pt.intValue(.u32, @as(u64, @bitCast(@as(f64, 0x1p84))) >> 32)).toIntern(),
- } ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2) },
- } }))), true },
- .f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = (try pt.vectorType(.{ .len = 2, .child = .f32_type })).toIntern(),
- .storage = .{ .elems = &.{
+ } ++ .{(try pt.intValue(.u32, 0)).toIntern()} ** 2)),
+ ), true },
+ .f32_0_0x1p64_mem => .{ try cg.tempMemFromValue(
+ try pt.aggregateValue(try pt.vectorType(.{ .len = 2, .child = .f32_type }), &.{
(try pt.floatValue(.f32, @as(f32, 0))).toIntern(),
(try pt.floatValue(.f32, @as(f32, 0x1p64))).toIntern(),
- } },
- } }))), true },
+ }),
+ ), true },
.pshufb_splat_mem => |splat_spec| {
const zcu = pt.zcu;
assert(spec.type.isVector(zcu) and spec.type.childType(zcu).toIntern() == .u8_type);
@@ -193110,13 +193092,10 @@ const Select = struct {
const mem_size = cg.unalignedSize(spec.type);
return .{ try cg.tempMemFromAlignedValue(
if (mem_size < 16) .fromByteUnits(mem_size) else .none,
- .fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = if (mem_size < 16)
- (try pt.arrayType(.{ .len = elems.len, .child = elem_ty.toIntern() })).toIntern()
- else
- spec.type.toIntern(),
- .storage = .{ .elems = elems },
- } })),
+ try pt.aggregateValue(if (mem_size < 16) try pt.arrayType(.{
+ .len = elems.len,
+ .child = elem_ty.toIntern(),
+ }) else spec.type, elems),
), true };
},
.splat_float_mem => |splat_spec| {
@@ -193133,10 +193112,7 @@ const Select = struct {
.zero => 0.0,
}))).toIntern());
@memset(elems[inside_len..], (try pt.floatValue(elem_ty, splat_spec.outside)).toIntern());
- return .{ try cg.tempMemFromValue(.fromInterned(try pt.intern(.{ .aggregate = .{
- .ty = spec.type.toIntern(),
- .storage = .{ .elems = elems },
- } }))), true };
+ return .{ try cg.tempMemFromValue(try pt.aggregateValue(spec.type, elems)), true };
},
.frame => |frame_index| .{ try cg.tempInit(spec.type, .{ .load_frame = .{
.index = frame_index,
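For context, the hunks above all apply the same refactor: call sites stop spelling out `pt.intern(.{ .aggregate = ... })` plus a `.fromInterned(...)` wrapper and instead call a helper (`pt.aggregateValue` / `pt.aggregateSplatValue`) that hands back the finished value. Below is a minimal standalone Zig sketch of that pattern only; the `Value` union and the `aggregateSplatValue` helper here are illustrative stand-ins, not the compiler's real `InternPool`/`Zcu.PerThread` API.

```zig
const std = @import("std");

// Illustrative stand-in for an interned value: either a scalar or a
// "splat" aggregate (one element repeated `len` times).
const Value = union(enum) {
    int: u64,
    splat: struct { len: usize, elem: u64 },
};

// Hypothetical helper mirroring the shape of `pt.aggregateSplatValue(ty, val)`:
// build the repeated-element aggregate in one call and return a ready-to-use Value,
// so callers no longer assemble the literal and re-wrap the result themselves.
fn aggregateSplatValue(len: usize, elem: u64) Value {
    return .{ .splat = .{ .len = len, .elem = elem } };
}

pub fn main() void {
    // Before the refactor, every call site spelled the aggregate literal out by hand.
    const verbose: Value = .{ .splat = .{ .len = 16, .elem = 0xaa } };
    // After, a single helper call produces the same value.
    const concise = aggregateSplatValue(16, 0xaa);
    std.debug.print("{} {}\n", .{ verbose.splat.elem, concise.splat.elem });
}
```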