Merge pull request #10635 from ziglang/stage2-x86_64-params-stack

stage2: fix passing arguments on the stack on x86_64
Jakub Konka 2022-01-20 00:45:34 +01:00 committed by GitHub
commit 538c9e7baf
3 changed files with 234 additions and 143 deletions
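The change below teaches the self-hosted x86_64 backend to handle arguments that do not fit in the six System V integer parameter registers, i.e. arguments the C calling convention passes on the stack. A minimal sketch of the kind of code this affects follows; it is not taken from the PR's test suite, and the function and struct names are invented for illustration:

const std = @import("std");
const expect = std.testing.expect;

// More than six integer arguments: under the SysV x86_64 C calling convention
// the seventh and eighth arguments are passed on the stack.
fn addEight(a: u64, b: u64, c: u64, d: u64, e: u64, f: u64, g: u64, h: u64) u64 {
    return a + b + c + d + e + f + g + h;
}

// A by-value struct is also pushed onto the stack by this backend
// (see the resolveCallingConventionValues hunk below).
const Pair = struct { x: u64, y: u64 };

fn sumPair(p: Pair) u64 {
    return p.x + p.y;
}

test "stack-passed arguments" {
    try expect(addEight(1, 2, 3, 4, 5, 6, 7, 8) == 36);
    try expect(sumPair(.{ .x = 40, .y = 2 }) == 42);
}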


@@ -61,6 +61,8 @@ end_di_column: u32,
 /// which is a relative jump, based on the address following the reloc.
 exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
+stack_args_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
 /// Whenever there is a runtime branch, we push a Branch onto this stack,
 /// and pop it off when the runtime branch joins. This provides an "overlay"
 /// of the table of mappings from instructions to `MCValue` from within the branch.
@@ -182,7 +184,7 @@ const Branch = struct {
 const StackAllocation = struct {
     inst: Air.Inst.Index,
-    /// TODO do we need size? should be determined by inst.ty.abiSize()
+    /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
     size: u32,
 };
@@ -284,6 +286,7 @@ pub fn generate(
     defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
     defer function.mir_instructions.deinit(bin_file.allocator);
     defer function.mir_extra.deinit(bin_file.allocator);
+    defer function.stack_args_relocs.deinit(bin_file.allocator);
     defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();

     var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
@@ -459,13 +462,11 @@ fn gen(self: *Self) InnerError!void {
         // Thus we don't need to adjust the stack for the first push instruction. However,
         // any subsequent push of values on the stack such as when preserving registers,
         // needs to be taken into account here.
-        var stack_adjustment: i32 = 0;
+        var stack_adjustment: u32 = 0;
         inline for (callee_preserved_regs) |reg, i| {
             if (self.register_manager.isRegAllocated(reg)) {
                 callee_preserved_regs_push_data |= 1 << @intCast(u5, i);
-                if (self.target.isDarwin()) {
-                    stack_adjustment += @divExact(reg.size(), 8);
-                }
+                stack_adjustment += @divExact(reg.size(), 8);
             }
         }
         const data = self.mir_instructions.items(.data);
@@ -490,23 +491,33 @@ fn gen(self: *Self) InnerError!void {
         if (stack_end > math.maxInt(i32) - stack_adjustment) {
             return self.failSymbol("too much stack used in call parameters", .{});
         }
-        const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
-        if (aligned_stack_end > 0 or stack_adjustment > 0) {
+        // TODO we should reuse this mechanism to align the stack when calling any function even if
+        // we do not pass any args on the stack BUT we still push regs to stack with `push` inst.
+        const aligned_stack_end = @intCast(u32, mem.alignForward(stack_end, self.stack_align));
+        if (aligned_stack_end > 0 or (stack_adjustment > 0 and self.target.isDarwin())) {
+            const imm = if (self.target.isDarwin()) aligned_stack_end + stack_adjustment else aligned_stack_end;
             self.mir_instructions.set(backpatch_stack_sub, .{
                 .tag = .sub,
                 .ops = (Mir.Ops{
                     .reg1 = .rsp,
                 }).encode(),
-                .data = .{ .imm = @bitCast(u32, @intCast(i32, aligned_stack_end) + stack_adjustment) },
+                .data = .{ .imm = imm },
             });
             self.mir_instructions.set(backpatch_stack_add, .{
                 .tag = .add,
                 .ops = (Mir.Ops{
                     .reg1 = .rsp,
                 }).encode(),
-                .data = .{ .imm = @bitCast(u32, @intCast(i32, aligned_stack_end) + stack_adjustment) },
+                .data = .{ .imm = imm },
             });
         }
+        while (self.stack_args_relocs.popOrNull()) |index| {
+            // TODO like above, gotta figure out the alignment shenanigans for macOS, etc.
+            const adjustment = if (self.target.isDarwin()) 2 * stack_adjustment else stack_adjustment;
+            // +16 bytes to account for saved return address of the `call` instruction and
+            // `push rbp`.
+            self.mir_instructions.items(.data)[index].imm += adjustment + aligned_stack_end + 16;
+        }
     } else {
         _ = try self.addInst(.{
             .tag = .dbg_prologue_end,
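It is worth spelling out the arithmetic in the fix-up loop above; this is my reading of the hunk, not text from the commit. Each recorded instruction initially encodes the argument's offset `off` relative to the stack pointer as the caller left it, and the relocation shifts it past everything the prologue put on the stack:

// Rough frame layout after the prologue, lowest address first (assumed for
// illustration; `adjustment` is stack_adjustment, doubled on Darwin per the
// TODO above):
//
//   rsp + 0                                          locals (aligned_stack_end bytes)
//   rsp + aligned_stack_end                          saved callee-preserved registers (adjustment bytes)
//   rsp + aligned_stack_end + adjustment             saved rbp (8 bytes, from `push rbp`)
//   rsp + aligned_stack_end + adjustment + 8         return address (8 bytes, pushed by `call`)
//   rsp + aligned_stack_end + adjustment + 16 + off  the caller-passed argument at offset `off`
//
// hence the relocation adds `adjustment + aligned_stack_end + 16` to `imm`.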
@@ -1613,6 +1624,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
                 return self.genInlineMemcpy(
                     @bitCast(u32, -@intCast(i32, off + abi_size)),
+                    .rbp,
                     registerAlias(addr_reg, @divExact(reg.size(), 8)),
                     count_reg.to64(),
                     tmp_reg.to8(),
@@ -2157,9 +2169,6 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     const arg_index = self.arg_index;
     self.arg_index += 1;
-    const ty = self.air.typeOfIndex(inst);
-    _ = ty;
     const mcv = self.args[arg_index];
     const payload = try self.addExtra(Mir.ArgDbgInfo{
         .air_inst = inst,
@@ -2173,14 +2182,68 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAirBookkeeping();

-    switch (mcv) {
-        .register => |reg| {
-            self.register_manager.getRegAssumeFree(reg.to64(), inst);
-        },
-        else => {},
-    }
-    return self.finishAir(inst, mcv, .{ .none, .none, .none });
+    const dst_mcv: MCValue = blk: {
+        switch (mcv) {
+            .register => |reg| {
+                self.register_manager.getRegAssumeFree(reg.to64(), inst);
+                break :blk mcv;
+            },
+            .stack_offset => |off| {
+                const ty = self.air.typeOfIndex(inst);
+                const abi_size = ty.abiSize(self.target.*);
+                if (abi_size <= 8) {
+                    const reg = try self.register_manager.allocReg(inst, &.{});
+                    const reloc = try self.addInst(.{
+                        .tag = .mov,
+                        .ops = (Mir.Ops{
+                            .reg1 = registerAlias(reg, @intCast(u32, abi_size)),
+                            .reg2 = .rsp,
+                            .flags = 0b01,
+                        }).encode(),
+                        .data = .{ .imm = off },
+                    });
+                    try self.stack_args_relocs.append(self.bin_file.allocator, reloc);
+                    break :blk .{ .register = reg };
+                }
+                // TODO copy ellision
+                const dst_mcv = try self.allocRegOrMem(inst, false);
+                const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, &.{ .rax, .rcx });
+                const addr_reg = regs[0];
+                const count_reg = regs[1];
+                const tmp_reg = regs[2];
+                try self.register_manager.getReg(.rax, null);
+                try self.register_manager.getReg(.rcx, null);
+                const reloc = try self.addInst(.{
+                    .tag = .lea,
+                    .ops = (Mir.Ops{
+                        .reg1 = addr_reg.to64(),
+                        .reg2 = .rsp,
+                    }).encode(),
+                    .data = .{ .imm = off },
+                });
+                try self.stack_args_relocs.append(self.bin_file.allocator, reloc);
+                // TODO allow for abi_size to be u64
+                try self.genSetReg(Type.initTag(.u32), count_reg, .{ .immediate = @intCast(u32, abi_size) });
+                try self.genInlineMemcpy(
+                    @bitCast(u32, -@intCast(i32, dst_mcv.stack_offset + abi_size)),
+                    .rbp,
+                    addr_reg.to64(),
+                    count_reg.to64(),
+                    tmp_reg.to8(),
+                );
+                break :blk dst_mcv;
+            },
+            else => unreachable,
+        }
+    };
+    return self.finishAir(inst, dst_mcv, .{ .none, .none, .none });
 }

 fn airBreakpoint(self: *Self) !void {
@@ -2201,6 +2264,64 @@ fn airFence(self: *Self) !void {
     //return self.finishAirBookkeeping();
 }

+fn genSetStackArg(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
+    const abi_size = ty.abiSize(self.target.*);
+    switch (mcv) {
+        .dead => unreachable,
+        .ptr_embedded_in_code => unreachable,
+        .unreach, .none => return,
+        .register => |reg| {
+            _ = try self.addInst(.{
+                .tag = .mov,
+                .ops = (Mir.Ops{
+                    .reg1 = .rsp,
+                    .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
+                    .flags = 0b10,
+                }).encode(),
+                .data = .{ .imm = @bitCast(u32, -@intCast(i32, stack_offset + abi_size)) },
+            });
+        },
+        .ptr_stack_offset => {
+            const reg = try self.copyToTmpRegister(ty, mcv);
+            return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
+        },
+        .stack_offset => |unadjusted_off| {
+            if (abi_size <= 8) {
+                const reg = try self.copyToTmpRegister(ty, mcv);
+                return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg });
+            }
+            const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, &.{ .rax, .rcx });
+            const addr_reg = regs[0];
+            const count_reg = regs[1];
+            const tmp_reg = regs[2];
+            try self.register_manager.getReg(.rax, null);
+            try self.register_manager.getReg(.rcx, null);
+            _ = try self.addInst(.{
+                .tag = .lea,
+                .ops = (Mir.Ops{
+                    .reg1 = addr_reg.to64(),
+                    .reg2 = .rbp,
+                }).encode(),
+                .data = .{ .imm = @bitCast(u32, -@intCast(i32, unadjusted_off + abi_size)) },
+            });
+            // TODO allow for abi_size to be u64
+            try self.genSetReg(Type.initTag(.u32), count_reg, .{ .immediate = @intCast(u32, abi_size) });
+            try self.genInlineMemcpy(
+                @bitCast(u32, -@intCast(i32, stack_offset + abi_size)),
+                .rsp,
+                addr_reg.to64(),
+                count_reg.to64(),
+                tmp_reg.to8(),
+            );
+        },
+        else => return self.fail("TODO implement args on stack for {}", .{mcv}),
+    }
+}
+
 fn airCall(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
@@ -2217,43 +2338,58 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
     var info = try self.resolveCallingConventionValues(fn_ty);
     defer info.deinit(self);

+    var count: usize = info.args.len;
+    var stack_adjustment: u32 = 0;
+    while (count > 0) : (count -= 1) {
+        const arg_i = count - 1;
+        const mc_arg = info.args[arg_i];
+        const arg = args[arg_i];
+        const arg_ty = self.air.typeOf(arg);
+        const arg_mcv = try self.resolveInst(args[arg_i]);
+        // Here we do not use setRegOrMem even though the logic is similar, because
+        // the function call will move the stack pointer, so the offsets are different.
+        switch (mc_arg) {
+            .none => continue,
+            .register => |reg| {
+                try self.register_manager.getReg(reg, null);
+                try self.genSetReg(arg_ty, reg, arg_mcv);
+            },
+            .stack_offset => |off| {
+                const abi_size = arg_ty.abiSize(self.target.*);
+                try self.genSetStackArg(arg_ty, off, arg_mcv);
+                stack_adjustment += @intCast(u32, abi_size);
+            },
+            .ptr_stack_offset => {
+                return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
+            },
+            .ptr_embedded_in_code => {
+                return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
+            },
+            .undef => unreachable,
+            .immediate => unreachable,
+            .unreach => unreachable,
+            .dead => unreachable,
+            .embedded_in_code => unreachable,
+            .memory => unreachable,
+            .compare_flags_signed => unreachable,
+            .compare_flags_unsigned => unreachable,
+        }
+    }
+
+    if (stack_adjustment > 0) {
+        // Adjust the stack
+        _ = try self.addInst(.{
+            .tag = .sub,
+            .ops = (Mir.Ops{
+                .reg1 = .rsp,
+            }).encode(),
+            .data = .{ .imm = stack_adjustment },
+        });
+    }
     // Due to incremental compilation, how function calls are generated depends
     // on linking.
     if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
-        for (info.args) |mc_arg, arg_i| {
-            const arg = args[arg_i];
-            const arg_ty = self.air.typeOf(arg);
-            const arg_mcv = try self.resolveInst(args[arg_i]);
-            // Here we do not use setRegOrMem even though the logic is similar, because
-            // the function call will move the stack pointer, so the offsets are different.
-            switch (mc_arg) {
-                .none => continue,
-                .register => |reg| {
-                    try self.register_manager.getReg(reg, null);
-                    try self.genSetReg(arg_ty, reg, arg_mcv);
-                },
-                .stack_offset => |off| {
-                    // Here we need to emit instructions like this:
-                    // mov qword ptr [rsp + stack_offset], x
-                    try self.genSetStack(arg_ty, off, arg_mcv);
-                },
-                .ptr_stack_offset => {
-                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
-                },
-                .ptr_embedded_in_code => {
-                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
-                },
-                .undef => unreachable,
-                .immediate => unreachable,
-                .unreach => unreachable,
-                .dead => unreachable,
-                .embedded_in_code => unreachable,
-                .memory => unreachable,
-                .compare_flags_signed => unreachable,
-                .compare_flags_unsigned => unreachable,
-            }
-        }
         if (self.air.value(callee)) |func_value| {
             if (func_value.castTag(.function)) |func_payload| {
                 const func = func_payload.data;
@@ -2292,41 +2428,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
             });
         }
     } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
-        for (info.args) |mc_arg, arg_i| {
-            const arg = args[arg_i];
-            const arg_ty = self.air.typeOf(arg);
-            const arg_mcv = try self.resolveInst(args[arg_i]);
-            // Here we do not use setRegOrMem even though the logic is similar, because
-            // the function call will move the stack pointer, so the offsets are different.
-            switch (mc_arg) {
-                .none => continue,
-                .register => |reg| {
-                    // TODO prevent this macho if block to be generated for all archs
-                    try self.register_manager.getReg(reg, null);
-                    try self.genSetReg(arg_ty, reg, arg_mcv);
-                },
-                .stack_offset => |off| {
-                    // Here we need to emit instructions like this:
-                    // mov qword ptr [rsp + stack_offset], x
-                    try self.genSetStack(arg_ty, off, arg_mcv);
-                },
-                .ptr_stack_offset => {
-                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
-                },
-                .ptr_embedded_in_code => {
-                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
-                },
-                .undef => unreachable,
-                .immediate => unreachable,
-                .unreach => unreachable,
-                .dead => unreachable,
-                .embedded_in_code => unreachable,
-                .memory => unreachable,
-                .compare_flags_signed => unreachable,
-                .compare_flags_unsigned => unreachable,
-            }
-        }
         if (self.air.value(callee)) |func_value| {
             if (func_value.castTag(.function)) |func_payload| {
                 const func = func_payload.data;
@@ -2369,39 +2470,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
             });
         }
     } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
-        for (info.args) |mc_arg, arg_i| {
-            const arg = args[arg_i];
-            const arg_ty = self.air.typeOf(arg);
-            const arg_mcv = try self.resolveInst(args[arg_i]);
-            // Here we do not use setRegOrMem even though the logic is similar, because
-            // the function call will move the stack pointer, so the offsets are different.
-            switch (mc_arg) {
-                .none => continue,
-                .register => |reg| {
-                    try self.register_manager.getReg(reg, null);
-                    try self.genSetReg(arg_ty, reg, arg_mcv);
-                },
-                .stack_offset => |off| {
-                    // Here we need to emit instructions like this:
-                    // mov qword ptr [rsp + stack_offset], x
-                    try self.genSetStack(arg_ty, off, arg_mcv);
-                },
-                .ptr_stack_offset => {
-                    return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
-                },
-                .ptr_embedded_in_code => {
-                    return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
-                },
-                .undef => unreachable,
-                .immediate => unreachable,
-                .unreach => unreachable,
-                .dead => unreachable,
-                .embedded_in_code => unreachable,
-                .memory => unreachable,
-                .compare_flags_signed => unreachable,
-                .compare_flags_unsigned => unreachable,
-            }
-        }
         if (self.air.value(callee)) |func_value| {
             if (func_value.castTag(.function)) |func_payload| {
                 try p9.seeDecl(func_payload.data.owner_decl);
@@ -2433,6 +2501,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
         }
     } else unreachable;

+    if (stack_adjustment > 0) {
+        // Readjust the stack
+        _ = try self.addInst(.{
+            .tag = .add,
+            .ops = (Mir.Ops{
+                .reg1 = .rsp,
+            }).encode(),
+            .data = .{ .imm = stack_adjustment },
+        });
+    }
+
     const result: MCValue = result: {
         switch (info.return_value) {
             .register => |reg| {
@@ -3346,6 +3425,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
             return self.genInlineMemcpy(
                 @bitCast(u32, -@intCast(i32, stack_offset + abi_size)),
+                .rbp,
                 addr_reg.to64(),
                 count_reg.to64(),
                 tmp_reg.to8(),
@@ -3357,6 +3437,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
 fn genInlineMemcpy(
     self: *Self,
     stack_offset: u32,
+    stack_reg: Register,
     addr_reg: Register,
     count_reg: Register,
     tmp_reg: Register,
@@ -3410,7 +3491,7 @@ fn genInlineMemcpy(
     _ = try self.addInst(.{
         .tag = .mov_scale_dst,
         .ops = (Mir.Ops{
-            .reg1 = .rbp,
+            .reg1 = stack_reg,
             .reg2 = tmp_reg.to8(),
         }).encode(),
         .data = .{ .imm = stack_offset },
@@ -4140,15 +4221,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             return result;
         },
         .Unspecified, .C => {
+            // First, split into args that can be passed via registers.
+            // This will make it easier to then push the rest of args in reverse
+            // order on the stack.
            var next_int_reg: usize = 0;
-            var next_stack_offset: u32 = 0;
+            var by_reg = std.AutoHashMap(usize, usize).init(self.bin_file.allocator);
+            defer by_reg.deinit();

            for (param_types) |ty, i| {
-                if (!ty.hasCodeGenBits()) {
-                    assert(cc != .C);
-                    result.args[i] = .{ .none = {} };
-                    continue;
-                }
+                if (!ty.hasCodeGenBits()) continue;
                const param_size = @intCast(u32, ty.abiSize(self.target.*));
                const pass_in_reg = switch (ty.zigTypeTag()) {
                    .Bool => true,
@@ -4158,17 +4238,27 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                    else => false,
                };
                if (pass_in_reg) {
-                    if (next_int_reg >= c_abi_int_param_regs.len) {
-                        result.args[i] = .{ .stack_offset = next_stack_offset };
-                        next_stack_offset += param_size;
-                    } else {
-                        const aliased_reg = registerAlias(
-                            c_abi_int_param_regs[next_int_reg],
-                            param_size,
-                        );
-                        result.args[i] = .{ .register = aliased_reg };
-                        next_int_reg += 1;
-                    }
+                    if (next_int_reg >= c_abi_int_param_regs.len) break;
+                    try by_reg.putNoClobber(i, next_int_reg);
+                    next_int_reg += 1;
+                }
+            }
+
+            var next_stack_offset: u32 = 0;
+            var count: usize = param_types.len;
+            while (count > 0) : (count -= 1) {
+                const i = count - 1;
+                const ty = param_types[i];
+                if (!ty.hasCodeGenBits()) {
+                    assert(cc != .C);
+                    result.args[i] = .{ .none = {} };
+                    continue;
+                }
+                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                if (by_reg.get(i)) |int_reg| {
+                    const aliased_reg = registerAlias(c_abi_int_param_regs[int_reg], param_size);
+                    result.args[i] = .{ .register = aliased_reg };
+                    next_int_reg += 1;
                } else {
                    // For simplicity of codegen, slices and other types are always pushed onto the stack.
                    // TODO: look into optimizing this by passing things as registers sometimes,
@@ -4179,6 +4269,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                    next_stack_offset += param_size;
                }
            }
            result.stack_byte_count = next_stack_offset;
            result.stack_align = 16;
        },
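The rewritten resolveCallingConventionValues above classifies parameters in two passes: first it records which parameters fit in the six C ABI integer registers, then it walks the parameter list in reverse and hands out increasing stack offsets to everything else. A standalone sketch of that idea follows; the names are invented, it lives outside the compiler, and it omits the real code's handling of register aliasing:

const std = @import("std");

const Loc = union(enum) {
    register: usize, // index into the 6 C ABI integer parameter registers
    stack_offset: u32,
};

// Two-pass classification: registers in declaration order first, then
// stack offsets assigned while walking the parameters in reverse.
fn classify(comptime n: usize, sizes: [n]u32, fits_in_reg: [n]bool) [n]Loc {
    var reg_index: [n]?usize = [_]?usize{null} ** n;
    var next_int_reg: usize = 0;
    var i: usize = 0;
    while (i < n) : (i += 1) {
        if (!fits_in_reg[i]) continue;
        if (next_int_reg >= 6) break;
        reg_index[i] = next_int_reg;
        next_int_reg += 1;
    }

    var locs: [n]Loc = undefined;
    var next_stack_offset: u32 = 0;
    var count: usize = n;
    while (count > 0) : (count -= 1) {
        const idx = count - 1;
        if (reg_index[idx]) |reg_i| {
            locs[idx] = .{ .register = reg_i };
        } else {
            locs[idx] = .{ .stack_offset = next_stack_offset };
            next_stack_offset += sizes[idx];
        }
    }
    return locs;
}

test "registers first, then reverse-order stack offsets" {
    // Eight 8-byte integer parameters: six land in registers, the last two on the stack.
    const locs = classify(8, [_]u32{8} ** 8, [_]bool{true} ** 8);
    try std.testing.expect(locs[5] == .register);
    try std.testing.expectEqual(@as(u32, 0), locs[7].stack_offset);
    try std.testing.expectEqual(@as(u32, 8), locs[6].stack_offset);
}

Walking in reverse means the last stack-passed parameter sits at offset 0, which matches how airCall lowers the arguments from last to first before adjusting rsp.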


@@ -119,7 +119,7 @@ fn fnWithAlignedStack() i32 {
 }

 test "implicitly decreasing slice alignment" {
-    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const a: u32 align(4) = 3;
     const b: u32 align(8) = 4;
@@ -130,7 +130,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
 }

 test "specifying alignment allows pointer cast" {
-    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     try testBytesAlign(0x33);
 }


@@ -20,7 +20,7 @@ test "array to slice" {
 }

 test "arrays" {
-    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var array: [5]u32 = undefined;