Merge pull request #10625 from ziglang/stage2-x86_64-inline-memset

stage2: add inline memset, partial intcast and more array goodness for x86_64
Jakub Konka 2022-01-18 22:23:58 +01:00 committed by GitHub
commit e80ebc6740
7 changed files with 327 additions and 71 deletions
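For context: before this change, the x86_64 stage2 backend bailed out with "TODO implement memset" for `undefined` stack values wider than 8 bytes, and `@intCast` between integers of different bit widths was unimplemented. A minimal sketch of code the backend can now handle (hypothetical test, not part of this commit):

const std = @import("std");

test "stage2 x86_64 sketch" {
    // More than 8 bytes of `undefined` stack memory: now 0xaa-filled inline.
    var buf: [16]u8 = undefined;
    buf[3] = 42;
    // Widening @intCast between same-signedness integers: the destination
    // register is zeroed first, then the operand is moved in.
    var small: u8 = 200;
    var wide: u64 = @intCast(u64, small);
    try std.testing.expect(buf[3] == 42 and wide == 200);
}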


@@ -876,10 +876,26 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (info_a.signedness != info_b.signedness)
return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
if (info_a.bits == info_b.bits)
return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none });
const operand_abi_size = operand_ty.abiSize(self.target.*);
const dest_ty = self.air.typeOfIndex(inst);
const dest_abi_size = dest_ty.abiSize(self.target.*);
const dst_mcv: MCValue = blk: {
if (info_a.bits == info_b.bits) {
break :blk operand;
}
if (operand_abi_size > 8 or dest_abi_size > 8) {
return self.fail("TODO implement intCast for abi sizes larger than 8", .{});
}
const reg = switch (operand) {
.register => |src_reg| try self.register_manager.allocReg(inst, &.{src_reg}),
else => try self.register_manager.allocReg(inst, &.{}),
};
try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
try self.genSetReg(dest_ty, reg, operand);
break :blk .{ .register = registerAlias(reg, @intCast(u32, dest_abi_size)) };
};
return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch});
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
}
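The new intCast path covers casts between differently sized integers of up to 8 bytes each: it zeroes a destination register, moves the operand in, and returns a register alias sized to the destination type, which amounts to a zero-extension on widening casts. A hedged usage sketch (hypothetical test):

test "intCast widening (sketch)" {
    var x: u16 = 0xABCD;
    var y: u32 = @intCast(u32, x); // reg zeroed, then the low 16 bits are set
    try @import("std").testing.expect(y == 0xABCD);
}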
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
@@ -1312,59 +1328,49 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
const reg = try self.register_manager.allocReg(null, &.{});
try self.genSetReg(index_ty, reg, index);
try self.genIMulOpMir(index_ty, .{ .register = reg }, .{ .immediate = elem_size });
return reg;
}
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
const slice_mcv = try self.resolveInst(bin_op.lhs);
const slice_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType();
const elem_size = elem_ty.abiSize(self.target.*);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
const offset_reg = blk: {
const index_ty = self.air.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const offset_reg = try self.register_manager.allocReg(null, &.{});
try self.genSetReg(index_ty, offset_reg, index_mcv);
try self.genIMulOpMir(index_ty, .{ .register = offset_reg }, .{ .immediate = elem_size });
break :blk offset_reg;
};
const dst_mcv = blk: {
switch (slice_mcv) {
.stack_offset => |off| {
const dst_mcv = try self.allocRegOrMem(inst, false);
const addr_reg = try self.register_manager.allocReg(null, &.{offset_reg});
// mov reg, [rbp - 8]
_ = try self.addInst(.{
.tag = .mov,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
.flags = 0b01,
}).encode(),
.data = .{ .imm = @bitCast(u32, -@intCast(i32, off + 16)) },
});
// add addr, offset
_ = try self.addInst(.{
.tag = .add,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.reg2 = offset_reg.to64(),
}).encode(),
.data = undefined,
});
try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
break :blk dst_mcv;
},
else => return self.fail("TODO implement slice_elem_val when slice is {}", .{slice_mcv}),
}
};
const index_ty = self.air.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_size);
const addr_reg = try self.register_manager.allocReg(null, &.{offset_reg});
switch (slice_mcv) {
.stack_offset => |off| {
// mov reg, [rbp - 8]
_ = try self.addInst(.{
.tag = .mov,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
.flags = 0b01,
}).encode(),
.data = .{ .imm = @bitCast(u32, -@intCast(i32, off + 16)) },
});
},
else => return self.fail("TODO implement slice_elem_val when slice is {}", .{slice_mcv}),
}
// TODO we could allocate a register here, but we would need to exclude the addr
// register and potentially the offset register.
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genBinMathOpMir(.add, slice_ptr_field_type, .unsigned, .{ .register = addr_reg.to64() }, .{
.register = offset_reg.to64(),
});
try self.load(dst_mcv, .{ .register = addr_reg.to64() }, slice_ptr_field_type);
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
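Together with the new `elemOffset` helper, `airSliceElemVal` now reduces to: scale the index by the element size, add it to the slice's base pointer, and load through the result. A hypothetical test exercising the path:

test "slice element load (sketch)" {
    var arr = [_]u32{ 10, 20, 30, 40 };
    var s: []u32 = &arr;
    var i: usize = 2;
    // offset_reg = i * 4 (elemOffset); addr_reg = slice ptr + offset_reg; load
    try @import("std").testing.expect(s[i] == 30);
}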
@@ -1382,10 +1388,43 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const array_ty = self.air.typeOf(bin_op.lhs);
const array = try self.resolveInst(bin_op.lhs);
const array_abi_size = array_ty.abiSize(self.target.*);
const elem_ty = array_ty.childType();
const elem_abi_size = elem_ty.abiSize(self.target.*);
const index_ty = self.air.typeOf(bin_op.rhs);
const index = try self.resolveInst(bin_op.rhs);
const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
const addr_reg = try self.register_manager.allocReg(null, &.{offset_reg});
switch (array) {
.stack_offset => |off| {
// lea reg, [rbp]
_ = try self.addInst(.{
.tag = .lea,
.ops = (Mir.Ops{
.reg1 = addr_reg.to64(),
.reg2 = .rbp,
}).encode(),
.data = .{ .imm = @bitCast(u32, -@intCast(i32, off + array_abi_size)) },
});
},
else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
}
// TODO we could allocate a register here, but we would need to exclude the addr
// register and potentially the offset register.
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genBinMathOpMir(
.add,
array_ty,
.unsigned,
.{ .register = addr_reg.to64() },
.{ .register = offset_reg.to64() },
);
try self.load(dst_mcv, .{ .register = addr_reg.to64() }, array_ty);
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
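`airArrayElemVal` mirrors the slice path, except the base address is the array's own stack slot (`lea` relative to rbp at `off + array_abi_size`). Hypothetical test:

test "array element load (sketch)" {
    var arr = [_]u8{ 1, 2, 3, 4 };
    var i: usize = 1;
    try @import("std").testing.expect(arr[i] == 2);
}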
@@ -1402,10 +1441,27 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.air.typeOf(extra.lhs);
const ptr = try self.resolveInst(extra.lhs);
const elem_ty = ptr_ty.elemType2();
const elem_abi_size = elem_ty.abiSize(self.target.*);
const index_ty = self.air.typeOf(extra.rhs);
const index = try self.resolveInst(extra.rhs);
const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
const dst_mcv = blk: {
switch (ptr) {
.ptr_stack_offset => {
const reg = try self.register_manager.allocReg(inst, &.{offset_reg});
try self.genSetReg(ptr_ty, reg, ptr);
break :blk .{ .register = reg };
},
else => return self.fail("TODO implement ptr_elem_ptr when ptr is {}", .{ptr}),
}
};
try self.genBinMathOpMir(.add, ptr_ty, .unsigned, dst_mcv, .{ .register = offset_reg });
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
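`airPtrElemPtr` materializes the pointer into a register and adds the scaled index, so the result is itself the element address. Sketch (hypothetical test):

test "pointer element address (sketch)" {
    var arr = [_]u16{ 5, 6, 7 };
    var p: [*]u16 = &arr;
    var i: usize = 1;
    const q = &p[i]; // ptr_elem_ptr: reg = base ptr; reg += i * 2
    try @import("std").testing.expect(q.* == 6);
}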
@@ -3147,7 +3203,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }),
else => return self.fail("TODO implement memset", .{}),
else => return self.genInlineMemset(ty, stack_offset, .{ .immediate = 0xaa }),
}
},
.compare_flags_unsigned => |op| {
@@ -3397,6 +3453,97 @@ fn genInlineMemcpy(
try self.performReloc(loop_reloc);
}
fn genInlineMemset(self: *Self, ty: Type, stack_offset: u32, value: MCValue) InnerError!void {
try self.register_manager.getReg(.rax, null);
const abi_size = ty.abiSize(self.target.*);
const adj_off = stack_offset + abi_size;
if (adj_off > 128) {
return self.fail("TODO inline memset with large stack offset", .{});
}
const negative_offset = @bitCast(u32, -@intCast(i32, adj_off));
// We are counting `abi_size` bytes in total; however, since we reuse the index
// register as both the loop counter and the offset scaler, we start at
// `abi_size - 1` and count down to -1.
if (abi_size > math.maxInt(i32)) {
// movabs rax, abi_size - 1
const payload = try self.addExtra(Mir.Imm64.encode(abi_size - 1));
_ = try self.addInst(.{
.tag = .movabs,
.ops = (Mir.Ops{
.reg1 = .rax,
}).encode(),
.data = .{ .payload = payload },
});
} else {
// mov rax, abi_size - 1
_ = try self.addInst(.{
.tag = .mov,
.ops = (Mir.Ops{
.reg1 = .rax,
}).encode(),
.data = .{ .imm = @truncate(u32, abi_size - 1) },
});
}
// loop:
// cmp rax, -1
const loop_start = try self.addInst(.{
.tag = .cmp,
.ops = (Mir.Ops{
.reg1 = .rax,
}).encode(),
.data = .{ .imm = @bitCast(u32, @as(i32, -1)) },
});
// je end
const loop_reloc = try self.addInst(.{
.tag = .cond_jmp_eq_ne,
.ops = (Mir.Ops{ .flags = 0b01 }).encode(),
.data = .{ .inst = undefined },
});
switch (value) {
.immediate => |x| {
if (x > math.maxInt(i32)) {
return self.fail("TODO inline memset for value immediate larger than 32bits", .{});
}
// mov byte ptr [rbp + rax + stack_offset], imm
const payload = try self.addExtra(Mir.ImmPair{
.dest_off = negative_offset,
.operand = @truncate(u32, x),
});
_ = try self.addInst(.{
.tag = .mov_mem_index_imm,
.ops = (Mir.Ops{
.reg1 = .rbp,
}).encode(),
.data = .{ .payload = payload },
});
},
else => return self.fail("TODO inline memset for value of type {}", .{value}),
}
// sub rax, 1
_ = try self.addInst(.{
.tag = .sub,
.ops = (Mir.Ops{
.reg1 = .rax,
}).encode(),
.data = .{ .imm = 1 },
});
// jmp loop
_ = try self.addInst(.{
.tag = .jmp,
.ops = (Mir.Ops{ .flags = 0b00 }).encode(),
.data = .{ .inst = loop_start },
});
// end:
try self.performReloc(loop_reloc);
}
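The countdown shape lets rax double as loop counter and byte index: it starts at `abi_size - 1`, stores one byte per iteration at `[rbp + rax + disp]`, and the `cmp rax, -1` / `je end` pair exits once the counter passes zero, so exactly `abi_size` bytes are written. A hypothetical trigger:

test "undefined stack fill (sketch)" {
    // abi_size > 8 routes genSetStack(.undef) into genInlineMemset with 0xaa.
    var buf: [24]u8 = undefined;
    buf[0] = 0;
    try @import("std").testing.expect(buf[0] == 0);
}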
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
switch (mcv) {
.dead => unreachable,
@@ -3634,12 +3781,12 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const ptr_ty = self.air.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType();
const array_len = array_ty.arrayLenIncludingSentinel();
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
const stack_offset = try self.allocMem(inst, 16, 16);
const array_ty = ptr_ty.childType();
const array_len = array_ty.arrayLenIncludingSentinel();
try self.genSetStack(Type.initTag(.usize), stack_offset + 8, ptr);
try self.genSetStack(Type.initTag(.u64), stack_offset + 16, .{ .immediate = array_len });
try self.genSetStack(ptr_ty, stack_offset + 8, ptr);
try self.genSetStack(Type.initTag(.u64), stack_offset, .{ .immediate = array_len });
break :blk .{ .stack_offset = stack_offset };
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
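The reworked `airArrayToSlice` fixes the stack offsets of the two stores (length at the slot base, pointer at +8) and types the pointer store with the real pointer type instead of `usize`. Hypothetical test:

test "array to slice (sketch)" {
    var arr = [_]u8{ 1, 2, 3 };
    var s: []const u8 = &arr; // array_to_slice: len and ptr written to a 16-byte stack pair
    try @import("std").testing.expect(s.len == 3 and s[0] == 1);
}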


@@ -115,6 +115,16 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.cmp_scale_imm => try emit.mirArithScaleImm(.cmp, inst),
.mov_scale_imm => try emit.mirArithScaleImm(.mov, inst),
.adc_mem_index_imm => try emit.mirArithMemIndexImm(.adc, inst),
.add_mem_index_imm => try emit.mirArithMemIndexImm(.add, inst),
.sub_mem_index_imm => try emit.mirArithMemIndexImm(.sub, inst),
.xor_mem_index_imm => try emit.mirArithMemIndexImm(.xor, inst),
.and_mem_index_imm => try emit.mirArithMemIndexImm(.@"and", inst),
.or_mem_index_imm => try emit.mirArithMemIndexImm(.@"or", inst),
.sbb_mem_index_imm => try emit.mirArithMemIndexImm(.sbb, inst),
.cmp_mem_index_imm => try emit.mirArithMemIndexImm(.cmp, inst),
.mov_mem_index_imm => try emit.mirArithMemIndexImm(.mov, inst),
.movabs => try emit.mirMovabs(inst),
.lea => try emit.mirLea(inst),
@@ -549,6 +559,29 @@ fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
}), imm_pair.operand, emit.code) catch |err| emit.failWithLoweringError(err);
}
fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = Mir.Ops.decode(emit.mir.instructions.items(.ops)[inst]);
assert(ops.reg2 == .none);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
const ptr_size: Memory.PtrSize = switch (ops.flags) {
0b00 => .byte_ptr,
0b01 => .word_ptr,
0b10 => .dword_ptr,
0b11 => .qword_ptr,
};
const scale_index = ScaleIndex{
.scale = 0,
.index = .rax,
};
// OP ptr [reg1 + rax*1 + imm32], imm32
return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
.disp = imm_pair.dest_off,
.base = ops.reg1,
.scale_index = scale_index,
}), imm_pair.operand, emit.code) catch |err| emit.failWithLoweringError(err);
}
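The CodeGen side picks the pointer width via `ops.flags` and packs displacement plus immediate into `Mir.ImmPair`; adapted from the `genInlineMemset` call above (flags 0b00, i.e. byte ptr):

// Emits: mov byte ptr [rbp + rax*1 + disp32], imm8
const payload = try self.addExtra(Mir.ImmPair{
    .dest_off = negative_offset,
    .operand = 0xaa,
});
_ = try self.addInst(.{
    .tag = .mov_mem_index_imm,
    .ops = (Mir.Ops{ .reg1 = .rbp }).encode(),
    .data = .{ .payload = payload },
});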
fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .movabs);
@@ -1287,7 +1320,7 @@ const Memory = struct {
encoder.disp32(@bitCast(i32, mem_op.disp));
}
} else {
if (mem_op.disp == 0) {
if (mem_op.disp == 0 and dst != 5) {
encoder.modRm_indirectDisp0(src, dst);
} else if (immOpSize(mem_op.disp) == 8) {
encoder.modRm_indirectDisp8(src, dst);


@@ -79,6 +79,13 @@ pub const Inst = struct {
/// * Data field `payload` points at `ImmPair`.
adc_scale_imm,
/// ops flags: form:
/// 0b00 byte ptr [reg1 + rax + imm32], imm8
/// 0b01 word ptr [reg1 + rax + imm32], imm16
/// 0b10 dword ptr [reg1 + rax + imm32], imm32
/// 0b11 qword ptr [reg1 + rax + imm32], imm32 (sign-extended to imm64)
adc_mem_index_imm,
// The following instructions all have the same encoding as `adc`.
add,
@@ -86,81 +93,97 @@
add_scale_src,
add_scale_dst,
add_scale_imm,
add_mem_index_imm,
sub,
sub_mem_imm,
sub_scale_src,
sub_scale_dst,
sub_scale_imm,
sub_mem_index_imm,
xor,
xor_mem_imm,
xor_scale_src,
xor_scale_dst,
xor_scale_imm,
xor_mem_index_imm,
@"and",
and_mem_imm,
and_scale_src,
and_scale_dst,
and_scale_imm,
and_mem_index_imm,
@"or",
or_mem_imm,
or_scale_src,
or_scale_dst,
or_scale_imm,
or_mem_index_imm,
rol,
rol_mem_imm,
rol_scale_src,
rol_scale_dst,
rol_scale_imm,
rol_mem_index_imm,
ror,
ror_mem_imm,
ror_scale_src,
ror_scale_dst,
ror_scale_imm,
ror_mem_index_imm,
rcl,
rcl_mem_imm,
rcl_scale_src,
rcl_scale_dst,
rcl_scale_imm,
rcl_mem_index_imm,
rcr,
rcr_mem_imm,
rcr_scale_src,
rcr_scale_dst,
rcr_scale_imm,
rcr_mem_index_imm,
shl,
shl_mem_imm,
shl_scale_src,
shl_scale_dst,
shl_scale_imm,
shl_mem_index_imm,
sal,
sal_mem_imm,
sal_scale_src,
sal_scale_dst,
sal_scale_imm,
sal_mem_index_imm,
shr,
shr_mem_imm,
shr_scale_src,
shr_scale_dst,
shr_scale_imm,
shr_mem_index_imm,
sar,
sar_mem_imm,
sar_scale_src,
sar_scale_dst,
sar_scale_imm,
sar_mem_index_imm,
sbb,
sbb_mem_imm,
sbb_scale_src,
sbb_scale_dst,
sbb_scale_imm,
sbb_mem_index_imm,
cmp,
cmp_mem_imm,
cmp_scale_src,
cmp_scale_dst,
cmp_scale_imm,
cmp_mem_index_imm,
mov,
mov_mem_imm,
mov_scale_src,
mov_scale_dst,
mov_scale_imm,
mov_mem_index_imm,
/// ops flags: form:
/// 0b00 reg1, [reg2 + imm32]


@@ -64,6 +64,7 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.@"or" => try print.mirArith(.@"or", inst, w),
.sbb => try print.mirArith(.sbb, inst, w),
.cmp => try print.mirArith(.cmp, inst, w),
.mov => try print.mirArith(.mov, inst, w),
.adc_mem_imm => try print.mirArithMemImm(.adc, inst, w),
.add_mem_imm => try print.mirArithMemImm(.add, inst, w),
@@ -73,6 +74,7 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.or_mem_imm => try print.mirArithMemImm(.@"or", inst, w),
.sbb_mem_imm => try print.mirArithMemImm(.sbb, inst, w),
.cmp_mem_imm => try print.mirArithMemImm(.cmp, inst, w),
.mov_mem_imm => try print.mirArithMemImm(.mov, inst, w),
.adc_scale_src => try print.mirArithScaleSrc(.adc, inst, w),
.add_scale_src => try print.mirArithScaleSrc(.add, inst, w),
@@ -82,6 +84,7 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.or_scale_src => try print.mirArithScaleSrc(.@"or", inst, w),
.sbb_scale_src => try print.mirArithScaleSrc(.sbb, inst, w),
.cmp_scale_src => try print.mirArithScaleSrc(.cmp, inst, w),
.mov_scale_src => try print.mirArithScaleSrc(.mov, inst, w),
.adc_scale_dst => try print.mirArithScaleDst(.adc, inst, w),
.add_scale_dst => try print.mirArithScaleDst(.add, inst, w),
@@ -91,6 +94,7 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.or_scale_dst => try print.mirArithScaleDst(.@"or", inst, w),
.sbb_scale_dst => try print.mirArithScaleDst(.sbb, inst, w),
.cmp_scale_dst => try print.mirArithScaleDst(.cmp, inst, w),
.mov_scale_dst => try print.mirArithScaleDst(.mov, inst, w),
.adc_scale_imm => try print.mirArithScaleImm(.adc, inst, w),
.add_scale_imm => try print.mirArithScaleImm(.add, inst, w),
@@ -100,11 +104,18 @@ pub fn printMir(print: *const Print, w: anytype, mir_to_air_map: std.AutoHashMap
.or_scale_imm => try print.mirArithScaleImm(.@"or", inst, w),
.sbb_scale_imm => try print.mirArithScaleImm(.sbb, inst, w),
.cmp_scale_imm => try print.mirArithScaleImm(.cmp, inst, w),
.mov => try print.mirArith(.mov, inst, w),
.mov_scale_src => try print.mirArithScaleSrc(.mov, inst, w),
.mov_scale_dst => try print.mirArithScaleDst(.mov, inst, w),
.mov_scale_imm => try print.mirArithScaleImm(.mov, inst, w),
.adc_mem_index_imm => try print.mirArithMemIndexImm(.adc, inst, w),
.add_mem_index_imm => try print.mirArithMemIndexImm(.add, inst, w),
.sub_mem_index_imm => try print.mirArithMemIndexImm(.sub, inst, w),
.xor_mem_index_imm => try print.mirArithMemIndexImm(.xor, inst, w),
.and_mem_index_imm => try print.mirArithMemIndexImm(.@"and", inst, w),
.or_mem_index_imm => try print.mirArithMemIndexImm(.@"or", inst, w),
.sbb_mem_index_imm => try print.mirArithMemIndexImm(.sbb, inst, w),
.cmp_mem_index_imm => try print.mirArithMemIndexImm(.cmp, inst, w),
.mov_mem_index_imm => try print.mirArithMemIndexImm(.mov, inst, w),
.movabs => try print.mirMovabs(inst, w),
.lea => try print.mirLea(inst, w),
@@ -316,11 +327,11 @@ fn mirArithScaleDst(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index
if (ops.reg2 == .none) {
// OP [reg1 + scale*rax + 0], imm32
try w.print("{s} [{s} + {d}*rcx + 0], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm });
try w.print("{s} [{s} + {d}*rax + 0], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm });
}
// OP [reg1 + scale*rax + imm32], reg2
try w.print("{s} [{s} + {d}*rcx + {d}], {s}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm, @tagName(ops.reg2) });
try w.print("{s} [{s} + {d}*rax + {d}], {s}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm, @tagName(ops.reg2) });
}
fn mirArithScaleImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
@@ -328,7 +339,21 @@ fn mirArithScaleImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index
const scale = ops.flags;
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm_pair = print.mir.extraData(Mir.ImmPair, payload).data;
try w.print("{s} [{s} + {d}*rcx + {d}], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm_pair.dest_off, imm_pair.operand });
try w.print("{s} [{s} + {d}*rax + {d}], {d}\n", .{ @tagName(tag), @tagName(ops.reg1), scale, imm_pair.dest_off, imm_pair.operand });
}
fn mirArithMemIndexImm(print: *const Print, tag: Mir.Inst.Tag, inst: Mir.Inst.Index, w: anytype) !void {
const ops = Mir.Ops.decode(print.mir.instructions.items(.ops)[inst]);
const payload = print.mir.instructions.items(.data)[inst].payload;
const imm_pair = print.mir.extraData(Mir.ImmPair, payload).data;
try w.print("{s} ", .{@tagName(tag)});
switch (ops.flags) {
0b00 => try w.print("byte ptr ", .{}),
0b01 => try w.print("word ptr ", .{}),
0b10 => try w.print("dword ptr ", .{}),
0b11 => try w.print("qword ptr ", .{}),
}
try w.print("[{s} + 1*rax + {d}], {d}\n", .{ @tagName(ops.reg1), imm_pair.dest_off, imm_pair.operand });
}
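A hypothetical line this prints for the memset store above, with 0xaa as the immediate; note `dest_off` is a `u32`, so a negative displacement shows up in two's-complement form:

mov byte ptr [rbp + 1*rax + 4294967272], 170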
fn mirMovabs(print: *const Print, inst: Mir.Inst.Index, w: anytype) !void {


@@ -17,6 +17,7 @@ test {
_ = @import("behavior/bool.zig");
_ = @import("behavior/align.zig");
_ = @import("behavior/array.zig");
_ = @import("behavior/cast.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass for stage1, llvm backend, C backend, wasm backend.
@@ -35,7 +36,6 @@ test {
_ = @import("behavior/bugs/4954.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");


@@ -75,8 +75,6 @@ test "array literal with inferred length" {
}
test "array dot len const expr" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(comptime x: {
break :x some_array.len == 4;
});
@@ -166,7 +164,7 @@ test "nested arrays" {
}
test "implicit comptime in array type size" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var arr: [plusOne(10)]bool = undefined;
try expect(arr.len == 11);


@@ -5,6 +5,8 @@ const maxInt = std.math.maxInt;
const builtin = @import("builtin");
test "int to ptr cast" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const x = @as(usize, 13);
const y = @intToPtr(*u8, x);
const z = @ptrToInt(y);
@@ -12,11 +14,15 @@ }
}
test "integer literal to pointer cast" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const vga_mem = @intToPtr(*u16, 0xB8000);
try expect(@ptrToInt(vga_mem) == 0xB8000);
}
test "peer type resolution: ?T and T" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(peerTypeTAndOptionalT(true, false).? == 0);
try expect(peerTypeTAndOptionalT(false, false).? == 3);
comptime {
@@ -33,6 +39,8 @@ fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
}
test "resolve undefined with integer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testResolveUndefWithInt(true, 1234);
comptime try testResolveUndefWithInt(true, 1234);
}
@@ -88,6 +96,8 @@ test "comptime_int @intToFloat" {
}
test "@floatToInt" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testFloatToInts();
comptime try testFloatToInts();
}
@@ -107,6 +117,8 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
const Self = @This();
x: u8,
@@ -163,6 +175,8 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(MakeType(void).getNull() == null);
try expect(MakeType(void).getNonNull() != null);
}
@@ -180,6 +194,8 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
var y: [*c]u16 = &x;
@@ -190,6 +206,8 @@ test "implicit cast from *[N]T to [*c]T" {
}
test "*usize to *void" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var i = @as(usize, 0);
var v = @ptrCast(*void, &i);
v.* = {};
@@ -202,6 +220,8 @@ test "@intToEnum passed a comptime_int to an enum with one item" {
}
test "@intCast to u0 and use the result" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest(zero: u1, one: u1, bigzero: i32) !void {
try expect((one << @intCast(u0, bigzero)) == 1);
@@ -213,6 +233,8 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn blah(n: i32) ?i32 {
if (n == 0) {
@@ -234,6 +256,8 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array = [_]u8{ 'o', 'k' };
const opt_array_ptr: ?[*]const u8 = &array;
const a: *const ?[*]const u8 = &opt_array_ptr;
@@ -243,6 +267,8 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coersion to undefined at runtime" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@setRuntimeSafety(true);
// TODO implement @setRuntimeSafety in stage2
@@ -270,6 +296,8 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
try expect(foo(123).? == 123);
@@ -288,6 +316,8 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var w: u31 = 5;
var x: u8 = 7;
var y: i32 = -5;