stage2: update LLVM backend to new AIR memory layout
Also fix compile errors when not using -Dskip-non-native
parent 33aab2c1bb
commit 4a0f38bb76
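Editor's note: the recurring pattern in the hunks below is the move away from heap-allocated `*Inst` nodes toward the data-oriented AIR layout, where an instruction is a plain `u32` index into parallel `tag`/`data` arrays, and operands are `Air.Inst.Ref` values that a backend converts back to an index with `Air.refToIndex` when it needs to key a table. A minimal, hypothetical sketch of that layout follows — all names are illustrative and it uses today's `std.MultiArrayList` API, not the compiler's actual `Air` definition:

    const std = @import("std");

    // Struct-of-arrays instruction table: tags and payloads live in parallel
    // arrays; an instruction is identified by its u32 index.
    const Inst = struct {
        tag: Tag,
        data: Data,

        pub const Tag = enum { add, sub };
        pub const Data = union {
            bin_op: struct { lhs: u32, rhs: u32 },
        };
    };

    // Operand references name either an interned constant (low values) or an
    // instruction (high values); a stand-in for the idea behind Air.refToIndex.
    fn refToIndex(ref: u32, first_inst_ref: u32) ?u32 {
        return if (ref >= first_inst_ref) ref - first_inst_ref else null;
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var instructions: std.MultiArrayList(Inst) = .{};
        defer instructions.deinit(gpa);
        try instructions.append(gpa, .{
            .tag = .add,
            .data = .{ .bin_op = .{ .lhs = 0, .rhs = 1 } },
        });

        // Backends dispatch on a tag array instead of chasing per-node pointers:
        const air_tags = instructions.items(.tag);
        switch (air_tags[0]) {
            .add => std.debug.print("lower an add\n", .{}),
            .sub => std.debug.print("lower a sub\n", .{}),
        }
        std.debug.print("ref 5 -> inst index {?}\n", .{refToIndex(5, 4)});
    }

This is why the hunks below stop reading `inst.base.ty` off a node and instead ask the owning `Air` for types via `self.air.typeOf(ref)` or `self.air.typeOfIndex(inst)`.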
@@ -642,7 +642,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

 try self.dbgSetPrologueEnd();

-try self.genBody(self.mod_fn.body);
+try self.genBody(self.air.getMainBody());

 // Backpatch push callee saved regs
 var saved_regs = Instruction.RegisterList{
@@ -703,7 +703,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32());
 } else {
 try self.dbgSetPrologueEnd();
-try self.genBody(self.mod_fn.body);
+try self.genBody(self.air.getMainBody());
 try self.dbgSetEpilogueBegin();
 }
 },
@@ -727,7 +727,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

 try self.dbgSetPrologueEnd();

-try self.genBody(self.mod_fn.body);
+try self.genBody(self.air.getMainBody());

 // Backpatch stack offset
 const stack_end = self.max_end_stack;
@@ -779,13 +779,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32());
 } else {
 try self.dbgSetPrologueEnd();
-try self.genBody(self.mod_fn.body);
+try self.genBody(self.air.getMainBody());
 try self.dbgSetEpilogueBegin();
 }
 },
 else => {
 try self.dbgSetPrologueEnd();
-try self.genBody(self.mod_fn.body);
+try self.genBody(self.air.getMainBody());
 try self.dbgSetEpilogueBegin();
 },
 }
@@ -1492,7 +1492,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 };
 }

-fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue {
+fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
 const lhs = try self.resolveInst(op_lhs);
 const rhs = try self.resolveInst(op_rhs);

@@ -1514,14 +1514,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 if (reuse_lhs) {
 // Allocate 0 or 1 registers
 if (!rhs_is_register and rhs_should_be_register) {
-rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) };
+rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
 branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
 }
 dst_mcv = lhs;
 } else if (reuse_rhs) {
 // Allocate 0 or 1 registers
 if (!lhs_is_register and lhs_should_be_register) {
-lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) };
+lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
 branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
 }
 dst_mcv = rhs;
@@ -1542,7 +1542,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 lhs_mcv = dst_mcv;
 } else {
 // Move LHS and RHS to register
-const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{});
+const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
 lhs_mcv = MCValue{ .register = regs[0] };
 rhs_mcv = MCValue{ .register = regs[1] };
 dst_mcv = lhs_mcv;
@@ -1572,10 +1572,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

 // Move the operands to the newly allocated registers
 if (lhs_mcv == .register and !lhs_is_register) {
-try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs);
+try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
 }
 if (rhs_mcv == .register and !rhs_is_register) {
-try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs);
+try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
 }

 try self.genArmBinOpCode(
@@ -1594,7 +1594,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 lhs_mcv: MCValue,
 rhs_mcv: MCValue,
 swap_lhs_and_rhs: bool,
-op: ir.Inst.Tag,
+op: Air.Inst.Tag,
 ) !void {
 assert(lhs_mcv == .register or rhs_mcv == .register);

@@ -1665,14 +1665,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 if (reuse_lhs) {
 // Allocate 0 or 1 registers
 if (!rhs_is_register) {
-rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) };
+rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
 branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
 }
 dst_mcv = lhs;
 } else if (reuse_rhs) {
 // Allocate 0 or 1 registers
 if (!lhs_is_register) {
-lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) };
+lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
 branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
 }
 dst_mcv = rhs;
@@ -1690,7 +1690,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 lhs_mcv = dst_mcv;
 } else {
 // Move LHS and RHS to register
-const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{});
+const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
 lhs_mcv = MCValue{ .register = regs[0] };
 rhs_mcv = MCValue{ .register = regs[1] };
 dst_mcv = lhs_mcv;
@@ -1701,10 +1701,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

 // Move the operands to the newly allocated registers
 if (!lhs_is_register) {
-try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs);
+try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
 }
 if (!rhs_is_register) {
-try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs);
+try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
 }

 writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32());
@@ -2704,9 +2704,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 },
 .aarch64 => {
 for (info.args) |mc_arg, arg_i| {
-const arg = inst.args[arg_i];
+const arg = args[arg_i];
 const arg_ty = self.air.typeOf(arg);
-const arg_mcv = try self.resolveInst(inst.args[arg_i]);
+const arg_mcv = try self.resolveInst(args[arg_i]);

 switch (mc_arg) {
 .none => continue,
@@ -2733,7 +2733,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 },
 }
 }
-if (inst.func.value()) |func_value| {
+if (self.air.value(callee)) |func_value| {
 if (func_value.castTag(.function)) |func_payload| {
 const ptr_bits = self.target.cpu.arch.ptrBitWidth();
 const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@@ -2899,15 +2899,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 // Allocate registers
 if (rhs_should_be_register) {
 if (!lhs_is_register and !rhs_is_register) {
-const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{});
+const regs = try self.register_manager.allocRegs(2, .{
+Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?,
+}, &.{});
 lhs_mcv = MCValue{ .register = regs[0] };
 rhs_mcv = MCValue{ .register = regs[1] };
 } else if (!rhs_is_register) {
-rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) };
+rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) };
 }
 }
 if (!lhs_is_register) {
-lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) };
+lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) };
 }

 // Move the operands to the newly allocated registers
@@ -3538,7 +3540,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

 break :result MCValue{ .register = reg };
 } else {
-break :result MCValue.none;
+break :result MCValue{ .none = {} };
 }
 },
 .aarch64 => result: {
@@ -3576,7 +3578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 return self.fail("unrecognized register: '{s}'", .{reg_name});
 break :result MCValue{ .register = reg };
 } else {
-break :result MCValue.none;
+break :result MCValue{ .none = {} };
 }
 },
 .riscv64 => result: {
@@ -3612,7 +3614,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 return self.fail("unrecognized register: '{s}'", .{reg_name});
 break :result MCValue{ .register = reg };
 } else {
-break :result MCValue.none;
+break :result MCValue{ .none = {} };
 }
 },
 .x86_64, .i386 => result: {
@@ -974,6 +974,9 @@ fn airArg(o: *Object) CValue {

 fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue {
 const ty_op = o.air.instructions.items(.data)[inst].ty_op;
+const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr();
+if (!is_volatile and o.liveness.isUnused(inst))
+return CValue.none;
 const inst_ty = o.air.typeOfIndex(inst);
 const operand = try o.resolveInst(ty_op.operand);
 const writer = o.writer();
@@ -10,7 +10,7 @@ const math = std.math;
 const Module = @import("../Module.zig");
 const TypedValue = @import("../TypedValue.zig");
 const Air = @import("../Air.zig");
-const Inst = ir.Inst;
+const Liveness = @import("../Liveness.zig");

 const Value = @import("../value.zig").Value;
 const Type = @import("../type.zig").Type;
@@ -355,6 +355,7 @@ pub const DeclGen = struct {
 builder.positionBuilderAtEnd(entry_block);

 var fg: FuncGen = .{
+.gpa = self.gpa,
 .dg = self,
 .builder = builder,
 .args = args,
@@ -593,29 +594,29 @@ pub const DeclGen = struct {
 };

 pub const FuncGen = struct {
+gpa: *Allocator,
 dg: *DeclGen,

 builder: *const llvm.Builder,

-/// This stores the LLVM values used in a function, such that they can be
-/// referred to in other instructions. This table is cleared before every function is generated.
-/// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
-/// in here, however if a block ends, the instructions can be thrown away.
-func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
+/// This stores the LLVM values used in a function, such that they can be referred to
+/// in other instructions. This table is cleared before every function is generated.
+func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value),

-/// These fields are used to refer to the LLVM value of the function paramaters in an Arg instruction.
+/// These fields are used to refer to the LLVM value of the function paramaters
+/// in an Arg instruction.
 args: []*const llvm.Value,
 arg_index: usize,

 entry_block: *const llvm.BasicBlock,
-/// This fields stores the last alloca instruction, such that we can append more alloca instructions
-/// to the top of the function.
+/// This fields stores the last alloca instruction, such that we can append
+/// more alloca instructions to the top of the function.
 latest_alloca_inst: ?*const llvm.Value,

 llvm_func: *const llvm.Value,

 /// This data structure is used to implement breaking to blocks.
-blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
+blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
 parent_bb: *const llvm.BasicBlock,
 break_bbs: *BreakBasicBlocks,
 break_vals: *BreakValues,
@@ -626,9 +627,9 @@ pub const FuncGen = struct {

 fn deinit(self: *FuncGen) void {
 self.builder.dispose();
-self.func_inst_table.deinit(self.gpa());
-self.gpa().free(self.args);
-self.blocks.deinit(self.gpa());
+self.func_inst_table.deinit(self.gpa);
+self.gpa.free(self.args);
+self.blocks.deinit(self.gpa);
 }

 fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@@ -644,13 +645,9 @@ pub const FuncGen = struct {
 return self.dg.object.context;
 }

-fn gpa(self: *FuncGen) *Allocator {
-return self.dg.gpa;
-}
-
-fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value {
-if (inst.value()) |val| {
-return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self);
+fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value {
+if (self.air.value(inst)) |val| {
+return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self);
 }
 if (self.func_inst_table.get(inst)) |value| return value;

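Editor's note: the LLVM `resolveInst` above and the SPIR-V `resolve` later in this commit share the same two-step lookup: if the `Ref` has a compile-time-known value, materialize a constant; otherwise the defining instruction was already lowered, so fetch its backend value from the per-function table. A hypothetical sketch of that shape — the types are stand-ins for the real `*const llvm.Value` / `ResultId` handles, not the compiler's API:

    const std = @import("std");

    const Ref = u32;
    const Value = u64; // stand-in for a backend value handle

    fn resolveInst(
        comptime_values: []const ?Value, // non-null when the Ref is a constant
        inst_table: *const std.AutoHashMapUnmanaged(Ref, Value),
        inst: Ref,
    ) Value {
        // Step 1: comptime-known value -> emit/reuse a constant.
        if (comptime_values[inst]) |val| return val;
        // Step 2: the defining instruction dominates this use and has already
        // been lowered, so the table lookup must succeed.
        return inst_table.get(inst).?;
    }

    test "constant refs short-circuit the table" {
        var table: std.AutoHashMapUnmanaged(Ref, Value) = .{};
        defer table.deinit(std.testing.allocator);
        try table.put(std.testing.allocator, 1, 99);
        const comptime_values = [_]?Value{ 7, null };
        try std.testing.expectEqual(@as(Value, 7), resolveInst(&comptime_values, &table, 0));
        try std.testing.expectEqual(@as(Value, 99), resolveInst(&comptime_values, &table, 1));
    }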
@@ -658,51 +655,57 @@ pub const FuncGen = struct {
 }

 fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
+const air_tags = self.air.instructions.items(.tag);
 for (body.instructions) |inst| {
-const opt_value = switch (inst.tag) {
-.add => try self.genAdd(inst.castTag(.add).?),
-.alloc => try self.genAlloc(inst.castTag(.alloc).?),
-.arg => try self.genArg(inst.castTag(.arg).?),
-.bitcast => try self.genBitCast(inst.castTag(.bitcast).?),
-.block => try self.genBlock(inst.castTag(.block).?),
-.br => try self.genBr(inst.castTag(.br).?),
-.breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
-.br_void => try self.genBrVoid(inst.castTag(.br_void).?),
-.call => try self.genCall(inst.castTag(.call).?),
-.cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq),
-.cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt),
-.cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte),
-.cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt),
-.cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte),
-.cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq),
-.condbr => try self.genCondBr(inst.castTag(.condbr).?),
-.intcast => try self.genIntCast(inst.castTag(.intcast).?),
-.is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false),
-.is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true),
-.is_null => try self.genIsNull(inst.castTag(.is_null).?, false),
-.is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true),
-.load => try self.genLoad(inst.castTag(.load).?),
-.loop => try self.genLoop(inst.castTag(.loop).?),
-.not => try self.genNot(inst.castTag(.not).?),
-.ret => try self.genRet(inst.castTag(.ret).?),
-.retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
-.store => try self.genStore(inst.castTag(.store).?),
-.sub => try self.genSub(inst.castTag(.sub).?),
-.unreach => self.genUnreach(inst.castTag(.unreach).?),
-.optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false),
-.optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true),
+const opt_value = switch (air_tags[inst]) {
+.add => try self.airAdd(inst),
+.sub => try self.airSub(inst),
+
+.cmp_eq => try self.airCmp(inst, .eq),
+.cmp_gt => try self.airCmp(inst, .gt),
+.cmp_gte => try self.airCmp(inst, .gte),
+.cmp_lt => try self.airCmp(inst, .lt),
+.cmp_lte => try self.airCmp(inst, .lte),
+.cmp_neq => try self.airCmp(inst, .neq),
+
+.is_non_null => try self.airIsNonNull(inst, false),
+.is_non_null_ptr => try self.airIsNonNull(inst, true),
+.is_null => try self.airIsNull(inst, false),
+.is_null_ptr => try self.airIsNull(inst, true),
+
+.alloc => try self.airAlloc(inst),
+.arg => try self.airArg(inst),
+.bitcast => try self.airBitCast(inst),
+.block => try self.airBlock(inst),
+.br => try self.airBr(inst),
+.breakpoint => try self.airBreakpoint(inst),
+.call => try self.airCall(inst),
+.cond_br => try self.airCondBr(inst),
+.intcast => try self.airIntCast(inst),
+.load => try self.airLoad(inst),
+.loop => try self.airLoop(inst),
+.not => try self.airNot(inst),
+.ret => try self.airRet(inst),
+.store => try self.airStore(inst),
+.unreach => self.airUnreach(inst),
+.optional_payload => try self.airOptionalPayload(inst, false),
+.optional_payload_ptr => try self.airOptionalPayload(inst, true),
 .dbg_stmt => blk: {
 // TODO: implement debug info
 break :blk null;
 },
-else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}),
+else => |tag| return self.todo("implement AIR instruction: {}", .{tag}),
 };
-if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val);
+if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val);
 }
 }

-fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value {
-if (inst.func.value()) |func_value| {
+fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+const extra = self.air.extraData(Air.Call, pl_op.payload);
+const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
+
+if (self.air.value(pl_op.operand)) |func_value| {
 const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
 extern_fn.data
 else if (func_value.castTag(.function)) |func_payload|
@@ -714,12 +717,10 @@ pub const FuncGen = struct {
 const zig_fn_type = fn_decl.ty;
 const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl);

-const num_args = inst.args.len;
-
-const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args);
-defer self.gpa().free(llvm_param_vals);
+const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len);
+defer self.gpa.free(llvm_param_vals);

-for (inst.args) |arg, i| {
+for (args) |arg, i| {
 llvm_param_vals[i] = try self.resolveInst(arg);
 }

@@ -727,8 +728,8 @@ pub const FuncGen = struct {
 // Do we need that?
 const call = self.builder.buildCall(
 llvm_fn,
-if (num_args == 0) null else llvm_param_vals.ptr,
-@intCast(c_uint, num_args),
+if (args.len == 0) null else llvm_param_vals.ptr,
+@intCast(c_uint, args.len),
 "",
 );

@@ -746,31 +747,31 @@ pub const FuncGen = struct {
 }
 }

-fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
-_ = inst;
-_ = self.builder.buildRetVoid();
-return null;
-}
-
-fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
-if (!inst.operand.ty.hasCodeGenBits()) {
-// TODO: in astgen these instructions should turn into `retvoid` instructions.
+fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const un_op = self.air.instructions.items(.data)[inst].un_op;
+if (!self.air.typeOf(un_op).hasCodeGenBits()) {
 _ = self.builder.buildRetVoid();
 return null;
 }
-_ = self.builder.buildRet(try self.resolveInst(inst.operand));
+const operand = try self.resolveInst(un_op);
+_ = self.builder.buildRet(operand);
 return null;
 }

-fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
-const lhs = try self.resolveInst(inst.lhs);
-const rhs = try self.resolveInst(inst.rhs);
+fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+const lhs = try self.resolveInst(bin_op.lhs);
+const rhs = try self.resolveInst(bin_op.rhs);
+const inst_ty = self.air.typeOfIndex(inst);

-if (!inst.base.ty.isInt())
-if (inst.base.ty.tag() != .bool)
-return self.todo("implement 'genCmp' for type {}", .{inst.base.ty});
+if (!inst_ty.isInt())
+if (inst_ty.tag() != .bool)
+return self.todo("implement 'airCmp' for type {}", .{inst_ty});

-const is_signed = inst.base.ty.isSignedInt();
+const is_signed = inst_ty.isSignedInt();
 const operation = switch (op) {
 .eq => .EQ,
 .neq => .NE,
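Editor's note: many of the new `air*` handlers above and below open with `if (self.liveness.isUnused(inst)) return null;`. The `Liveness` analysis now threaded through every backend lets codegen skip instructions whose results are never used (volatile loads being the exception, as `airLoad` shows). A toy sketch of such a query, assuming one "unused" bit per instruction packed into `u32` words — the real `Liveness` tracks more than this:

    const std = @import("std");

    const Liveness = struct {
        // One bit per AIR instruction; a set bit means the result is never used.
        unused: []const u32,

        fn isUnused(l: Liveness, inst: u32) bool {
            return std.math.shr(u32, l.unused[inst / 32], inst % 32) & 1 == 1;
        }
    };

    test "unused instruction query" {
        const l = Liveness{ .unused = &[_]u32{0b0100} };
        try std.testing.expect(l.isUnused(2));
        try std.testing.expect(!l.isUnused(0));
    }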
@@ -783,32 +784,36 @@ pub const FuncGen = struct {
 return self.builder.buildICmp(operation, lhs, rhs, "");
 }

-fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value {
+fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+const extra = self.air.extraData(Air.Block, ty_pl.payload);
+const body = self.air.extra[extra.end..][0..extra.data.body_len];
 const parent_bb = self.context().createBasicBlock("Block");

 // 5 breaks to a block seems like a reasonable default.
-var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5);
-var break_vals = try BreakValues.initCapacity(self.gpa(), 5);
-try self.blocks.putNoClobber(self.gpa(), inst, .{
+var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5);
+var break_vals = try BreakValues.initCapacity(self.gpa, 5);
+try self.blocks.putNoClobber(self.gpa, inst, .{
 .parent_bb = parent_bb,
 .break_bbs = &break_bbs,
 .break_vals = &break_vals,
 });
 defer {
 assert(self.blocks.remove(inst));
-break_bbs.deinit(self.gpa());
-break_vals.deinit(self.gpa());
+break_bbs.deinit(self.gpa);
+break_vals.deinit(self.gpa);
 }

-try self.genBody(inst.body);
+try self.genBody(body);

 self.llvm_func.appendExistingBasicBlock(parent_bb);
 self.builder.positionBuilderAtEnd(parent_bb);

 // If the block does not return a value, we dont have to create a phi node.
-if (!inst.base.ty.hasCodeGenBits()) return null;
+const inst_ty = self.air.typeOfIndex(inst);
+if (!inst_ty.hasCodeGenBits()) return null;

-const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), "");
+const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst_ty), "");
 phi_node.addIncoming(
 break_vals.items.ptr,
 break_bbs.items.ptr,
@@ -817,35 +822,30 @@ pub const FuncGen = struct {
 return phi_node;
 }

-fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value {
-var block = self.blocks.get(inst.block).?;
+fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const branch = self.air.instructions.items(.data)[inst].br;
+const block = self.blocks.get(branch.block_inst).?;

 // If the break doesn't break a value, then we don't have to add
 // the values to the lists.
-if (!inst.operand.ty.hasCodeGenBits()) {
-// TODO: in astgen these instructions should turn into `br_void` instructions.
-_ = self.builder.buildBr(block.parent_bb);
-} else {
-const val = try self.resolveInst(inst.operand);
+if (self.air.typeOf(branch.result).hasCodeGenBits()) {
+const val = try self.resolveInst(branch.result);

 // For the phi node, we need the basic blocks and the values of the
 // break instructions.
-try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock());
-try block.break_vals.append(self.gpa(), val);
+try block.break_bbs.append(self.gpa, self.builder.getInsertBlock());
+try block.break_vals.append(self.gpa, val);

 _ = self.builder.buildBr(block.parent_bb);
 }
 return null;
 }

-fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value {
-var block = self.blocks.get(inst.block).?;
-_ = self.builder.buildBr(block.parent_bb);
-return null;
-}
-
-fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value {
-const condition_value = try self.resolveInst(inst.condition);
+fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+const cond = try self.resolveInst(pl_op.operand);
+const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];

 const then_block = self.context().appendBasicBlock(self.llvm_func, "Then");
 const else_block = self.context().appendBasicBlock(self.llvm_func, "Else");
@@ -854,38 +854,51 @@ pub const FuncGen = struct {
 defer self.builder.positionBuilderAtEnd(prev_block);

 self.builder.positionBuilderAtEnd(then_block);
-try self.genBody(inst.then_body);
+try self.genBody(then_body);

 self.builder.positionBuilderAtEnd(else_block);
-try self.genBody(inst.else_body);
+try self.genBody(else_body);
 }
-_ = self.builder.buildCondBr(condition_value, then_block, else_block);
+_ = self.builder.buildCondBr(cond, then_block, else_block);
 return null;
 }

-fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value {
+fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+const loop = self.air.extraData(Air.Block, ty_pl.payload);
+const body = self.air.extra[loop.end..][0..loop.data.body_len];
 const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop");
 _ = self.builder.buildBr(loop_block);

 self.builder.positionBuilderAtEnd(loop_block);
-try self.genBody(inst.body);
+try self.genBody(body);

 _ = self.builder.buildBr(loop_block);
 return null;
 }

-fn genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
-return self.builder.buildNot(try self.resolveInst(inst.operand), "");
+fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const un_op = self.air.instructions.items(.data)[inst].un_op;
+const operand = try self.resolveInst(un_op);
+
+return self.builder.buildNot(operand, "");
 }

-fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
 _ = inst;
 _ = self.builder.buildUnreachable();
 return null;
 }

-fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
-const operand = try self.resolveInst(inst.operand);
+fn airIsNonNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const un_op = self.air.instructions.items(.data)[inst].un_op;
+const operand = try self.resolveInst(un_op);

 if (operand_is_ptr) {
 const index_type = self.context().intType(32);
@@ -901,12 +914,23 @@ pub const FuncGen = struct {
 }
 }

-fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
-return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
+fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+return self.builder.buildNot((try self.airIsNonNull(inst, operand_is_ptr)).?, "");
 }

-fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
-const operand = try self.resolveInst(inst.operand);
+fn airOptionalPayload(
+self: *FuncGen,
+inst: Air.Inst.Index,
+operand_is_ptr: bool,
+) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+const operand = try self.resolveInst(ty_op.operand);

 if (operand_is_ptr) {
 const index_type = self.context().intType(32);
@@ -922,61 +946,83 @@ pub const FuncGen = struct {
 }
 }

-fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
-const lhs = try self.resolveInst(inst.lhs);
-const rhs = try self.resolveInst(inst.rhs);
+fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+const lhs = try self.resolveInst(bin_op.lhs);
+const rhs = try self.resolveInst(bin_op.rhs);
+const inst_ty = self.air.typeOfIndex(inst);

-if (!inst.base.ty.isInt())
-return self.todo("implement 'genAdd' for type {}", .{inst.base.ty});
+if (!inst_ty.isInt())
+return self.todo("implement 'airAdd' for type {}", .{inst_ty});

-return if (inst.base.ty.isSignedInt())
+return if (inst_ty.isSignedInt())
 self.builder.buildNSWAdd(lhs, rhs, "")
 else
 self.builder.buildNUWAdd(lhs, rhs, "");
 }

-fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
-const lhs = try self.resolveInst(inst.lhs);
-const rhs = try self.resolveInst(inst.rhs);
+fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+const lhs = try self.resolveInst(bin_op.lhs);
+const rhs = try self.resolveInst(bin_op.rhs);
+const inst_ty = self.air.typeOfIndex(inst);

-if (!inst.base.ty.isInt())
-return self.todo("implement 'genSub' for type {}", .{inst.base.ty});
+if (!inst_ty.isInt())
+return self.todo("implement 'airSub' for type {}", .{inst_ty});

-return if (inst.base.ty.isSignedInt())
+return if (inst_ty.isSignedInt())
 self.builder.buildNSWSub(lhs, rhs, "")
 else
 self.builder.buildNUWSub(lhs, rhs, "");
 }

-fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
-const val = try self.resolveInst(inst.operand);
-
-const signed = inst.base.ty.isSignedInt();
+fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+const operand = try self.resolveInst(ty_op.operand);
+const inst_ty = self.air.typeOfIndex(inst);
+
+const signed = inst_ty.isSignedInt();
 // TODO: Should we use intcast here or just a simple bitcast?
 // LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
-return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), "");
+return self.builder.buildIntCast2(operand, try self.dg.getLLVMType(inst_ty), llvm.Bool.fromBool(signed), "");
 }

-fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
-const val = try self.resolveInst(inst.operand);
-const dest_type = try self.dg.getLLVMType(inst.base.ty);
-
-return self.builder.buildBitCast(val, dest_type, "");
+fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
+
+const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+const operand = try self.resolveInst(ty_op.operand);
+const inst_ty = self.air.typeOfIndex(inst);
+const dest_type = try self.dg.getLLVMType(inst_ty);
+
+return self.builder.buildBitCast(operand, dest_type, "");
 }

-fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value {
+fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
 const arg_val = self.args[self.arg_index];
 self.arg_index += 1;

-const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty));
+const inst_ty = self.air.typeOfIndex(inst);
+const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst_ty));
 _ = self.builder.buildStore(arg_val, ptr_val);
 return self.builder.buildLoad(ptr_val, "");
 }

-fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+if (self.liveness.isUnused(inst))
+return null;
 // buildAlloca expects the pointee type, not the pointer type, so assert that
 // a Payload.PointerSimple is passed to the alloc instruction.
-const pointee_type = inst.base.ty.castPointer().?.data;
+const inst_ty = self.air.typeOfIndex(inst);
+const pointee_type = inst_ty.castPointer().?.data;

 // TODO: figure out a way to get the name of the var decl.
 // TODO: set alignment and volatile
@@ -1007,19 +1053,26 @@ pub const FuncGen = struct {
 return val;
 }

-fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
-const val = try self.resolveInst(inst.rhs);
-const ptr = try self.resolveInst(inst.lhs);
-_ = self.builder.buildStore(val, ptr);
+fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+const dest_ptr = try self.resolveInst(bin_op.lhs);
+const src_operand = try self.resolveInst(bin_op.rhs);
+// TODO set volatile on this store properly
+_ = self.builder.buildStore(src_operand, dest_ptr);
 return null;
 }

-fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
-const ptr_val = try self.resolveInst(inst.operand);
-return self.builder.buildLoad(ptr_val, "");
+fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+if (!is_volatile and self.liveness.isUnused(inst))
+return null;
+const ptr = try self.resolveInst(ty_op.operand);
+// TODO set volatile on this load properly
+return self.builder.buildLoad(ptr, "");
 }

-fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
 _ = inst;
 const llvn_fn = self.getIntrinsic("llvm.debugtrap");
 _ = self.builder.buildCall(llvn_fn, null, 0, "");
@@ -13,6 +13,7 @@ const Type = @import("../type.zig").Type;
 const Value = @import("../value.zig").Value;
 const LazySrcLoc = Module.LazySrcLoc;
 const Air = @import("../Air.zig");
+const Liveness = @import("../Liveness.zig");

 pub const Word = u32;
 pub const ResultId = u32;
@@ -247,6 +248,7 @@ pub const DeclGen = struct {
 return .{
 .spv = spv,
 .air = undefined,
+.liveness = undefined,
 .args = std.ArrayList(ResultId).init(spv.gpa),
 .next_arg_index = undefined,
 .inst_results = InstMap.init(spv.gpa),
@@ -259,11 +261,12 @@ pub const DeclGen = struct {
 }

 /// Generate the code for `decl`. If a reportable error occured during code generation,
-/// a message is returned by this function. Callee owns the memory. If this function returns such
-/// a reportable error, it is valid to be called again for a different decl.
-pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg {
+/// a message is returned by this function. Callee owns the memory. If this function
+/// returns such a reportable error, it is valid to be called again for a different decl.
+pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
 // Reset internal resources, we don't want to re-allocate these.
-self.air = &air;
+self.air = air;
+self.liveness = liveness;
 self.args.items.len = 0;
 self.next_arg_index = 0;
 self.inst_results.clearRetainingCapacity();
@@ -297,12 +300,12 @@ pub const DeclGen = struct {
 return error.AnalysisFail;
 }

-fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
-if (inst.value()) |val| {
-return self.genConstant(inst.ty, val);
+fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId {
+if (self.air.value(inst)) |val| {
+return self.genConstant(self.air.typeOf(inst), val);
 }

-return self.inst_results.get(inst).?; // Instruction does not dominate all uses!
+const index = Air.refToIndex(inst).?;
+return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage.
 }

 fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void {
@@ -663,40 +666,40 @@ pub const DeclGen = struct {
 const air_tags = self.air.instructions.items(.tag);
 const result_id = switch (air_tags[inst]) {
 // zig fmt: off
-.add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
-.sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
-.mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
-.div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
+.add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
+.sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
+.mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
+.div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),

-.bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd),
-.bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr),
-.xor => try self.genBinOpSimple(inst, .OpBitwiseXor),
-.bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd),
-.bool_or => try self.genBinOpSimple(inst, .OpLogicalOr),
+.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
+.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
+.xor => try self.airBinOpSimple(inst, .OpBitwiseXor),
+.bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd),
+.bool_or => try self.airBinOpSimple(inst, .OpLogicalOr),

-.not => try self.genNot(inst),
+.not => try self.airNot(inst),

-.cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
-.cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
-.cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
-.cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
-.cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
-.cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),
+.cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}),
+.cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}),
+.cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}),
+.cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
+.cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}),
+.cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}),

-.arg => self.genArg(),
-.alloc => try self.genAlloc(inst),
-.block => (try self.genBlock(inst)) orelse return,
-.load => try self.genLoad(inst),
+.arg => self.airArg(),
+.alloc => try self.airAlloc(inst),
+.block => (try self.airBlock(inst)) orelse return,
+.load => try self.airLoad(inst),

-.br => return self.genBr(inst),
+.br => return self.airBr(inst),
 .breakpoint => return,
-.cond_br => return self.genCondBr(inst),
+.cond_br => return self.airCondBr(inst),
 .constant => unreachable,
-.dbg_stmt => return self.genDbgStmt(inst),
-.loop => return self.genLoop(inst),
-.ret => return self.genRet(inst),
-.store => return self.genStore(inst),
-.unreach => return self.genUnreach(),
+.dbg_stmt => return self.airDbgStmt(inst),
+.loop => return self.airLoop(inst),
+.ret => return self.airRet(inst),
+.store => return self.airStore(inst),
+.unreach => return self.airUnreach(),
 // zig fmt: on

 else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{
@@ -707,21 +710,22 @@ pub const DeclGen = struct {
 try self.inst_results.putNoClobber(inst, result_id);
 }

-fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
+fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs_id = try self.resolve(bin_op.lhs);
 const rhs_id = try self.resolve(bin_op.rhs);
 const result_id = self.spv.allocResultId();
 const result_type_id = try self.genType(self.air.typeOfIndex(inst));
 try writeInstruction(&self.code, opcode, &[_]Word{
 result_type_id, result_id, lhs_id, rhs_id,
 });
 return result_id;
 }

-fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
 // LHS and RHS are guaranteed to have the same type, and AIR guarantees
 // the result to be the same as the LHS and RHS, which matches SPIR-V.
-const ty = self.air.getType(inst);
+const ty = self.air.typeOfIndex(inst);
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs_id = try self.resolve(bin_op.lhs);
 const rhs_id = try self.resolve(bin_op.rhs);
@@ -729,8 +733,8 @@ pub const DeclGen = struct {
 const result_id = self.spv.allocResultId();
 const result_type_id = try self.genType(ty);

-assert(self.air.getType(bin_op.lhs).eql(ty));
-assert(self.air.getType(bin_op.rhs).eql(ty));
+assert(self.air.typeOf(bin_op.lhs).eql(ty));
+assert(self.air.typeOf(bin_op.rhs).eql(ty));

 // Binary operations are generally applicable to both scalar and vector operations
 // in SPIR-V, but int and float versions of operations require different opcodes.
@@ -744,8 +748,8 @@ pub const DeclGen = struct {
 return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
 },
 .integer => switch (info.signedness) {
-.signed => 1,
-.unsigned => 2,
+.signed => @as(usize, 1),
+.unsigned => @as(usize, 2),
 },
 .float => 0,
 else => unreachable,
@@ -759,14 +763,14 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs_id = try self.resolve(bin_op.lhs);
 const rhs_id = try self.resolve(bin_op.rhs);
 const result_id = self.spv.allocResultId();
 const result_type_id = try self.genType(Type.initTag(.bool));
-const op_ty = self.air.getType(bin_op.lhs);
-assert(op_ty.eql(self.air.getType(bin_op.rhs)));
+const op_ty = self.air.typeOf(bin_op.lhs);
+assert(op_ty.eql(self.air.typeOf(bin_op.rhs)));

 // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
 // but int and float versions of operations require different opcodes.
@@ -782,10 +786,9 @@ pub const DeclGen = struct {
 .float => 0,
 .bool => 1,
 .integer => switch (info.signedness) {
-.signed => 1,
-.unsigned => 2,
+.signed => @as(usize, 1),
+.unsigned => @as(usize, 2),
 },
 else => unreachable,
 };
 const opcode = ops[opcode_index];

@@ -793,7 +796,7 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const operand_id = try self.resolve(ty_op.operand);
 const result_id = self.spv.allocResultId();
@@ -803,8 +806,8 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
-const ty = self.air.getType(inst);
+fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+const ty = self.air.typeOfIndex(inst);
 const storage_class = spec.StorageClass.Function;
 const result_type_id = try self.genPointerType(ty, storage_class);
 const result_id = self.spv.allocResultId();
@@ -816,12 +819,12 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genArg(self: *DeclGen) ResultId {
+fn airArg(self: *DeclGen) ResultId {
 defer self.next_arg_index += 1;
 return self.args.items[self.next_arg_index];
 }

-fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
+fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
 // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and
 // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
 // the current block by first generating the code of the block, then a label, and then generate the rest of the current
@@ -841,7 +844,7 @@ pub const DeclGen = struct {
 incoming_blocks.deinit(self.spv.gpa);
 }

-const ty = self.air.getType(inst);
+const ty = self.air.typeOfIndex(inst);
 const inst_datas = self.air.instructions.items(.data);
 const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
 const body = self.air.extra[extra.end..][0..extra.data.body_len];
@@ -872,10 +875,10 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
 const br = self.air.instructions.items(.data)[inst].br;
 const block = self.blocks.get(br.block_inst).?;
-const operand_ty = self.air.getType(br.operand);
+const operand_ty = self.air.typeOf(br.operand);

 if (operand_ty.hasCodeGenBits()) {
 const operand_id = try self.resolve(br.operand);
@@ -886,7 +889,7 @@ pub const DeclGen = struct {
 try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
 }

-fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void {
+fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
 const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
@@ -912,16 +915,16 @@ pub const DeclGen = struct {
 try self.genBody(else_body);
 }

-fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
 const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
 const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
 try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
 }

-fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const operand_id = try self.resolve(ty_op.operand);
-const ty = self.air.getType(inst);
+const ty = self.air.typeOfIndex(inst);

 const result_type_id = try self.genType(ty);
 const result_id = self.spv.allocResultId();
@@ -936,8 +939,9 @@ pub const DeclGen = struct {
 return result_id;
 }

-fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
-const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+const loop = self.air.extraData(Air.Block, ty_pl.payload);
 const body = self.air.extra[loop.end..][0..loop.data.body_len];
 const loop_label_id = self.spv.allocResultId();

@@ -952,9 +956,9 @@ pub const DeclGen = struct {
 try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
 }

-fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void {
-const operand = inst_datas[inst].un_op;
-const operand_ty = self.air.getType(operand);
+fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+const operand = self.air.instructions.items(.data)[inst].un_op;
+const operand_ty = self.air.typeOf(operand);
 if (operand_ty.hasCodeGenBits()) {
 const operand_id = try self.resolve(operand);
 try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
@@ -963,11 +967,11 @@ pub const DeclGen = struct {
 }
 }

-fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const dst_ptr_id = try self.resolve(bin_op.lhs);
 const src_val_id = try self.resolve(bin_op.rhs);
-const lhs_ty = self.air.getType(bin_op.lhs);
+const lhs_ty = self.air.typeOf(bin_op.lhs);

 const operands = if (lhs_ty.isVolatilePtr())
 &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
@@ -977,7 +981,7 @@ pub const DeclGen = struct {
 try writeInstruction(&self.code, .OpStore, operands);
 }

-fn genUnreach(self: *DeclGen) !void {
+fn airUnreach(self: *DeclGen) !void {
 try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
 }
 };
@@ -774,7 +774,7 @@ pub const Context = struct {
 }
 }
 return Result{ .externally_managed = payload.data };
-} else return self.fail(.{ .node_offset = 0 }, "TODO implement gen for more kinds of arrays", .{});
+} else return self.fail("TODO implement gen for more kinds of arrays", .{});
 },
 .Int => {
 const info = typed_value.ty.intInfo(self.target);
@@ -783,9 +783,9 @@ pub const Context = struct {
 try self.code.append(@intCast(u8, int_byte));
 return Result.appended;
 }
-return self.fail(.{ .node_offset = 0 }, "TODO: Implement codegen for int type: '{}'", .{typed_value.ty});
+return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty});
 },
-else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: Implement zig type codegen for type: '{s}'", .{tag}),
+else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}),
 }
 }

@@ -883,7 +883,7 @@ pub const Context = struct {
 }

 fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
-const elem_type = self.air.getType(inst).elemType();
+const elem_type = self.air.typeOfIndex(inst).elemType();
 return self.allocLocal(elem_type);
 }

@@ -657,11 +657,16 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
 }

 pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
-if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) {
+if (build_options.skip_non_native and
+builtin.object_format != .coff and
+builtin.object_format != .pe)
+{
 @panic("Attempted to compile for object format that was disabled by build configuration");
 }
 if (build_options.have_llvm) {
-if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
+if (self.llvm_object) |llvm_object| {
+return llvm_object.updateFunc(module, func, air, liveness);
+}
 }
 const tracy = trace(@src());
 defer tracy.end();
@@ -669,6 +674,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
 var code_buffer = std.ArrayList(u8).init(self.base.allocator);
 defer code_buffer.deinit();

+const decl = func.owner_decl;
 const res = try codegen.generateFunction(
 &self.base,
 decl.srcLoc(),
@@ -679,7 +685,6 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
 .none,
 );
 const code = switch (res) {
 .externally_managed => |x| x,
 .appended => code_buffer.items,
 .fail => |em| {
 decl.analysis = .codegen_failure;
@@ -725,10 +730,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
 },
 };

-return self.finishUpdateDecl(module, func.owner_decl, code);
+return self.finishUpdateDecl(module, decl, code);
 }

-fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void {
+fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
 const required_alignment = decl.ty.abiAlignment(self.base.options.target);
 const curr_size = decl.link.coff.size;
 if (curr_size != 0) {
@@ -1150,9 +1150,13 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
 var code_buffer = std.ArrayList(u8).init(self.base.allocator);
 defer code_buffer.deinit();

-var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null;
+var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
+const debug_buffers = if (self.d_sym) |*ds| blk: {
+debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
+break :blk &debug_buffers_buf;
+} else null;
 defer {
-if (debug_buffers) |*dbg| {
+if (debug_buffers) |dbg| {
 dbg.dbg_line_buffer.deinit();
 dbg.dbg_info_buffer.deinit();
 var it = dbg.dbg_info_type_relocs.valueIterator();
@ -1163,7 +1167,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
|
||||
}
|
||||
}
|
||||
|
||||
const res = if (debug_buffers) |*dbg|
|
||||
const res = if (debug_buffers) |dbg|
|
||||
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
|
||||
.dwarf = .{
|
||||
.dbg_line = &dbg.dbg_line_buffer,
|
||||
@ -1172,9 +1176,109 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
|
||||
},
|
||||
})
|
||||
else
|
||||
try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
|
||||
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
|
||||
switch (res) {
|
||||
.appended => {},
|
||||
.fail => |em| {
|
||||
// Clear any PIE fixups for this decl.
|
||||
self.pie_fixups.shrinkRetainingCapacity(0);
|
||||
// Clear any stub fixups for this decl.
|
||||
self.stub_fixups.shrinkRetainingCapacity(0);
|
||||
decl.analysis = .codegen_failure;
|
||||
try module.failed_decls.put(module.gpa, decl, em);
|
||||
return;
|
||||
},
|
||||
}
|
||||
const symbol = try self.placeDecl(decl, code_buffer.items.len);
|
||||
|
||||
return self.finishUpdateDecl(module, decl, res);
|
||||
// Calculate displacements to target addr (if any).
|
||||
while (self.pie_fixups.popOrNull()) |fixup| {
|
||||
assert(fixup.size == 4);
|
||||
const this_addr = symbol.n_value + fixup.offset;
|
||||
const target_addr = fixup.target_addr;
|
||||
|
||||
switch (self.base.options.target.cpu.arch) {
|
||||
.x86_64 => {
|
||||
const displacement = try math.cast(u32, target_addr - this_addr - 4);
|
||||
mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement);
|
||||
},
|
||||
.aarch64 => {
|
||||
// TODO optimize instruction based on jump length (use ldr(literal) + nop if possible).
|
||||
{
|
||||
const inst = code_buffer.items[fixup.offset..][0..4];
|
||||
const parsed = mem.bytesAsValue(meta.TagPayload(
|
||||
aarch64.Instruction,
|
||||
aarch64.Instruction.pc_relative_address,
|
||||
), inst);
|
||||
const this_page = @intCast(i32, this_addr >> 12);
|
||||
const target_page = @intCast(i32, target_addr >> 12);
|
||||
const pages = @bitCast(u21, @intCast(i21, target_page - this_page));
|
||||
parsed.immhi = @truncate(u19, pages >> 2);
|
||||
parsed.immlo = @truncate(u2, pages);
|
||||
}
|
||||
{
|
||||
const inst = code_buffer.items[fixup.offset + 4 ..][0..4];
|
||||
const parsed = mem.bytesAsValue(meta.TagPayload(
|
||||
aarch64.Instruction,
|
||||
aarch64.Instruction.load_store_register,
|
||||
), inst);
|
||||
const narrowed = @truncate(u12, target_addr);
|
||||
const offset = try math.divExact(u12, narrowed, 8);
|
||||
parsed.offset = offset;
|
||||
}
|
||||
},
|
||||
else => unreachable, // unsupported target architecture
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve stubs (if any)
|
||||
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
const stubs = text_segment.sections.items[self.stubs_section_index.?];
|
||||
for (self.stub_fixups.items) |fixup| {
|
||||
const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2;
|
||||
const text_addr = symbol.n_value + fixup.start;
|
||||
switch (self.base.options.target.cpu.arch) {
|
||||
.x86_64 => {
|
||||
assert(stub_addr >= text_addr + fixup.len);
|
||||
const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len);
|
||||
const placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)];
|
||||
mem.writeIntSliceLittle(u32, placeholder, displacement);
|
||||
},
|
||||
.aarch64 => {
|
||||
assert(stub_addr >= text_addr);
|
||||
const displacement = try math.cast(i28, stub_addr - text_addr);
|
||||
const placeholder = code_buffer.items[fixup.start..][0..fixup.len];
|
||||
mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32());
|
||||
},
|
||||
else => unreachable, // unsupported target architecture
|
||||
}
|
||||
if (!fixup.already_defined) {
|
||||
try self.writeStub(fixup.symbol);
|
||||
try self.writeStubInStubHelper(fixup.symbol);
|
||||
try self.writeLazySymbolPointer(fixup.symbol);
|
||||
|
||||
self.rebase_info_dirty = true;
|
||||
self.lazy_binding_info_dirty = true;
|
||||
}
|
||||
}
|
||||
self.stub_fixups.shrinkRetainingCapacity(0);
|
||||
|
||||
try self.writeCode(symbol, code_buffer.items);
|
||||
|
||||
if (debug_buffers) |db| {
|
||||
try self.d_sym.?.commitDeclDebugInfo(
|
||||
self.base.allocator,
|
||||
module,
|
||||
decl,
|
||||
db,
|
||||
self.base.options.target,
|
||||
);
|
||||
}
|
||||
|
||||
// Since we updated the vaddr and the size, each corresponding export symbol also
|
||||
// needs to be updated.
|
||||
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
|
||||
try self.updateDeclExports(module, decl, decl_exports);
|
||||
}
|
||||
|
||||
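Note on the fixup arithmetic above: on x86_64 the rel32 displacement is target_addr - this_addr - 4 because the CPU resolves it relative to the instruction following the 4-byte immediate. On aarch64 the fixup patches an adrp/ldr pair: the signed 21-bit page delta is split across adrp's immlo (low 2 bits) and immhi (high 19 bits), and the following ldr takes the low 12 bits of the target scaled by the 8-byte access size. A self-contained sketch of just that bit-splitting, using hypothetical addresses:

    const std = @import("std");
    const assert = std.debug.assert;

    test "adrp/ldr immediate split (sketch, hypothetical addresses)" {
        const this_addr: u64 = 0x100003f00;
        const target_addr: u64 = 0x100008010;
        const this_page = @intCast(i32, this_addr >> 12); // 0x100003
        const target_page = @intCast(i32, target_addr >> 12); // 0x100008
        const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); // 5 pages apart
        assert(@truncate(u19, pages >> 2) == 1); // immhi: high 19 bits of the delta
        assert(@truncate(u2, pages) == 1); // immlo: low 2 bits of the delta
        const narrowed = @truncate(u12, target_addr); // 0x010 within the page
        assert(narrowed / 8 == 2); // ldr offset, scaled by the 8-byte access size
    }
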
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
@ -1194,9 +1298,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();

var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null;
var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
const debug_buffers = if (self.d_sym) |*ds| blk: {
debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
break :blk &debug_buffers_buf;
} else null;
defer {
if (debug_buffers) |*dbg| {
if (debug_buffers) |dbg| {
dbg.dbg_line_buffer.deinit();
dbg.dbg_info_buffer.deinit();
var it = dbg.dbg_info_type_relocs.valueIterator();
@ -1207,7 +1315,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
}
}

const res = if (debug_buffers) |*dbg|
const res = if (debug_buffers) |dbg|
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl.val,
@ -1224,33 +1332,37 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
.val = decl.val,
}, &code_buffer, .none);

return self.finishUpdateDecl(module, decl, res);
}

fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void {
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
// Clear any PIE fixups for this decl.
self.pie_fixups.shrinkRetainingCapacity(0);
// Clear any stub fixups for this decl.
self.stub_fixups.shrinkRetainingCapacity(0);
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
const symbol = try self.placeDecl(decl, code.len);
assert(self.pie_fixups.items.len == 0);
assert(self.stub_fixups.items.len == 0);

try self.writeCode(symbol, code);

// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
}

fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 {
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const symbol = &self.locals.items[decl.link.macho.local_sym_index];

if (decl.link.macho.size != 0) {
const capacity = decl.link.macho.capacity(self.*);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment);
const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment);

log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });

@ -1265,10 +1377,10 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code
}

symbol.n_value = vaddr;
} else if (code.len < decl.link.macho.size) {
self.shrinkTextBlock(&decl.link.macho, code.len);
} else if (code_len < decl.link.macho.size) {
self.shrinkTextBlock(&decl.link.macho, code_len);
}
decl.link.macho.size = code.len;
decl.link.macho.size = code_len;

const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
defer self.base.allocator.free(new_name);
@ -1286,7 +1398,7 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code
defer self.base.allocator.free(decl_name);

const name_str_index = try self.makeString(decl_name);
const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment);

log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr });

@ -1311,96 +1423,15 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code
try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
}

// Calculate displacements to target addr (if any).
while (self.pie_fixups.popOrNull()) |fixup| {
assert(fixup.size == 4);
const this_addr = symbol.n_value + fixup.offset;
const target_addr = fixup.target_addr;

switch (self.base.options.target.cpu.arch) {
.x86_64 => {
const displacement = try math.cast(u32, target_addr - this_addr - 4);
mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement);
},
.aarch64 => {
// TODO optimize instruction based on jump length (use ldr(literal) + nop if possible).
{
const inst = code_buffer.items[fixup.offset..][0..4];
var parsed = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), inst);
const this_page = @intCast(i32, this_addr >> 12);
const target_page = @intCast(i32, target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - this_page));
parsed.immhi = @truncate(u19, pages >> 2);
parsed.immlo = @truncate(u2, pages);
}
{
const inst = code_buffer.items[fixup.offset + 4 ..][0..4];
var parsed = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst);
const narrowed = @truncate(u12, target_addr);
const offset = try math.divExact(u12, narrowed, 8);
parsed.offset = offset;
}
},
else => unreachable, // unsupported target architecture
}
}

// Resolve stubs (if any)
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const stubs = text_segment.sections.items[self.stubs_section_index.?];
for (self.stub_fixups.items) |fixup| {
const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2;
const text_addr = symbol.n_value + fixup.start;
switch (self.base.options.target.cpu.arch) {
.x86_64 => {
assert(stub_addr >= text_addr + fixup.len);
const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len);
var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)];
mem.writeIntSliceLittle(u32, placeholder, displacement);
},
.aarch64 => {
assert(stub_addr >= text_addr);
const displacement = try math.cast(i28, stub_addr - text_addr);
var placeholder = code_buffer.items[fixup.start..][0..fixup.len];
mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32());
},
else => unreachable, // unsupported target architecture
}
if (!fixup.already_defined) {
try self.writeStub(fixup.symbol);
try self.writeStubInStubHelper(fixup.symbol);
try self.writeLazySymbolPointer(fixup.symbol);

self.rebase_info_dirty = true;
self.lazy_binding_info_dirty = true;
}
}
self.stub_fixups.shrinkRetainingCapacity(0);
return symbol;
}

fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void {
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const text_section = text_segment.sections.items[self.text_section_index.?];
const section_offset = symbol.n_value - text_section.addr;
const file_offset = text_section.offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);

if (debug_buffers) |*db| {
try self.d_sym.?.commitDeclDebugInfo(
self.base.allocator,
module,
decl,
db,
self.base.options.target,
);
}

// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
}

pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {

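Note: the MachO refactor above splits the old monolithic decl update into three steps: codegen into code_buffer, placeDecl to grow, shrink, or allocate the decl's text block and return its symbol, and writeCode to write the bytes at the symbol's offset within the text section. A condensed sketch of the resulting call sequence, using the names defined above with error handling elided:

    // Condensed flow of the new decl update path (sketch; see the real
    // bodies above for fixups, debug info, and export updates).
    fn updateDeclFlowSketch(self: *MachO, decl: *Module.Decl, code: []const u8) !void {
        // 1. Pick a vaddr for the code and get the decl's symbol.
        const symbol = try self.placeDecl(decl, code.len);
        // 2. Write the machine code at symbol.n_value's file offset.
        try self.writeCode(symbol, code);
    }
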
@ -51,7 +51,12 @@ base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function.
decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{},
decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{},

const DeclGenContext = struct {
air: Air,
liveness: Liveness,
};

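Note: because the SPIR-V backend defers all codegen to flushModule, updateFunc presumably now records each function's Air and Liveness in decl_table rather than generating code eagerly. The updateFunc body is not part of this hunk, so the following is a hypothetical sketch of what that insertion could look like:

    // Hypothetical sketch only; the real updateFunc is not shown in this diff.
    pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
        // Stash the analysis results so flushModule can generate code later.
        try self.decl_table.put(self.base.allocator, func.owner_decl, .{
            .air = air,
            .liveness = liveness,
        });
    }
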
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
const spirv = try gpa.create(SpirV);
@ -181,10 +186,15 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
var decl_gen = codegen.DeclGen.init(&spv);
defer decl_gen.deinit();

for (self.decl_table.keys()) |decl| {
var it = self.decl_table.iterator();
while (it.next()) |entry| {
const decl = entry.key_ptr.*;
if (!decl.has_tv) continue;

if (try decl_gen.gen(decl)) |msg| {
const air = entry.value_ptr.air;
const liveness = entry.value_ptr.liveness;

if (try decl_gen.gen(decl, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl, msg);
return; // TODO: Attempt to generate more decls?
}

@ -250,6 +250,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {

var context = codegen.Context{
.gpa = self.base.allocator,
.air = undefined,
.liveness = undefined,
.values = .{},
.code = fn_data.code.toManaged(self.base.allocator),
.func_type_data = fn_data.functype.toManaged(self.base.allocator),

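Note: the Wasm updateDecl path initializes .air and .liveness to undefined because non-function decls never read them; only the function path receives real values. That path (updateFunc) is not part of this hunk, so the following fragment is an assumption about how it would populate the same struct, mirroring the literal above:

    // Hypothetical sketch; `air` and `liveness` would be the parameters of
    // the corresponding updateFunc, which this hunk does not show.
    var context = codegen.Context{
        .gpa = self.base.allocator,
        .air = air,
        .liveness = liveness,
        .values = .{},
        .code = fn_data.code.toManaged(self.base.allocator),
        .func_type_data = fn_data.functype.toManaged(self.base.allocator),
    };
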