Merge pull request #12772 from ziglang/coff-basic-imports

coff: implement enough of the incremental linker to pass behavior and incremental tests on Windows
Jakub Konka 2022-09-09 13:08:58 +02:00 committed by GitHub
commit 56b96cd61b
28 changed files with 1465 additions and 614 deletions

View File

@ -73,8 +73,7 @@ jobs:
& "$ZIGINSTALLDIR\bin\zig.exe" build test docs `
--search-prefix "$ZIGPREFIXPATH" `
-Dstatic-llvm `
-Dskip-non-native `
-Dskip-stage2-tests
-Dskip-non-native
CheckLastExitCode
name: test
displayName: 'Test'

View File

@ -990,6 +990,8 @@ pub const File = struct {
return index;
}
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
if (is_windows) {
return windows.ReadFile(self.handle, buffer, offset, self.intended_io_mode);
@ -1004,6 +1006,8 @@ pub const File = struct {
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the file reached the end. Reaching the end of a file is not an error condition.
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn preadAll(self: File, buffer: []u8, offset: u64) PReadError!usize {
var index: usize = 0;
while (index != buffer.len) {
@ -1058,6 +1062,8 @@ pub const File = struct {
}
/// See https://github.com/ziglang/zig/issues/7699
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn preadv(self: File, iovecs: []const os.iovec, offset: u64) PReadError!usize {
if (is_windows) {
// TODO improve this to use ReadFileScatter
@ -1079,6 +1085,8 @@ pub const File = struct {
/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
/// order to handle partial reads from the underlying OS layer.
/// See https://github.com/ziglang/zig/issues/7699
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn preadvAll(self: File, iovecs: []os.iovec, offset: u64) PReadError!usize {
if (iovecs.len == 0) return 0;
@ -1122,6 +1130,8 @@ pub const File = struct {
}
}
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
if (is_windows) {
return windows.WriteFile(self.handle, bytes, offset, self.intended_io_mode);
@ -1134,6 +1144,8 @@ pub const File = struct {
}
}
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pwriteAll(self: File, bytes: []const u8, offset: u64) PWriteError!void {
var index: usize = 0;
while (index < bytes.len) {
@ -1179,6 +1191,8 @@ pub const File = struct {
}
/// See https://github.com/ziglang/zig/issues/7699
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pwritev(self: File, iovecs: []os.iovec_const, offset: u64) PWriteError!usize {
if (is_windows) {
// TODO improve this to use WriteFileScatter
@ -1197,6 +1211,8 @@ pub const File = struct {
/// The `iovecs` parameter is mutable because this function needs to mutate the fields in
/// order to handle partial writes from the underlying OS layer.
/// See https://github.com/ziglang/zig/issues/7699
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pwritevAll(self: File, iovecs: []os.iovec_const, offset: u64) PWriteError!void {
if (iovecs.len == 0) return;
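
Since `pread*` and `pwrite*` currently move the Windows file pointer (issue 12783 linked above), callers that mix positional and streaming I/O need to save and restore the position themselves for now. A minimal sketch of that workaround, mirroring what the Wasm linker does later in this commit (the helper name is hypothetical):

    const std = @import("std");
    const builtin = @import("builtin");

    /// Hypothetical helper: positional write that leaves the stream position
    /// untouched on Windows (https://github.com/ziglang/zig/issues/12783).
    fn pwriteAllPreservingPos(file: std.fs.File, bytes: []const u8, offset: u64) !void {
        if (builtin.target.os.tag == .windows) {
            const saved_pos = try file.getPos();
            try file.pwriteAll(bytes, offset);
            try file.seekTo(saved_pos);
        } else {
            try file.pwriteAll(bytes, offset);
        }
    }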

View File

@ -36,6 +36,10 @@ pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking
fn getStdOutHandle() os.fd_t {
if (builtin.os.tag == .windows) {
if (builtin.zig_backend == .stage2_x86_64) {
// TODO: this is just a temporary workaround until we advance x86 backend further along.
return os.windows.GetStdHandle(os.windows.STD_OUTPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
}
return os.windows.peb().ProcessParameters.hStdOutput;
}
@ -58,6 +62,10 @@ pub fn getStdOut() File {
fn getStdErrHandle() os.fd_t {
if (builtin.os.tag == .windows) {
if (builtin.zig_backend == .stage2_x86_64) {
// TODO: this is just a temporary workaround until we advance x86 backend further along.
return os.windows.GetStdHandle(os.windows.STD_ERROR_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
}
return os.windows.peb().ProcessParameters.hStdError;
}
@ -80,6 +88,10 @@ pub fn getStdErr() File {
fn getStdInHandle() os.fd_t {
if (builtin.os.tag == .windows) {
if (builtin.zig_backend == .stage2_x86_64) {
// TODO: this is just a temporary workaround until we advance x86 backend further along.
return os.windows.GetStdHandle(os.windows.STD_INPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
}
return os.windows.peb().ProcessParameters.hStdInput;
}

View File

@ -348,7 +348,13 @@ pub extern "kernel32" fn WriteFile(
in_out_lpOverlapped: ?*OVERLAPPED,
) callconv(WINAPI) BOOL;
pub extern "kernel32" fn WriteFileEx(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpOverlapped: *OVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE) callconv(WINAPI) BOOL;
pub extern "kernel32" fn WriteFileEx(
hFile: HANDLE,
lpBuffer: [*]const u8,
nNumberOfBytesToWrite: DWORD,
lpOverlapped: *OVERLAPPED,
lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) BOOL;
pub extern "kernel32" fn LoadLibraryW(lpLibFileName: [*:0]const u16) callconv(WINAPI) ?HMODULE;

View File

@ -36,6 +36,10 @@ comptime {
if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
@export(main2, .{ .name = "main" });
}
} else if (builtin.os.tag == .windows) {
if (!@hasDecl(root, "wWinMainCRTStartup") and !@hasDecl(root, "mainCRTStartup")) {
@export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
}
} else if (builtin.os.tag == .wasi and @hasDecl(root, "main")) {
@export(wasiMain2, .{ .name = "_start" });
} else {

File diff suppressed because it is too large

View File

@ -283,10 +283,11 @@ fn mirPushPopRegisterList(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerErro
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const payload = emit.mir.instructions.items(.data)[inst].payload;
const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
const reg_list = Mir.RegisterList(Register, &abi.callee_preserved_regs).fromInt(save_reg_list.register_list);
var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
inline for (abi.callee_preserved_regs) |reg| {
if (reg_list.isSet(reg)) {
const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
for (callee_preserved_regs) |reg| {
if (reg_list.isSet(callee_preserved_regs, reg)) {
switch (tag) {
.push => try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
.disp = @bitCast(u32, disp),
@ -614,14 +615,15 @@ inline fn immOpSize(u_imm: u32) u6 {
fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const imm = emit.mir.instructions.items(.data)[inst].imm;
// OP reg1, [reg2 + scale*rcx + imm32]
const payload = emit.mir.instructions.items(.data)[inst].payload;
const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
// OP reg1, [reg2 + scale*index + imm32]
const scale_index = ScaleIndex{
.scale = scale,
.index = .rcx,
.index = index_reg_disp.index,
};
return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.disp = index_reg_disp.disp,
.base = ops.reg2,
.scale_index = scale_index,
}), emit.code);
@ -630,22 +632,16 @@ fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
fn mirArithScaleDst(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const imm = emit.mir.instructions.items(.data)[inst].imm;
const payload = emit.mir.instructions.items(.data)[inst].payload;
const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
const scale_index = ScaleIndex{
.scale = scale,
.index = .rax,
.index = index_reg_disp.index,
};
if (ops.reg2 == .none) {
// OP qword ptr [reg1 + scale*rax + 0], imm32
return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
.disp = 0,
.base = ops.reg1,
.scale_index = scale_index,
}), imm, emit.code);
}
// OP [reg1 + scale*rax + imm32], reg2
assert(ops.reg2 != .none);
// OP [reg1 + scale*index + imm32], reg2
return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
.disp = imm,
.disp = index_reg_disp.disp,
.base = ops.reg1,
.scale_index = scale_index,
}), ops.reg2, emit.code);
@ -655,24 +651,24 @@ fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const scale = ops.flags;
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
const scale_index = ScaleIndex{
.scale = scale,
.index = .rax,
.index = index_reg_disp_imm.index,
};
// OP qword ptr [reg1 + scale*rax + imm32], imm32
// OP qword ptr [reg1 + scale*index + imm32], imm32
return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
.disp = imm_pair.dest_off,
.disp = index_reg_disp_imm.disp,
.base = ops.reg1,
.scale_index = scale_index,
}), imm_pair.operand, emit.code);
}), index_reg_disp_imm.imm, emit.code);
}
fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst].decode();
assert(ops.reg2 == .none);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
const ptr_size: Memory.PtrSize = switch (ops.flags) {
0b00 => .byte_ptr,
0b01 => .word_ptr,
@ -681,14 +677,14 @@ fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!v
};
const scale_index = ScaleIndex{
.scale = 0,
.index = .rax,
.index = index_reg_disp_imm.index,
};
// OP ptr [reg1 + rax*1 + imm32], imm32
// OP ptr [reg1 + index + imm32], imm32
return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
.disp = imm_pair.dest_off,
.disp = index_reg_disp_imm.disp,
.base = ops.reg1,
.scale_index = scale_index,
}), imm_pair.operand, emit.code);
}), index_reg_disp_imm.imm, emit.code);
}
fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
@ -956,18 +952,19 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
},
0b10 => {
// lea reg, [rbp + rcx + imm32]
const imm = emit.mir.instructions.items(.data)[inst].imm;
// lea reg, [rbp + index + imm32]
const payload = emit.mir.instructions.items(.data)[inst].payload;
const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
const scale_index = ScaleIndex{
.scale = 0,
.index = .rcx,
.index = index_reg_disp.index,
};
return lowerToRmEnc(
.lea,
ops.reg1,
RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
.disp = imm,
.disp = index_reg_disp.disp,
.base = src_reg,
.scale_index = scale_index,
}),
@ -985,8 +982,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const relocation = emit.mir.instructions.items(.data)[inst].relocation;
switch (ops.flags) {
0b00, 0b01 => {},
else => return emit.fail("TODO unused LEA PIC variants 0b10 and 0b11", .{}),
0b00, 0b01, 0b10 => {},
else => return emit.fail("TODO unused LEA PIC variant 0b11", .{}),
}
// lea reg1, [rip + reloc]
@ -1024,6 +1021,7 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.@"type" = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
0b10 => .imports,
else => unreachable,
},
.target = .{ .sym_index = relocation.sym_index, .file = null },
@ -1031,7 +1029,6 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.addend = 0,
.pcrel = true,
.length = 2,
.prev_vaddr = atom.getSymbol(coff_file).value,
});
} else {
return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
@ -1157,6 +1154,17 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
try atom.addRelocation(coff_file, .{
.@"type" = .direct,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = offset,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else {
return emit.fail("TODO implement call_extern for linking backends different than MachO", .{});
}
@ -2241,6 +2249,7 @@ fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *st
encoder.rex(.{
.w = wide,
.b = base.isExtended(),
.x = if (mem_op.scale_index) |si| si.index.isExtended() else false,
});
}
opc.encode(encoder);
@ -2346,10 +2355,12 @@ fn lowerToMiXEnc(
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr,
.b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr,
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
}
opc.encode(encoder);
@ -2401,11 +2412,13 @@ fn lowerToRmEnc(
.w = setRexWRegister(reg),
.r = reg.isExtended(),
.b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
}
opc.encode(encoder);
@ -2446,11 +2459,13 @@ fn lowerToMrEnc(
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(),
.b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
}
opc.encode(encoder);
@ -2490,11 +2505,13 @@ fn lowerToRmiEnc(
.w = setRexWRegister(reg),
.r = reg.isExtended(),
.b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
encoder.rex(.{
.w = setRexWRegister(reg),
.r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
}
opc.encode(encoder);
@ -2531,10 +2548,12 @@ fn lowerToVmEnc(
vex.rex(.{
.r = reg.isExtended(),
.b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
vex.rex(.{
.r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
});
}
encoder.vex(enc.prefix);
@ -2571,10 +2590,12 @@ fn lowerToMvEnc(
vex.rex(.{
.r = reg.isExtended(),
.b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
} else {
vex.rex(.{
.r = reg.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
});
}
encoder.vex(enc.prefix);
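
The emitter above now reads the SIB index register and displacement out of the new Mir extra payloads instead of hardcoding rcx/rax, and it sets the REX.X bit whenever that index register is extended. A small round-trip sketch of the payload helpers defined in Mir.zig below (assuming the usual `std` and `Mir` imports are in scope):

    const payload = Mir.IndexRegisterDispImm.encode(.r9, 0x10, 42);
    const decoded = payload.decode();
    std.debug.assert(decoded.index == .r9);
    std.debug.assert(decoded.disp == 0x10);
    std.debug.assert(decoded.imm == 42);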

View File

@ -44,25 +44,28 @@ pub const Inst = struct {
/// 0b01 word ptr [reg1 + imm32], imm16
/// 0b10 dword ptr [reg1 + imm32], imm32
/// 0b11 qword ptr [reg1 + imm32], imm32 (sign-extended to imm64)
/// Notes:
/// * Uses `ImmPair` as payload
adc_mem_imm,
/// form: reg1, [reg2 + scale*rcx + imm32]
/// ops flags scale
/// 0b00 1
/// 0b01 2
/// 0b10 4
/// 0b11 8
adc_scale_src,
/// form: [reg1 + scale*rax + imm32], reg2
/// form: [reg1 + scale*rax + 0], imm32
/// form: reg1, [reg2 + scale*index + imm32]
/// ops flags scale
/// 0b00 1
/// 0b01 2
/// 0b10 4
/// 0b11 8
/// Notes:
/// * If reg2 is `none` then it means Data field `imm` is used as the immediate.
/// * Uses `IndexRegisterDisp` as payload
adc_scale_src,
/// form: [reg1 + scale*index + imm32], reg2
/// ops flags scale
/// 0b00 1
/// 0b01 2
/// 0b10 4
/// 0b11 8
/// Notes:
/// * Uses `IndexRegisterDisp` payload.
adc_scale_dst,
/// form: [reg1 + scale*rax + imm32], imm32
@ -72,14 +75,16 @@ pub const Inst = struct {
/// 0b10 4
/// 0b11 8
/// Notes:
/// * Data field `payload` points at `ImmPair`.
/// * Uses `IndexRegisterDispImm` payload.
adc_scale_imm,
/// ops flags: form:
/// 0b00 byte ptr [reg1 + rax + imm32], imm8
/// 0b01 word ptr [reg1 + rax + imm32], imm16
/// 0b10 dword ptr [reg1 + rax + imm32], imm32
/// 0b11 qword ptr [reg1 + rax + imm32], imm32 (sign-extended to imm64)
/// 0b00 byte ptr [reg1 + index + imm32], imm8
/// 0b01 word ptr [reg1 + index + imm32], imm16
/// 0b10 dword ptr [reg1 + index + imm32], imm32
/// 0b11 qword ptr [reg1 + index + imm32], imm32 (sign-extended to imm64)
/// Notes:
/// * Uses `IndexRegisterDispImm` payload.
adc_mem_index_imm,
// The following instructions all have the same encoding as `adc`.
@ -174,12 +179,15 @@ pub const Inst = struct {
/// 0b00 reg1, [reg2 + imm32]
/// 0b00 reg1, [ds:imm32]
/// 0b01 reg1, [rip + imm32]
/// 0b10 reg1, [reg2 + rcx + imm32]
/// 0b10 reg1, [reg2 + index + imm32]
/// Notes:
/// * 0b10 uses `IndexRegisterDisp` payload
lea,
/// ops flags: form:
/// 0b00 reg1, [rip + reloc] // via GOT PIC
/// 0b01 reg1, [rip + reloc] // direct load PIC
/// 0b10 reg1, [rip + reloc] // via imports table PIC
/// Notes:
/// * `Data` contains `relocation`
lea_pic,
@ -460,46 +468,103 @@ pub const Inst = struct {
}
};
pub fn RegisterList(comptime Reg: type, comptime registers: []const Reg) type {
assert(registers.len <= @bitSizeOf(u32));
return struct {
bitset: RegBitSet = RegBitSet.initEmpty(),
pub const IndexRegisterDisp = struct {
/// Index register to use with SIB-based encoding
index: u32,
const RegBitSet = IntegerBitSet(registers.len);
const Self = @This();
/// Displacement value
disp: u32,
fn getIndexForReg(reg: Reg) RegBitSet.MaskInt {
inline for (registers) |cpreg, i| {
if (reg.id() == cpreg.id()) return i;
}
unreachable; // register not in input register list!
pub fn encode(index: Register, disp: u32) IndexRegisterDisp {
return .{
.index = @enumToInt(index),
.disp = disp,
};
}
pub fn decode(this: IndexRegisterDisp) struct {
index: Register,
disp: u32,
} {
return .{
.index = @intToEnum(Register, this.index),
.disp = this.disp,
};
}
};
/// TODO: would it be worth making `IndexRegisterDisp` and `IndexRegisterDispImm` a variable length list
/// instead of having two structs, one a superset of the other one?
pub const IndexRegisterDispImm = struct {
/// Index register to use with SIB-based encoding
index: u32,
/// Displacement value
disp: u32,
/// Immediate
imm: u32,
pub fn encode(index: Register, disp: u32, imm: u32) IndexRegisterDispImm {
return .{
.index = @enumToInt(index),
.disp = disp,
.imm = imm,
};
}
pub fn decode(this: IndexRegisterDispImm) struct {
index: Register,
disp: u32,
imm: u32,
} {
return .{
.index = @intToEnum(Register, this.index),
.disp = this.disp,
.imm = this.imm,
};
}
};
/// Used in conjunction with `SaveRegisterList` payload to transfer a list of used registers
/// in a compact manner.
pub const RegisterList = struct {
bitset: BitSet = BitSet.initEmpty(),
const BitSet = IntegerBitSet(@ctz(@as(u32, 0)));
const Self = @This();
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
for (registers) |cpreg, i| {
if (reg.id() == cpreg.id()) return @intCast(u32, i);
}
unreachable; // register not in input register list!
}
pub fn push(self: *Self, reg: Reg) void {
const index = getIndexForReg(reg);
self.bitset.set(index);
}
pub fn push(self: *Self, registers: []const Register, reg: Register) void {
const index = getIndexForReg(registers, reg);
self.bitset.set(index);
}
pub fn isSet(self: Self, reg: Reg) bool {
const index = getIndexForReg(reg);
return self.bitset.isSet(index);
}
pub fn isSet(self: Self, registers: []const Register, reg: Register) bool {
const index = getIndexForReg(registers, reg);
return self.bitset.isSet(index);
}
pub fn asInt(self: Self) u32 {
return self.bitset.mask;
}
pub fn asInt(self: Self) u32 {
return self.bitset.mask;
}
pub fn fromInt(mask: u32) Self {
return .{
.bitset = RegBitSet{ .mask = @intCast(RegBitSet.MaskInt, mask) },
};
}
pub fn fromInt(mask: u32) Self {
return .{
.bitset = BitSet{ .mask = @intCast(BitSet.MaskInt, mask) },
};
}
pub fn count(self: Self) u32 {
return @intCast(u32, self.bitset.count());
}
};
}
pub fn count(self: Self) u32 {
return @intCast(u32, self.bitset.count());
}
};
pub const SaveRegisterList = struct {
/// Use `RegisterList` to populate.
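
`RegisterList` is no longer a generic parameterized by a fixed register slice; the callee-preserved set is now passed in at each call so the same Mir can serve both the SysV and Win64 conventions. A usage sketch under that assumption (with `std`, `Mir`, `abi`, and a `target: std.Target` in scope):

    var list = Mir.RegisterList{};
    const regs = abi.getCalleePreservedRegs(target);
    list.push(regs, .rbx);
    std.debug.assert(list.isSet(regs, .rbx));
    const encoded: u32 = list.asInt(); // stored in the SaveRegisterList payload
    const round_trip = Mir.RegisterList.fromInt(encoded);
    std.debug.assert(round_trip.count() == 1);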

View File

@ -392,23 +392,69 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
}
}
/// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
/// for anything else but stack offset tracking therefore we exclude them from this set.
pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
/// These registers need to be preserved (saved on the stack) and restored by the caller before
/// the caller relinquishes control to a subroutine via call instruction (or similar).
/// In other words, these registers are free to use by the callee.
pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 };
pub const SysV = struct {
/// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
/// for anything else but stack offset tracking therefore we exclude them from this set.
pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
/// These registers need to be preserved (saved on the stack) and restored by the caller before
/// the caller relinquishes control to a subroutine via call instruction (or similar).
/// In other words, these registers are free to use by the callee.
pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 };
pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
};
pub const Win64 = struct {
/// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
/// for anything else but stack offset tracking therefore we exclude them from this set.
pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 };
/// These registers need to be preserved (saved on the stack) and restored by the caller before
/// the caller relinquishes control to a subroutine via call instruction (or similar).
/// In other words, these registers are free to use by the callee.
pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 };
pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{.rax};
};
pub fn getCalleePreservedRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.callee_preserved_regs,
else => &SysV.callee_preserved_regs,
};
}
pub fn getCallerPreservedRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.caller_preserved_regs,
else => &SysV.caller_preserved_regs,
};
}
pub fn getCAbiIntParamRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.c_abi_int_param_regs,
else => &SysV.c_abi_int_param_regs,
};
}
pub fn getCAbiIntReturnRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.c_abi_int_return_regs,
else => &SysV.c_abi_int_return_regs,
};
}
const gp_regs = [_]Register{
.rbx, .r12, .r13, .r14, .r15, .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11,
};
const sse_avx_regs = [_]Register{
.ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
.ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
};
const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs ++ sse_avx_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
const allocatable_regs = gp_regs ++ sse_avx_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_regs);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
@ -417,15 +463,15 @@ pub const RegisterClass = struct {
var set = RegisterBitSet.initEmpty();
set.setRangeValue(.{
.start = 0,
.end = caller_preserved_regs.len + callee_preserved_regs.len,
.end = gp_regs.len,
}, true);
break :blk set;
};
pub const sse: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
set.setRangeValue(.{
.start = caller_preserved_regs.len + callee_preserved_regs.len,
.end = allocatable_registers.len,
.start = gp_regs.len,
.end = allocatable_regs.len,
}, true);
break :blk set;
};
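
With the register sets split into `SysV` and `Win64` namespaces, code generation asks for the active set through the target-dispatching getters instead of referencing the old module-level constants. For example (same assumptions about imports as above):

    const callee_regs = abi.getCalleePreservedRegs(target); // Win64 set on x86_64-windows, SysV set elsewhere
    const param_regs = abi.getCAbiIntParamRegs(target);     // .rcx/.rdx/.r8/.r9 vs .rdi/.rsi/.rdx/.rcx/.r8/.r9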

View File

@ -476,7 +476,7 @@ pub const File = struct {
log.debug("getGlobalSymbol '{s}'", .{name});
switch (base.tag) {
// zig fmt: off
.coff => unreachable,
.coff => return @fieldParentPtr(Coff, "base", base).getGlobalSymbol(name),
.elf => unreachable,
.macho => return @fieldParentPtr(MachO, "base", base).getGlobalSymbol(name),
.plan9 => unreachable,

File diff suppressed because it is too large

View File

@ -4,8 +4,6 @@ const std = @import("std");
const coff = std.coff;
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const Coff = @import("../Coff.zig");
const Reloc = Coff.Reloc;
const SymbolWithLoc = Coff.SymbolWithLoc;
@ -41,11 +39,6 @@ pub const empty = Atom{
.next = null,
};
pub fn deinit(self: *Atom, gpa: Allocator) void {
_ = self;
_ = gpa;
}
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
return coff_file.getSymbol(.{
@ -118,3 +111,13 @@ pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
}
try gop.value_ptr.append(gpa, offset);
}
pub fn addBinding(self: *Atom, coff_file: *Coff, target: SymbolWithLoc) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding binding to target %{d} in %{d})", .{ target.sym_index, self.sym_index });
const gop = try coff_file.bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, target);
}

View File

@ -793,11 +793,13 @@ fn linkOneShot(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node)
}
} else {
const sub_path = self.base.options.emit.?.sub_path;
self.base.file = try directory.handle.createFile(sub_path, .{
.truncate = true,
.read = true,
.mode = link.determineMode(self.base.options),
});
if (self.base.file == null) {
self.base.file = try directory.handle.createFile(sub_path, .{
.truncate = true,
.read = true,
.mode = link.determineMode(self.base.options),
});
}
// Index 0 is always a null symbol.
try self.locals.append(gpa, .{
.n_strx = 0,
@ -1155,6 +1157,29 @@ fn linkOneShot(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node)
var ncmds: u32 = 0;
try self.writeLinkeditSegmentData(&ncmds, lc_writer);
// If the last section of __DATA segment is zerofill section, we need to ensure
// that the free space between the end of the last non-zerofill section of __DATA
// segment and the beginning of __LINKEDIT segment is zerofilled as the loader will
// copy-paste this space into memory for quicker zerofill operation.
if (self.data_segment_cmd_index) |data_seg_id| blk: {
var physical_zerofill_start: u64 = 0;
const section_indexes = self.getSectionIndexes(data_seg_id);
for (self.sections.items(.header)[section_indexes.start..section_indexes.end]) |header| {
if (header.isZerofill() and header.size > 0) break;
physical_zerofill_start = header.offset + header.size;
} else break :blk;
const linkedit = self.segments.items[self.linkedit_segment_cmd_index.?];
const physical_zerofill_size = math.cast(usize, linkedit.fileoff - physical_zerofill_start) orelse
return error.Overflow;
if (physical_zerofill_size > 0) {
var padding = try self.base.allocator.alloc(u8, physical_zerofill_size);
defer self.base.allocator.free(padding);
mem.set(u8, padding, 0);
try self.base.file.?.pwriteAll(padding, physical_zerofill_start);
}
}
try writeDylinkerLC(&ncmds, lc_writer);
try self.writeMainLC(&ncmds, lc_writer);
try self.writeDylibIdLC(&ncmds, lc_writer);
@ -1435,7 +1460,6 @@ fn parseArchive(self: *MachO, path: []const u8, force_load: bool) !bool {
if (force_load) {
defer archive.deinit(gpa);
defer file.close();
// Get all offsets from the ToC
var offsets = std.AutoArrayHashMap(u32, void).init(gpa);
defer offsets.deinit();
@ -3086,15 +3110,6 @@ pub fn deinit(self: *MachO) void {
self.atom_by_index_table.deinit(gpa);
}
pub fn closeFiles(self: MachO) void {
for (self.archives.items) |archive| {
archive.file.close();
}
if (self.d_sym) |ds| {
ds.file.close();
}
}
fn freeAtom(self: *MachO, atom: *Atom, sect_id: u8, owns_atom: bool) void {
log.debug("freeAtom {*}", .{atom});
if (!owns_atom) {
@ -5698,8 +5713,10 @@ fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void {
else => unreachable,
}
if (self.getSectionByName("__DATA", "__thread_vars")) |_| {
header.flags |= macho.MH_HAS_TLV_DESCRIPTORS;
if (self.getSectionByName("__DATA", "__thread_vars")) |sect_id| {
if (self.sections.items(.header)[sect_id].size > 0) {
header.flags |= macho.MH_HAS_TLV_DESCRIPTORS;
}
}
header.ncmds = ncmds;

View File

@ -88,6 +88,7 @@ const ar_hdr = extern struct {
};
pub fn deinit(self: *Archive, allocator: Allocator) void {
self.file.close();
for (self.toc.keys()) |*key| {
allocator.free(key.*);
}

View File

@ -306,6 +306,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
}
pub fn deinit(self: *DebugSymbols, allocator: Allocator) void {
self.file.close();
self.segments.deinit(allocator);
self.sections.deinit(allocator);
self.dwarf.deinit();

View File

@ -695,12 +695,10 @@ pub fn deinit(self: *Wasm) void {
gpa.free(segment_info.name);
}
for (self.objects.items) |*object| {
object.file.?.close();
object.deinit(gpa);
}
for (self.archives.items) |*archive| {
archive.file.close();
archive.deinit(gpa);
}
@ -3218,14 +3216,26 @@ fn writeVecSectionHeader(file: fs.File, offset: u64, section: wasm.Section, size
buf[0] = @enumToInt(section);
leb.writeUnsignedFixed(5, buf[1..6], size);
leb.writeUnsignedFixed(5, buf[6..], items);
try file.pwriteAll(&buf, offset);
if (builtin.target.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12783
const curr_pos = try file.getPos();
try file.pwriteAll(&buf, offset);
try file.seekTo(curr_pos);
} else try file.pwriteAll(&buf, offset);
}
fn writeCustomSectionHeader(file: fs.File, offset: u64, size: u32) !void {
var buf: [1 + 5]u8 = undefined;
buf[0] = 0; // 0 = 'custom' section
leb.writeUnsignedFixed(5, buf[1..6], size);
try file.pwriteAll(&buf, offset);
if (builtin.target.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12783
const curr_pos = try file.getPos();
try file.pwriteAll(&buf, offset);
try file.seekTo(curr_pos);
} else try file.pwriteAll(&buf, offset);
}
fn emitLinkSection(self: *Wasm, file: fs.File, arena: Allocator, symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void {

View File

@ -95,6 +95,7 @@ const ar_hdr = extern struct {
};
pub fn deinit(archive: *Archive, allocator: Allocator) void {
archive.file.close();
for (archive.toc.keys()) |*key| {
allocator.free(key.*);
}

View File

@ -154,6 +154,9 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz
/// Frees all memory of `Object` at once. The given `Allocator` must be
/// the same allocator that was used when `init` was called.
pub fn deinit(self: *Object, gpa: Allocator) void {
if (self.file) |file| {
file.close();
}
for (self.func_types) |func_ty| {
gpa.free(func_ty.params);
gpa.free(func_ty.returns);

View File

@ -110,6 +110,10 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
return self.get(off) orelse unreachable;
}
pub fn items(self: Self) []const u8 {
return self.buffer.items;
}
pub fn len(self: Self) usize {
return self.buffer.items.len;
}

View File

@ -177,6 +177,8 @@ const TestManifestConfigDefaults = struct {
inline for (&[_][]const u8{ "x86_64", "aarch64" }) |arch| {
defaults = defaults ++ arch ++ "-macos" ++ ",";
}
// Windows
defaults = defaults ++ "x86_64-windows" ++ ",";
// Wasm
defaults = defaults ++ "wasm32-wasi";
return defaults;
@ -1546,6 +1548,12 @@ pub const TestContext = struct {
.self_exe_path = std.testing.zig_exe_path,
// TODO instead of turning off color, pass in a std.Progress.Node
.color = .off,
// TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
// until the auto-select mechanism deems them worthy
.use_lld = switch (case.backend) {
.stage2 => false,
else => null,
},
});
defer comp.destroy();

View File

@ -2,5 +2,5 @@
// output_mode=Exe
// target=aarch64-macos
//
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@ -2,5 +2,5 @@
// output_mode=Exe
// target=x86_64-linux
//
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@ -2,5 +2,5 @@
// output_mode=Exe
// target=x86_64-macos
//
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@ -0,0 +1,6 @@
// error
// output_mode=Exe
// target=x86_64-windows
//
// :130:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@ -0,0 +1,6 @@
pub export fn main() noreturn {}
// error
//
// :1:32: error: function declared 'noreturn' returns
// :1:22: note: 'noreturn' declared here

View File

@ -0,0 +1,16 @@
const std = @import("std");
pub fn main() void {
print();
}
fn print() void {
const msg = "Hello, World!\n";
const stdout = std.io.getStdOut();
stdout.writeAll(msg) catch unreachable;
}
// run
//
// Hello, World!
//

View File

@ -28,11 +28,22 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
}
fn addWasmCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/link/wasm/archive/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/bss/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/extern/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
.use_emulation = true,
});
cases.addBuildFile("test/link/wasm/segments/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
@ -47,17 +58,6 @@ fn addWasmCases(cases: *tests.StandaloneContext) void {
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/archive/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/extern/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
.use_emulation = true,
});
}
fn addMachOCases(cases: *tests.StandaloneContext) void {

View File

@ -108,6 +108,14 @@ const test_targets = blk: {
},
.backend = .stage2_x86_64,
},
.{
.target = .{
.cpu_arch = .x86_64,
.os_tag = .windows,
.abi = .gnu,
},
.backend = .stage2_x86_64,
},
.{
.target = .{
@ -693,6 +701,8 @@ pub fn addPkgTests(
else => {
these_tests.use_stage1 = false;
these_tests.use_llvm = false;
// TODO: force self-hosted linkers to avoid LLD creeping in until the auto-select mechanism deems them worthy
these_tests.use_lld = false;
},
};