Merge pull request #14502 from ziglang/link-owned-atoms

link: move ownership of linker atom from frontend to the linkers
This commit is contained in:
Jakub Konka 2023-02-02 01:39:01 +01:00 committed by GitHub
commit 304420b99c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 2381 additions and 2172 deletions

View File

@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,

View File

@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct {
options: std.builtin.ExportOptions,
src: LazySrcLoc,
/// Represents the position of the export, if any, in the output file.
link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
@ -533,16 +531,8 @@ pub const Decl = struct {
/// What kind of a declaration is this.
kind: Kind,
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
link: link.File.LinkBlock,
/// Represents the function in the linked output file, if the `Decl` is a function.
/// This is stored here and not in `Fn` because `Decl` survives across updates but
/// `Fn` does not.
/// TODO Look into making `Fn` a longer lived structure and moving this field there
/// to save on memory usage.
fn_link: link.File.LinkFn,
/// TODO remove this once Wasm backend catches up
fn_link: ?link.File.Wasm.FnData = null,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
@ -4098,7 +4088,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
mod.deleteDeclExports(decl_index);
try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| {
@ -5183,20 +5173,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) {
.coff => {
// TODO Implement for COFF
},
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.macho => if (decl.fn_link.macho.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.plan9 => {
.coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@ -5265,33 +5242,15 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index));
}
_ = mod.compile_log_decls.swapRemove(decl_index);
mod.deleteDeclExports(decl_index);
try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
};
decl.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = {} },
.wasm => link.File.Wasm.FnData.empty,
else => null,
};
}
if (decl.getInnerNamespace()) |namespace| {
@ -5358,7 +5317,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| {
@ -5381,16 +5340,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
elf.deleteExport(exp.link.elf);
elf.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
macho.deleteExport(exp.link.macho);
try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
wasm.deleteExport(exp.link.wasm);
wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
coff.deleteExport(exp.link.coff);
coff.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
@ -5693,25 +5652,9 @@ pub fn allocateNewDecl(
.deletion_flag = false,
.zir_decl_index = 0,
.src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = {} },
.wasm => link.File.Wasm.FnData.empty,
else => null,
},
.generation = 0,
.is_pub = false,

View File

@ -5564,16 +5564,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility,
},
.src = src,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = .{} },
.elf => .{ .elf = .{} },
.macho => .{ .macho = .{} },
.plan9 => .{ .plan9 = null },
.c => .{ .c = {} },
.wasm => .{ .wasm = .{} },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,

View File

@ -203,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
loc,
);
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@ -255,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
try dw.genVarDbgInfo(
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
is_ptr,
loc,
);
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@ -4019,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@ -4301,34 +4294,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
.sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
.sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(func.owner_decl);
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = fn_owner_decl.link.plan9.got_index.?;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
@ -4349,11 +4345,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
.atom_index = atom_index,
.sym_index = sym_index,
},
},
@ -5488,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@ -5602,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct,
.import => .load_memory_import,
};
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@ -5796,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
.macho => blk: {
const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@ -6119,23 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try decl.link.macho.ensureInitialized(macho_file);
const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.macho.getSymbolIndex().?,
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.coff.getSymbolIndex().?,
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
const decl_block_index = try p9.seeDecl(decl_index);
const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6148,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
return MCValue{ .memory = vaddr };
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,

View File

@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(macho_file, .{
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
try atom.addRelocation(macho_file, .{
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
try atom.addRelocation(macho_file, .{
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
},
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) {
.load_memory_got,
.load_memory_ptr_got,
@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable,
};
try atom.addRelocation(coff_file, .{
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset,
.addend = 0,
@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
try atom.addRelocation(coff_file, .{
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset + 4,
.addend = 0,

View File

@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
loc,
);
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
try dw.genVarDbgInfo(
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
is_ptr,
loc,
);
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@ -4256,12 +4243,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@ -6084,15 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
const decl_block_index = try p9.seeDecl(decl_index);
const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6106,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
return MCValue{ .memory = vaddr };
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {

View File

@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(
name,
ty,
self.bin_file.tag,
self.mod_fn.owner_decl,
.{ .register = reg.dwarfLocOp() },
),
.register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
.register = reg.dwarfLocOp(),
}),
.stack_offset => {},
else => {},
},
@ -1721,12 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@ -2553,17 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.sym_index };
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
const decl_block_index = try p9.seeDecl(decl_index);
const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});

View File

@ -1216,11 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@ -3413,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(
name,
ty,
self.bin_file.tag,
self.mod_fn.owner_decl,
.{ .register = reg.dwarfLocOp() },
),
.register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
.register = reg.dwarfLocOp(),
}),
else => {},
},
else => {},
@ -4205,8 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}

View File

@ -1194,7 +1194,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa);
func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
func.decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa);
@ -1269,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
.bin_file = &func.bin_file.base,
.bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
.decl = func.decl,
.decl_index = func.decl_index,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@ -2117,33 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
const callee: ?*Decl = blk: {
const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
const decl = module.declPtr(function.data.owner_decl);
try decl.link.wasm.ensureInitialized(func.bin_file);
break :blk decl;
_ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
const atom = &ext_decl.link.wasm;
try atom.ensureInitialized(func.bin_file);
ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
ext_decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
ext_decl.fn_link.wasm.type_index,
ext_decl.fn_link.?.type_index,
);
break :blk ext_decl;
break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
const decl = module.declPtr(decl_ref.data);
try decl.link.wasm.ensureInitialized(func.bin_file);
break :blk decl;
_ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
break :blk decl_ref.data;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@ -2164,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
try func.addLabel(.call, direct.link.wasm.sym_index);
const atom_index = func.bin_file.decls.get(direct).?;
try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
@ -2477,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
},
@ -2760,9 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
try decl.link.wasm.ensureInitialized(func.bin_file);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = decl.link.wasm.sym_index;
const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
@ -5547,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{});
}

View File

@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
/// Reference to the file handler
bin_file: *link.File,
/// Reference to the Wasm module linker
bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and
/// must be freed manually.
error_msg: ?*Module.ErrorMsg = null,
@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals.
locals: []const u8,
/// The declaration that code is being generated for.
decl: *Module.Decl,
decl_index: Module.Decl.Index,
// Debug information
/// Holds the debug information for this emission
@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
// TODO: Determine the source location.
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
const mod = emit.bin_file.base.options.module.?;
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail;
}
@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
// globals can have index 0 as it represents the stack pointer
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (label != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (mem.pointer != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,

View File

@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) {
.linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
fn_owner_decl.link.macho.getSymbolIndex().?
else
fn_owner_decl.link.coff.getSymbolIndex().?;
const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
} else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
},
else => unreachable, // not a valid function parameter
};
try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@ -3995,19 +3996,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = got_addr },
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@ -4023,8 +4024,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@ -4040,11 +4041,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(func.owner_decl);
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = fn_owner_decl.link.plan9.got_index.?;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
@ -4080,15 +4082,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
.data = .{
.relocation = .{
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
.sym_index = sym_index,
},
},
.data = .{ .relocation = .{
.atom_index = atom_index,
.sym_index = sym_index,
} },
});
} else {
return self.fail("TODO implement calling extern functions", .{});
@ -6719,23 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try decl.link.macho.ensureInitialized(macho_file);
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.macho.getSymbolIndex().?,
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.coff.getSymbolIndex().?,
.sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
const decl_block_index = try p9.seeDecl(decl_index);
const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6748,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
return MCValue{ .memory = vaddr };
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,

View File

@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(macho_file, .{
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(coff_file, .{
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(macho_file, .{
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(coff_file, .{
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,

View File

@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
decl: *Decl,
decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
ids: *const std.AutoHashMap(Decl.Index, IdResult),
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
pub fn init(
allocator: Allocator,
module: *Module,
spv: *SpvModule,
ids: *const std.AutoHashMap(Decl.Index, IdResult),
) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
.decl = undefined,
.decl_index = undefined,
.air = undefined,
.liveness = undefined,
.ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@ -150,9 +158,9 @@ pub const DeclGen = struct {
/// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl.
pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
self.decl = decl;
self.decl_index = decl_index;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(self.decl);
const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@ -332,7 +340,7 @@ pub const DeclGen = struct {
};
const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl);
return decl.fn_link.spirv.id.toRef();
return self.ids.get(fn_decl_index).?.toRef();
}
const target = self.getTarget();
@ -553,8 +561,8 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
const decl = self.decl;
const result_id = decl.fn_link.spirv.id;
const result_id = self.ids.get(self.decl_index).?;
const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
const src_loc = loc.toSrcLoc(self.decl);
const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);

View File

@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.Atom,
macho: MachO.Atom,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
spirv: void,
nvptx: void,
};
pub const LinkFn = union {
elf: Dwarf.SrcFn,
coff: Coff.SrcFn,
macho: Dwarf.SrcFn,
plan9: void,
c: void,
wasm: Wasm.FnData,
spirv: SpirV.FnData,
nvptx: void,
};
pub const Export = union {
elf: Elf.Export,
coff: Coff.Export,
macho: MachO.Export,
plan9: Plan9.Export,
c: void,
wasm: Wasm.Export,
spirv: void,
nvptx: void,
};
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@ -580,22 +547,23 @@ pub const File = struct {
}
}
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}

View File

@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
_ = decl;
_ = decl_index;
}
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {

View File

@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};
const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,
last_atom: ?*Atom = null,
last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(*Atom) = .{},
free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
/// Per-`Decl` linker bookkeeping: the atom that holds the Decl's machine
/// code/data, the output section that atom was placed in, and the symbol
/// index of every export alias created for this Decl.
const DeclMetadata = struct {
    /// Index of the atom backing this Decl.
    atom: Atom.Index,
    /// Index of the output section the atom belongs to.
    section: u16,
    /// A list of all export aliases of this Decl (symbol indices).
    exports: std.ArrayListUnmanaged(u32) = .{},

    /// Returns the symbol index of the export whose symbol name equals
    /// `name`, or null if this Decl has no export by that name.
    /// Linear scan over `exports`; the list is expected to be small.
    fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
        for (m.exports.items) |exp| {
            if (mem.eql(u8, name, coff_file.getSymbolName(.{
                .sym_index = exp,
                .file = null,
            }))) return exp;
        }
        return null;
    }

    /// Same lookup as `getExport`, but returns a mutable pointer into
    /// `exports` so the caller can update the stored symbol index in place.
    /// NOTE(review): the returned pointer is invalidated if `exports` grows —
    /// do not hold it across an append.
    fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
        for (m.exports.items) |*exp| {
            if (mem.eql(u8, name, coff_file.getSymbolName(.{
                .sym_index = exp.*,
                .file = null,
            }))) return exp;
        }
        return null;
    }
};
pub const PtrWidth = enum {
@ -168,11 +195,6 @@ pub const PtrWidth = enum {
};
}
};
pub const SrcFn = void;
pub const Export = struct {
sym_index: ?u32 = null,
};
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
}
self.sections.deinit(gpa);
for (self.managed_atoms.items) |atom| {
gpa.destroy(atom);
}
self.managed_atoms.deinit(gpa);
self.atoms.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa);
self.decls.deinit(gpa);
{
var it = self.decls.iterator();
while (it.next()) |entry| {
entry.value_ptr.exports.deinit(gpa);
}
self.decls.deinit(gpa);
}
self.atom_by_index_table.deinit(gpa);
{
@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
if (maybe_last_atom.*) |last_atom| {
var atom = last_atom;
if (maybe_last_atom_index) |last_atom_index| {
var atom_index = last_atom_index;
while (true) {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.value += diff;
if (atom.prev) |prev| {
atom = prev;
if (atom.prev_index) |prev_index| {
atom_index = prev_index;
} else break;
}
}
@ -480,14 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
// We use these to indicate our intention to update metadata, placing the new atom,
@ -495,7 +523,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var atom_placement: ?*Atom = null;
var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@ -503,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
const big_atom = free_list.items[i];
const big_atom_index = free_list.items[i];
const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@ -531,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = big_atom;
atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
} else if (maybe_last_atom.*) |last| {
} else if (maybe_last_atom_index.*) |last_index| {
const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
atom_placement = last;
atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
}
};
const expand_section = atom_placement == null or atom_placement.?.next == null;
const expand_section = if (atom_placement) |placement_index|
self.getAtom(placement_index).next_index == null
else
true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
const current_size = if (maybe_last_atom.*) |last_atom| blk: {
const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0;
log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getSectionName(header),
header.pointer_to_raw_data,
new_offset,
});
const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data,
self.base.file.?,
@ -577,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
maybe_last_atom.* = atom;
maybe_last_atom_index.* = atom_index;
}
atom.size = new_atom_size;
atom.alignment = alignment;
if (atom.prev) |prev| {
prev.next = atom.next;
}
if (atom.next) |next| {
next.prev = atom.prev;
{
const atom_ptr = self.getAtomPtr(atom_index);
atom_ptr.size = new_atom_size;
atom_ptr.alignment = alignment;
}
if (atom_placement) |big_atom| {
atom.prev = big_atom;
atom.next = big_atom.next;
big_atom.next = atom;
if (atom.prev_index) |prev_index| {
const prev = self.getAtomPtr(prev_index);
prev.next_index = atom.next_index;
}
if (atom.next_index) |next_index| {
const next = self.getAtomPtr(next_index);
next.prev_index = atom.prev_index;
}
if (atom_placement) |big_atom_index| {
const big_atom = self.getAtomPtr(big_atom_index);
const atom_ptr = self.getAtomPtr(atom_index);
atom_ptr.prev_index = big_atom_index;
atom_ptr.next_index = big_atom.next_index;
big_atom.next_index = atom_index;
} else {
atom.prev = null;
atom.next = null;
const atom_ptr = self.getAtomPtr(atom_index);
atom_ptr.prev_index = null;
atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@ -701,24 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index;
}
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
const atom_index = @intCast(Atom.Index, self.atoms.items.len);
const atom = try self.atoms.addOne(gpa);
const sym_index = try self.allocateSymbol();
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
atom.* = .{
.sym_index = sym_index,
.file = null,
.size = 0,
.alignment = 0,
.prev_index = null,
.next_index = null,
};
log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
return atom_index;
}
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
const atom_index = try self.createAtom();
const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
try atom.addRelocation(self, .{
try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = 0,
@ -732,49 +791,46 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {},
.DEBUG => unreachable, // not possible
else => try atom.addBaseRelocation(self, 0),
else => try Atom.addBaseRelocation(self, atom_index, 0),
}
return atom;
return atom_index;
}
fn createImportAtom(self: *Coff) !*Atom {
const gpa = self.base.allocator;
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
fn createImportAtom(self: *Coff) !Atom.Index {
const atom_index = try self.createAtom();
const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value});
return atom;
return atom_index;
}
/// Returns the atom's current virtual address when it already satisfies the
/// requested `alignment` and has capacity for `new_atom_size`; otherwise
/// reallocates the atom and returns the new virtual address.
fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
    const atom = self.getAtom(atom_index);
    const sym = atom.getSymbol(self);
    const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
    const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
    if (!need_realloc) return sym.value;
    return self.allocateAtom(atom_index, new_atom_size, alignment);
}
/// Shrinking an atom is currently a no-op; the atom keeps its allocated
/// capacity.
fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
    _ = self;
    _ = atom_index;
    _ = new_block_size;
    // TODO check the new capacity, and if it crosses the size threshold into a big enough
    // capacity, insert a free list node for it.
}
fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
@ -784,18 +840,18 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset);
try self.resolveRelocs(atom);
try self.resolveRelocs(atom_index);
}
/// Writes a zero-filled, pointer-width (4 or 8 bytes depending on
/// `self.ptr_width`) blob for `atom_index` via `writeAtom`, which also
/// resolves the atom's relocations so the slot receives its real value.
fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
    switch (self.ptr_width) {
        .p32 => {
            var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
            try self.writeAtom(atom_index, &buffer);
        },
        .p64 => {
            var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
            try self.writeAtom(atom_index, &buffer);
        },
    }
}
@ -815,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
for (relocs.items) |*reloc| {
const target_atom = reloc.getTargetAtom(self) orelse continue;
const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue;
reloc.dirty = true;
@ -823,24 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
/// Resolves every relocation recorded for `atom_index` that is marked dirty.
/// No-op when the atom has no recorded relocations.
fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
    const relocs = self.relocs.get(atom_index) orelse return;

    log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});

    for (relocs.items) |*reloc| {
        // Skip relocations that are still up to date.
        if (!reloc.dirty) continue;
        try reloc.resolve(atom_index, self);
    }
}
fn freeAtom(self: *Coff, atom: *Atom) void {
log.debug("freeAtom {*}", .{atom});
// Remove any relocs and base relocs associated with this Atom
self.freeRelocationsForAtom(atom);
fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
log.debug("freeAtom {d}", .{atom_index});
const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@ -849,45 +908,46 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
if (free_list.items[i] == atom) {
if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
if (free_list.items[i] == atom.prev) {
if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
if (maybe_last_atom.*) |last_atom| {
if (last_atom == atom) {
if (atom.prev) |prev| {
const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
if (maybe_last_atom_index.*) |last_atom_index| {
if (last_atom_index == atom_index) {
if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
maybe_last_atom.* = prev;
maybe_last_atom_index.* = prev_index;
} else {
maybe_last_atom.* = null;
maybe_last_atom_index.* = null;
}
}
}
if (atom.prev) |prev| {
prev.next = atom.next;
if (atom.prev_index) |prev_index| {
const prev = self.getAtomPtr(prev_index);
prev.next_index = atom.next_index;
if (!already_have_free_list_node and prev.freeListEligible(self)) {
if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
free_list.append(gpa, prev) catch {};
free_list.append(gpa, prev_index) catch {};
}
} else {
atom.prev = null;
self.getAtomPtr(atom_index).prev_index = null;
}
if (atom.next) |next| {
next.prev = atom.prev;
if (atom.next_index) |next_index| {
self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
atom.next = null;
self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@ -910,7 +970,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
atom.sym_index = 0;
self.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@ -927,15 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const atom = &decl.link.coff;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(&decl.link.coff);
} else {
gop.value_ptr.* = null;
}
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -979,11 +1034,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}
const unnamed_consts = gop.value_ptr;
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
try self.managed_atoms.append(gpa, atom);
const atom_index = try self.createAtom();
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
@ -993,11 +1044,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
{
const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, sym_name);
sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
}
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
.parent_atom_index = atom.getSymbolIndex().?,
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@ -1010,17 +1065,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
errdefer self.freeAtom(atom);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
errdefer self.freeAtom(atom_index);
try unnamed_consts.append(gpa, atom);
try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
try self.writeAtom(atom, code);
try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@ -1047,14 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
const atom = &decl.link.coff;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeRelocationsForAtom(atom);
} else {
gop.value_ptr.* = null;
}
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -1064,7 +1115,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
.parent_atom_index = decl.link.coff.getSymbolIndex().?,
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@ -1082,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
/// Returns the index of the atom backing `decl_index`, creating the atom —
/// together with the decl's bookkeeping entry (atom index, output section,
/// and an empty exports list) — the first time the decl is seen.
pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
    const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{
            .atom = try self.createAtom(),
            .section = self.getDeclOutputSection(decl_index),
            .exports = .{},
        };
    }
    return gop.value_ptr.atom;
}
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@ -1117,14 +1181,11 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = self.getDeclOutputSection(decl);
}
const sect_index = decl_ptr.*.?;
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
const atom = self.getAtom(atom_index);
const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
const atom = &decl.link.coff;
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
@ -1135,7 +1196,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
const vaddr = try self.growAtom(atom, code_len, required_alignment);
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
@ -1143,49 +1204,43 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_atom = self.getGotAtomForSymbol(got_target).?;
const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
try self.writePtrWidthAtom(got_atom);
try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
self.shrinkAtom(atom, code_len);
self.shrinkAtom(atom_index, code_len);
}
atom.size = code_len;
self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
errdefer self.freeAtom(atom);
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
atom.size = code_len;
self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
const got_atom = try self.createGotAtom(got_target);
const got_atom_index = try self.createGotAtom(got_target);
const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(got_atom);
try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
try self.writeAtom(atom, code);
}
fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
var removed_relocs = self.relocs.fetchRemove(atom);
if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
var removed_base_relocs = self.base_relocs.fetchRemove(atom);
if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
try self.writeAtom(atom_index, code);
}
/// Frees every unnamed-constant atom owned by `decl_index` and clears the
/// decl's unnamed-constant list. No-op when the decl has no such list.
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
    const gpa = self.base.allocator;
    const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
    for (unnamed_consts.items) |atom_index| {
        self.freeAtom(atom_index);
    }
    unnamed_consts.clearAndFree(gpa);
}
@ -1200,11 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
if (self.decls.fetchRemove(decl_index)) |kv| {
if (kv.value) |_| {
self.freeAtom(&decl.link.coff);
self.freeUnnamedConsts(decl_index);
}
if (self.decls.fetchRemove(decl_index)) |const_kv| {
var kv = const_kv;
self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index);
kv.value.exports.deinit(self.base.allocator);
}
}
@ -1257,16 +1312,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
const atom = &decl.link.coff;
if (atom.getSymbolIndex() == null) return;
const gop = try self.decls.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = self.getDeclOutputSection(decl);
}
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
@ -1301,9 +1350,9 @@ pub fn updateDeclExports(
continue;
}
const sym_index = exp.link.coff.sym_index orelse blk: {
const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol();
exp.link.coff.sym_index = sym_index;
try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@ -1326,16 +1375,15 @@ pub fn updateDeclExports(
}
}
pub fn deleteExport(self: *Coff, exp: Export) void {
pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
const sym_index = exp.sym_index orelse return;
const metadata = self.decls.getPtr(decl_index) orelse return;
const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
const sym_name = self.getSymbolName(sym_loc);
log.debug("deleting export '{s}'", .{sym_name});
log.debug("deleting export '{s}'", .{name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@ -1345,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL,
.number_of_aux_symbols = 0,
};
self.locals_free_list.append(gpa, sym_index) catch {};
self.locals_free_list.append(gpa, sym_index.*) catch {};
if (self.resolver.fetchRemove(sym_name)) |entry| {
if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@ -1355,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null,
};
}
sym_index.* = 0;
}
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
@ -1419,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global);
const import_atom = try self.createImportAtom();
const import_atom_index = try self.createImportAtom();
const import_atom = self.getAtom(import_atom_index);
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(import_atom);
try self.writePtrWidthAtom(import_atom_index);
}
if (build_options.enable_logging) {
@ -1455,22 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
pub fn getDeclVAddr(
self: *Coff,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
try decl.link.coff.ensureInitialized(self);
const sym_index = decl.link.coff.getSymbolIndex().?;
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
try atom.addRelocation(self, .{
try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = @intCast(u32, reloc_info.offset),
@ -1478,7 +1521,7 @@ pub fn getDeclVAddr(
.pcrel = false,
.length = 3,
});
try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@ -1505,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index;
}
/// Line-number (debug info) updates are not implemented for the COFF linker
/// yet; this is a stub that only logs.
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
    _ = self;
    _ = module;
    _ = decl_index;
    log.debug("TODO implement updateDeclLineNumber", .{});
}
@ -1529,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator();
while (it.next()) |entry| {
const atom = entry.key_ptr.*;
const atom_index = entry.key_ptr.*;
const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*;
for (offsets.items) |offset| {
@ -1613,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?);
const last_atom = section.last_atom orelse return;
const last_atom_index = section.last_atom_index orelse return;
const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@ -2051,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
/// Returns a copy of the atom stored at `atom_index`.
/// Asserts that the index is within bounds of the atoms list.
pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
    assert(atom_index < self.atoms.items.len);
    return self.atoms.items[atom_index];
}
/// Returns a mutable pointer to the atom stored at `atom_index`.
/// Asserts that the index is within bounds of the atoms list.
/// NOTE(review): the pointer aliases `self.atoms.items`, so it is presumably
/// invalidated when the atoms list grows — confirm before holding it across
/// calls that may create atoms.
pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
    assert(atom_index < self.atoms.items.len);
    return &self.atoms.items[atom_index];
}
/// Returns the index of the atom referenced by the symbol described by the
/// `sym_loc` descriptor, or null if the symbol has no associated atom.
pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
    assert(sym_loc.file == null); // TODO linking with object files
    return self.atom_by_index_table.get(sym_loc.sym_index);
}
/// Returns the index of the GOT atom that references `sym_loc`, if one exists.
/// Returns null otherwise.
pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
    const got_index = self.got_entries_table.get(sym_loc) orelse return null;
    const got_entry = self.got_entries.items[got_index];
    return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}
/// Returns the index of the import atom that references `sym_loc`, if one
/// exists. Returns null otherwise.
pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
    const imports_index = self.imports_table.get(sym_loc) orelse return null;
    const imports_entry = self.imports.items[imports_index];
    return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {

View File

@ -27,23 +27,10 @@ alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
prev: ?*Atom,
next: ?*Atom,
prev_index: ?Index,
next_index: ?Index,
pub const empty = Atom{
.sym_index = 0,
.file = null,
.size = 0,
.alignment = 0,
.prev = null,
.next = null,
};
pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.sym_index = try coff_file.allocateSymbol();
try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
}
pub const Index = u32;
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
@ -85,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
if (self.next) |next| {
if (self.next_index) |next_index| {
const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@ -97,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
const next = self.next orelse return false;
const next_index = self.next_index orelse return false;
const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@ -107,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}
/// Records `reloc` against the atom at `atom_index`, creating the atom's
/// relocation list on first use. The relocation is resolved later, when the
/// atom is written out.
pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
    const gpa = coff_file.base.allocator;
    log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
    const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    try gop.value_ptr.append(gpa, reloc);
}
/// Records a base relocation at `offset` within the atom at `atom_index`,
/// creating the atom's base-relocation offset list on first use.
pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
    const gpa = coff_file.base.allocator;
    log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
        offset,
        coff_file.getAtom(atom_index).getSymbolIndex().?,
    });
    const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    try gop.value_ptr.append(gpa, offset);
}
/// Removes and deinitializes any relocation and base-relocation lists tracked
/// for `atom_index`. Safe to call when the atom has no recorded relocations.
pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
    const gpa = coff_file.base.allocator;
    var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
    if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
    var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
    if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
}

View File

@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,
/// Returns the index of the atom that is the target node of this relocation
/// edge (if any), looking it up in the table that matches the relocation
/// type: GOT, direct, or import.
pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
    switch (self.type) {
        .got,
        .got_page,
        .got_pageoff,
        => return coff_file.getGotAtomIndexForSymbol(self.target),
        .direct,
        .page,
        .pageoff,
        => return coff_file.getAtomIndexForSymbol(self.target),
        .import,
        .import_page,
        .import_pageoff,
        => return coff_file.getImportAtomIndexForSymbol(self.target),
    }
}
pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
const target_atom = self.getTargetAtom(coff_file) orelse return;
const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;
@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};
fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}
fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,6 @@ const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
@ -20,44 +19,33 @@ offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*Atom,
next: ?*Atom,
prev_index: ?Index,
next_index: ?Index,
dbg_info_atom: Dwarf.Atom,
pub const Index = u32;
pub const empty = Atom{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
.dbg_info_atom = undefined,
pub const Reloc = struct {
target: u32,
offset: u64,
addend: u32,
prev_vaddr: u64,
};
pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.local_sym_index = try elf_file.allocateLocalSymbol();
self.offset_table_index = try elf_file.allocateGotOffset();
try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
}
/// Returns the atom's local symbol index, or null when none has been assigned
/// (index 0 is treated as "no symbol").
pub fn getSymbolIndex(self: Atom) ?u32 {
    if (self.local_sym_index == 0) return null;
    return self.local_sym_index;
}
/// Returns a copy of the ELF symbol backing this atom.
/// Unwraps the symbol index, so the atom must already have one assigned.
pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
    return elf_file.getSymbol(self.getSymbolIndex().?);
}
/// Returns a mutable pointer to the ELF symbol backing this atom.
/// Unwraps the symbol index, so the atom must already have one assigned.
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
    return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}
/// Returns the name of the symbol backing this atom.
/// Unwraps the symbol index, so the atom must already have one assigned.
pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
    return elf_file.getSymbolName(self.getSymbolIndex().?);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
@ -72,9 +60,10 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
if (self.next) |next| {
if (self.next_index) |next_index| {
const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
@ -83,9 +72,10 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const next_index = self.next_index orelse return false;
const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
@ -94,3 +84,17 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
/// Records `reloc` against the atom at `atom_index`, creating the atom's
/// relocation list on first use.
pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
    const gpa = elf_file.base.allocator;
    const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    try gop.value_ptr.append(gpa, reloc);
}
/// Removes and deinitializes the relocation list tracked for `atom_index`.
/// Safe to call when the atom has no recorded relocations.
pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
    var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
    if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
}

File diff suppressed because it is too large Load Diff

View File

@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
@ -39,10 +38,11 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
next: ?*Atom,
prev: ?*Atom,
/// TODO use the same trick as with symbols: reserve index 0 as null atom
next_index: ?Index,
prev_index: ?Index,
dbg_info_atom: Dwarf.Atom,
pub const Index = u32;
pub const Binding = struct {
target: SymbolWithLoc,
@ -54,22 +54,6 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
pub const empty = Atom{
.sym_index = 0,
.file = null,
.size = 0,
.alignment = 0,
.prev = null,
.next = null,
.dbg_info_atom = undefined,
};
pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.sym_index = try macho_file.allocateSymbol();
try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
}
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
@ -108,7 +92,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
if (self.next) |next| {
if (self.next_index) |next_index| {
const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@ -120,7 +105,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
const next = self.next orelse return false;
const next_index = self.next_index orelse return false;
const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@ -130,19 +116,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
return self.addRelocations(macho_file, 1, .{reloc});
pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
self: *Atom,
macho_file: *MachO,
atom_index: Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
const gop = try macho_file.relocs.getOrPut(gpa, self);
const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@ -156,56 +142,72 @@ pub fn addRelocations(
}
}
pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
const gop = try macho_file.rebases.getOrPut(gpa, self);
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.getSymbolIndex(),
atom.getSymbolIndex(),
});
const gop = try macho_file.bindings.getOrPut(gpa, self);
const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.getSymbolIndex(),
atom.getSymbolIndex(),
});
const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
const relocs = macho_file.relocs.get(self) orelse return;
const source_sym = self.getSymbol(macho_file);
pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
const atom = macho_file.getAtom(atom_index);
const relocs = macho_file.relocs.get(atom_index) orelse return;
const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
log.debug("relocating '{s}'", .{self.getName(macho_file)});
log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
try reloc.resolve(self, macho_file, file_offset);
try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
const gpa = macho_file.base.allocator;
var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
}

View File

@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.debug_str_section_index == null) {
assert(self.dwarf.strtab.items.len == 0);
try self.dwarf.strtab.append(self.allocator, 0);
assert(self.dwarf.strtab.buffer.items.len == 0);
try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
@intCast(u32, self.dwarf.strtab.items.len),
@intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}

View File

@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return macho_file.getGotAtomForSymbol(self.target),
=> return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return macho_file.getGotAtomForSymbol(self.target),
=> return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
return macho_file.getAtomForSymbol(self.target);
if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
return macho_file.getAtomIndexForSymbol(self.target);
}
pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
const target_atom = self.getTargetAtom(macho_file) orelse return;
const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{

View File

@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
const FnDeclOutput = struct {
/// this code is modified when relocated so it is mutable
code: []u8,
/// this might have to be modified in the linker, so thats why its mutable
lineinfo: []u8,
start_line: u32,
end_line: u32,
};
pub const base_tag = .plan9;
base: link.File,
sixtyfour_bit: bool,
@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
const Reloc = struct {
target: Module.Decl.Index,
offset: u64,
@ -115,6 +111,42 @@ const Bases = struct {
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
pub const PtrWidth = enum { p32, p64 };
pub const DeclBlock = struct {
type: aout.Sym.Type,
/// offset in the text or data sects
offset: ?u64,
/// offset into syms
sym_index: ?usize,
/// offset into got
got_index: ?usize,
pub const Index = u32;
};
const DeclMetadata = struct {
index: DeclBlock.Index,
exports: std.ArrayListUnmanaged(usize) = .{},
fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
for (m.exports.items) |exp| {
const sym = p9.syms.items[exp];
if (mem.eql(u8, name, sym.name)) return exp;
}
return null;
}
};
const FnDeclOutput = struct {
/// this code is modified when relocated so it is mutable
code: []u8,
/// this might have to be modified in the linker, so thats why its mutable
lineinfo: []u8,
start_line: u32,
end_line: u32,
};
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
return addr + switch (t) {
.T, .t, .l, .L => self.bases.text,
@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
}
pub const DeclBlock = struct {
type: aout.Sym.Type,
/// offset in the text or data sects
offset: ?u64,
/// offset into syms
sym_index: ?usize,
/// offset into got
got_index: ?usize,
pub const empty = DeclBlock{
.type = .t,
.offset = null,
.sym_index = null,
.got_index = null,
};
};
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
return switch (arch) {
.x86_64 => .{
@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
};
}
pub const PtrWidth = enum { p32, p64 };
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
try self.seeDecl(decl_index);
_ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.end_line = end_line,
};
try self.putFn(decl_index, out);
return self.updateFinish(decl);
return self.updateFinish(decl_index);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
try self.seeDecl(decl_index);
_ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -387,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
}
try self.seeDecl(decl_index);
_ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
@ -414,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
self.base.allocator.free(old_entry.value);
}
return self.updateFinish(decl);
return self.updateFinish(decl_index);
}
/// called at the end of update{Decl,Func}
fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
// write the internal linker metadata
decl.link.plan9.type = sym_t;
decl_block.type = sym_t;
// write the symbol
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
.type = decl.link.plan9.type,
.type = decl_block.type,
.name = mem.span(decl.name),
};
if (decl.link.plan9.sym_index) |s| {
if (decl_block.sym_index) |s| {
self.syms.items[s] = sym;
} else {
const s = try self.allocateSymbolIndex();
decl.link.plan9.sym_index = s;
decl_block.sym_index = s;
self.syms.items[s] = sym;
}
}
@ -550,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@ -568,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
decl.link.plan9.offset = off;
decl_block.offset = off;
if (!self.sixtyfour_bit) {
mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off));
mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports.items);
try self.addDeclExports(mod, decl_index, exports.items);
}
}
}
@ -598,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@ -606,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(data_i, .d);
data_i += code.len;
decl.link.plan9.offset = off;
decl_block.offset = off;
if (!self.sixtyfour_bit) {
mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports.items);
try self.addDeclExports(mod, decl_index, exports.items);
}
}
// write the unnamed constants after the other data decls
@ -676,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
const target_decl_offset = target_decl.link.plan9.offset.?;
const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
@ -709,28 +729,36 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
fn addDeclExports(
self: *Plan9,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
const metadata = self.decls.getPtr(decl_index).?;
const decl_block = self.getDeclBlock(metadata.index);
for (exports) |exp| {
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
self.base.allocator,
module.declPtr(decl_index).srcLoc(),
"plan9 does not support extra sections",
.{},
));
break;
}
}
const sym = .{
.value = decl.link.plan9.offset.?,
.type = decl.link.plan9.type.toGlobal(),
.value = decl_block.offset.?,
.type = decl_block.type.toGlobal(),
.name = exp.options.name,
};
if (exp.link.plan9) |i| {
if (metadata.getExport(self, exp.options.name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
exp.link.plan9 = self.syms.items.len - 1;
try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
}
}
}
@ -760,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
self.base.allocator.free(removed_entry.value);
}
}
if (decl.link.plan9.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
self.got_index_free_list.append(self.base.allocator, i) catch {};
}
if (decl.link.plan9.sym_index) |i| {
self.syms_index_free_list.append(self.base.allocator, i) catch {};
self.syms.items[i] = aout.Sym.undefined_symbol;
if (self.decls.fetchRemove(decl_index)) |const_kv| {
var kv = const_kv;
const decl_block = self.getDeclBlock(kv.value.index);
if (decl_block.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
self.got_index_free_list.append(self.base.allocator, i) catch {};
}
if (decl_block.sym_index) |i| {
self.syms_index_free_list.append(self.base.allocator, i) catch {};
self.syms.items[i] = aout.Sym.undefined_symbol;
}
kv.value.exports.deinit(self.base.allocator);
}
self.freeUnnamedConsts(decl_index);
{
@ -786,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.link.plan9.got_index == null) {
decl.link.plan9.got_index = self.allocateGotIndex();
fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
const gpa = self.base.allocator;
const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
const decl_block = try self.decl_blocks.addOne(gpa);
decl_block.* = .{
.type = .t,
.offset = null,
.sym_index = null,
.got_index = null,
};
return index;
}
pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
const index = try self.createDeclBlock();
self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
gop.value_ptr.* = .{
.index = index,
.exports = .{},
};
}
return gop.value_ptr.index;
}
pub fn updateDeclExports(
@ -800,7 +851,7 @@ pub fn updateDeclExports(
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
try self.seeDecl(decl_index);
_ = try self.seeDecl(decl_index);
// we do all the things in flush
_ = module;
_ = exports;
@ -842,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
self.syms_index_free_list.deinit(gpa);
self.file_segments.deinit(gpa);
self.path_arena.deinit();
self.decl_blocks.deinit(gpa);
{
var it = self.decls.iterator();
while (it.next()) |entry| {
entry.value_ptr.exports.deinit(gpa);
}
self.decls.deinit(gpa);
}
}
pub const Export = ?usize;
pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@ -911,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
const mod = self.base.options.module.?;
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const sym = self.syms.items[decl.link.plan9.sym_index.?];
const decl_metadata = self.decls.get(decl_index).?;
const decl_block = self.getDeclBlock(decl_metadata.index);
const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| {
try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
}
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
try self.writeSym(writer, self.syms.items[exp_i]);
};
}
}
}
@ -943,16 +1000,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const sym = self.syms.items[decl.link.plan9.sym_index.?];
const decl_metadata = self.decls.get(decl_index).?;
const decl_block = self.getDeclBlock(decl_metadata.index);
const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| {
const s = self.syms.items[e.link.plan9.?];
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
try self.writeSym(writer, s);
}
};
}
}
}
@ -960,10 +1018,10 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = mod;
_ = decl;
_ = decl_index;
}
pub fn getDeclVAddr(
@ -1004,3 +1062,11 @@ pub fn getDeclVAddr(
});
return undefined;
}
pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
return self.decl_blocks.items[index];
}
fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
return &self.decl_blocks.items[index];
}

View File

@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
pub const FnData = struct {
// We're going to fill these in flushModule, and we're going to fill them unconditionally,
// so just set it to undefined.
id: IdResult = undefined,
};
base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
// TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
defer ids.deinit();
try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
decl.fn_link.spirv.id = spv.allocId();
ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}
// Now, actually generate the code for all declarations.
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();
var it = self.decl_table.iterator();
@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;
// Note, if `decl` is not a function, air/liveness may be undefined.
if (try decl_gen.gen(decl, air, liveness)) |msg| {
if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,6 @@ const std = @import("std");
const types = @import("types.zig");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
const Dwarf = @import("../Dwarf.zig");
const leb = std.leb;
const log = std.log.scoped(.link);
@ -30,17 +29,17 @@ file: ?u16,
/// Next atom in relation to this atom.
/// When null, this atom is the last atom
next: ?*Atom,
next: ?Atom.Index,
/// Previous atom in relation to this atom.
/// is null when this atom is the first in its order
prev: ?*Atom,
prev: ?Atom.Index,
/// Contains atoms local to a decl, all managed by this `Atom`.
/// When the parent atom is being freed, it will also do so for all local atoms.
locals: std.ArrayListUnmanaged(Atom) = .{},
locals: std.ArrayListUnmanaged(Atom.Index) = .{},
/// Represents the debug Atom that holds all debug information of this Atom.
dbg_info_atom: Dwarf.Atom,
/// Alias to an unsigned 32-bit integer
pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
@ -51,18 +50,15 @@ pub const empty: Atom = .{
.prev = null,
.size = 0,
.sym_index = 0,
.dbg_info_atom = undefined,
};
/// Frees all resources owned by this `Atom`.
pub fn deinit(atom: *Atom, gpa: Allocator) void {
pub fn deinit(atom: *Atom, wasm: *Wasm) void {
const gpa = wasm.base.allocator;
atom.relocs.deinit(gpa);
atom.code.deinit(gpa);
for (atom.locals.items) |*local| {
local.deinit(gpa);
}
atom.locals.deinit(gpa);
atom.* = undefined;
}
/// Sets the length of relocations and code to '0',
@ -83,24 +79,11 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
});
}
/// Returns the first `Atom` from a given atom
pub fn getFirst(atom: *Atom) *Atom {
var tmp = atom;
while (tmp.prev) |prev| tmp = prev;
return tmp;
}
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
if (atom.getSymbolIndex() != null) return; // already initialized
atom.sym_index = try wasm_bin.allocateSymbol();
try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
}
pub fn getSymbolIndex(atom: Atom) ?u32 {
if (atom.sym_index == 0) return null;
return atom.sym_index;
@ -203,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
// this can only occur during incremental-compilation when a relocation
// still points to a freed decl. It is fine to emit the value 0 here
// as no actual code will point towards it.
return 0;
};
const target_atom = wasm_bin.getAtom(target_atom_index);
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
const target_atom = wasm_bin.getAtom(target_atom_index);
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
return @bitCast(u32, @as(i32, -1));
};
const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);

View File

@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
const atom = try gpa.create(Atom);
const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
errdefer {
atom.deinit(gpa);
gpa.destroy(atom);
}
try wasm_bin.managed_atoms.append(gpa, atom);
atom.file = object_index;
atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(object);
@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
// symbols referencing the same atom will be added as alias
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
const alias_symbol = object.symtable[idx];
if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
segment.alignment = std.math.max(segment.alignment, atom.alignment);
}
try wasm_bin.appendAtomAtIndex(final_index, atom);
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
}
}

View File

@ -25,8 +25,8 @@ pub fn build(b: *std.Build) void {
check_lib.checkNext("type i32");
check_lib.checkNext("mutable false");
check_lib.checkNext("i32.const {bar_address}");
check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 0 } });
check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 4 } });
check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 4 } });
check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 0 } });
check_lib.checkStart("Section export");
check_lib.checkNext("entries 3");