combine codegen work queue and linker task queue

These tasks share data dependencies, so they cannot run simultaneously.
Future work should untangle those dependencies so that more of this work
can proceed in parallel.

For now, this commit ensures correctness by making linker input parsing
and codegen tasks part of the same queue.
Andrew Kelley 2024-10-23 00:00:17 -07:00
parent 9a511b4b27
commit ba71079837
6 changed files with 233 additions and 276 deletions
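
A note on the design: all linker and codegen work now funnels through `comp.link_task_queue`, which is drained by at most one worker thread at a time. Below is a minimal, mutex-based sketch of that start/enqueue/check pattern; it is a simplification for illustration, not the compiler's actual ThreadSafeQueue:

const std = @import("std");

/// Simplified model of the queue behind `link_task_queue`; the compiler's
/// real ThreadSafeQueue differs in detail.
pub fn ThreadSafeQueue(comptime T: type) type {
    return struct {
        mutex: std.Thread.Mutex = .{},
        shared: std.ArrayListUnmanaged(T) = .{},
        worker_owned: std.ArrayListUnmanaged(T) = .{},
        /// True while a worker thread is draining the queue.
        running: bool = false,

        const Self = @This();

        /// Claims the worker role if there is queued work and no worker yet.
        /// Returns true if the caller should spawn the drain thread.
        pub fn start(self: *Self) bool {
            self.mutex.lock();
            defer self.mutex.unlock();
            if (self.running or self.shared.items.len == 0) return false;
            self.running = true;
            return true;
        }

        /// Producers append tasks. A true result means no worker is active,
        /// so the caller must spawn one.
        pub fn enqueue(self: *Self, gpa: std.mem.Allocator, tasks: []const T) !bool {
            self.mutex.lock();
            defer self.mutex.unlock();
            try self.shared.appendSlice(gpa, tasks);
            if (self.running) return false;
            self.running = true;
            return true;
        }

        /// The single worker calls this in a loop. A null result means the
        /// queue was observed empty and the worker may exit.
        pub fn check(self: *Self, gpa: std.mem.Allocator) ?[]const T {
            self.mutex.lock();
            defer self.mutex.unlock();
            if (self.shared.items.len == 0) {
                self.running = false;
                return null;
            }
            self.worker_owned.clearRetainingCapacity();
            self.worker_owned.appendSlice(gpa, self.shared.items) catch {
                // This sketch just drops the batch on OOM.
                self.running = false;
                return null;
            };
            self.shared.clearRetainingCapacity();
            return self.worker_owned.items;
        }
    };
}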

lib/std/Thread/WaitGroup.zig

@@ -14,6 +14,11 @@ pub fn start(self: *WaitGroup) void {
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
pub fn startMany(self: *WaitGroup, n: usize) void {
const state = self.state.fetchAdd(one_pending * n, .monotonic);
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
pub fn finish(self: *WaitGroup) void {
const state = self.state.fetchSub(one_pending, .acq_rel);
assert((state / one_pending) > 0);
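
The new `startMany` reserves n pending units with one atomic add instead of n separate `start` calls. A standalone usage sketch (the worker function is hypothetical):

const std = @import("std");

fn worker(wg: *std.Thread.WaitGroup) void {
    defer wg.finish(); // release one pending unit
    // ... do some work ...
}

pub fn main() !void {
    var wg: std.Thread.WaitGroup = .{};
    const n = 4;
    wg.startMany(n); // one fetchAdd instead of n start() calls
    var threads: [n]std.Thread = undefined;
    for (&threads) |*t| t.* = try std.Thread.spawn(.{}, worker, .{&wg});
    wg.wait(); // returns once all n workers have called finish()
    for (threads) |t| t.join();
}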

src/Compilation.zig

@@ -111,7 +111,9 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa
} = .{},
link_diags: link.Diags,
link_task_queue: ThreadSafeQueue(link.File.Task) = .empty,
link_task_queue: ThreadSafeQueue(link.Task) = .empty,
/// Ensure only 1 simultaneous call to `flushTaskQueue`.
link_task_queue_safety: std.debug.SafetyLock = .{},
work_queues: [
len: {
@@ -123,14 +125,6 @@ work_queues:
}
]std.fifo.LinearFifo(Job, .Dynamic),
codegen_work: if (InternPool.single_threaded) void else struct {
mutex: std.Thread.Mutex,
cond: std.Thread.Condition,
queue: std.fifo.LinearFifo(CodegenJob, .Dynamic),
job_error: ?JobError,
done: bool,
},
/// These jobs are to invoke the Clang compiler to create an object file, which
/// gets linked with the Compilation.
c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
@@ -267,7 +261,7 @@ emit_asm: ?EmitLoc,
emit_llvm_ir: ?EmitLoc,
emit_llvm_bc: ?EmitLoc,
work_queue_wait_group: WaitGroup = .{},
link_task_wait_group: WaitGroup = .{},
work_queue_progress_node: std.Progress.Node = .none,
llvm_opt_bisect_limit: c_int,
@@ -347,16 +341,14 @@ pub const RcIncludes = enum {
};
const Job = union(enum) {
/// Write the constant value for a Decl to the output file.
/// Corresponds to the task in `link.Task`.
/// Only needed for backends that haven't yet been updated to not race against Sema.
codegen_nav: InternPool.Nav.Index,
/// Write the machine code for a function to the output file.
codegen_func: struct {
/// This will either be a non-generic `func_decl` or a `func_instance`.
func: InternPool.Index,
/// This `Air` is owned by the `Job` and allocated with `gpa`.
/// It must be deinited when the job is processed.
air: Air,
},
/// Corresponds to the task in `link.Task`.
/// Only needed for backends that haven't yet been updated to not race against Sema.
codegen_func: link.Task.CodegenFunc,
/// Corresponds to the task in `link.Task`.
/// Only needed for backends that haven't yet been updated to not race against Sema.
codegen_type: InternPool.Index,
/// The `Cau` must be semantically analyzed (and possibly export itself).
/// This may be its first time being analyzed, or it may be outdated.
@@ -408,17 +400,6 @@ const Job = union(enum) {
}
};
const CodegenJob = union(enum) {
nav: InternPool.Nav.Index,
func: struct {
func: InternPool.Index,
/// This `Air` is owned by the `Job` and allocated with `gpa`.
/// It must be deinited when the job is processed.
air: Air,
},
type: InternPool.Index,
};
pub const CObject = struct {
/// Relative to cwd. Owned by arena.
src: CSourceFile,
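
The deleted `CodegenJob` union carried the same ownership rule that `link.Task.CodegenFunc` now documents: the `Air` travels by value inside the task, and whoever processes the task must deinit it. An illustrative standalone sketch of that hand-off, with a byte list standing in for the owned `Air`:

const std = @import("std");

// Stand-in for a task whose payload is owned by the task value itself.
const CodegenFunc = struct {
    func: u32,
    air: std.ArrayListUnmanaged(u8),
};

// The consumer takes ownership: it must deinit the payload exactly once,
// whether or not processing succeeds.
fn process(gpa: std.mem.Allocator, task: CodegenFunc) void {
    var air = task.air;
    defer air.deinit(gpa);
    // ... lower `air` to machine code ...
}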
@@ -1465,13 +1446,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
.work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).array.len,
.codegen_work = if (InternPool.single_threaded) {} else .{
.mutex = .{},
.cond = .{},
.queue = std.fifo.LinearFifo(CodegenJob, .Dynamic).init(gpa),
.job_error = null,
.done = false,
},
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.win32_resource_work_queue = if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa) else .{},
.astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa),
@@ -1923,7 +1897,6 @@ pub fn destroy(comp: *Compilation) void {
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
for (comp.work_queues) |work_queue| work_queue.deinit();
if (!InternPool.single_threaded) comp.codegen_work.queue.deinit();
comp.c_object_work_queue.deinit();
comp.win32_resource_work_queue.deinit();
comp.astgen_work_queue.deinit();
@@ -3485,7 +3458,6 @@ pub fn performAllTheWork(
zcu.generation += 1;
};
try comp.performAllTheWorkInner(main_progress_node);
if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;
}
fn performAllTheWorkInner(
@@ -3497,36 +3469,35 @@ fn performAllTheWorkInner(
// (at least for now) single-threaded main work queue. However, C object compilation
// only needs to be finished by the end of this function.
const work_queue_wait_group = &comp.work_queue_wait_group;
work_queue_wait_group.reset();
var work_queue_wait_group: WaitGroup = .{};
defer work_queue_wait_group.wait();
if (comp.bin_file) |lf| {
if (comp.link_task_queue.start()) {
comp.thread_pool.spawnWg(work_queue_wait_group, link.File.flushTaskQueue, .{ lf, main_progress_node });
}
comp.link_task_wait_group.reset();
defer comp.link_task_wait_group.wait();
if (comp.link_task_queue.start()) {
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, link.flushTaskQueue, .{comp});
}
if (comp.docs_emit != null) {
dev.check(.docs_emit);
comp.thread_pool.spawnWg(work_queue_wait_group, workerDocsCopy, .{comp});
comp.thread_pool.spawnWg(&work_queue_wait_group, workerDocsCopy, .{comp});
work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node });
}
if (comp.job_queued_compiler_rt_lib) {
comp.job_queued_compiler_rt_lib = false;
work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
}
if (comp.job_queued_compiler_rt_obj) {
comp.job_queued_compiler_rt_obj = false;
work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
}
if (comp.job_queued_fuzzer_lib) {
comp.job_queued_fuzzer_lib = false;
work_queue_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
comp.link_task_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
}
{
@@ -3591,13 +3562,13 @@ fn performAllTheWorkInner(
}
while (comp.c_object_work_queue.readItem()) |c_object| {
comp.thread_pool.spawnWg(work_queue_wait_group, workerUpdateCObject, .{
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
comp, c_object, main_progress_node,
});
}
while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
comp.thread_pool.spawnWg(work_queue_wait_group, workerUpdateWin32Resource, .{
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
comp, win32_resource, main_progress_node,
});
}
@@ -3617,18 +3588,12 @@ fn performAllTheWorkInner(
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
}
if (!InternPool.single_threaded) {
comp.codegen_work.done = false; // may be `true` from a prior update
comp.thread_pool.spawnWgId(work_queue_wait_group, codegenThread, .{comp});
if (!comp.separateCodegenThreadOk()) {
// Waits until all input files have been parsed.
comp.link_task_wait_group.wait();
comp.link_task_wait_group.reset();
std.log.scoped(.link).debug("finished waiting for link_task_wait_group", .{});
}
defer if (!InternPool.single_threaded) {
{
comp.codegen_work.mutex.lock();
defer comp.codegen_work.mutex.unlock();
comp.codegen_work.done = true;
}
comp.codegen_work.cond.signal();
};
work: while (true) {
for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| {
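
For backends where codegen may not run on a separate thread, the code above blocks on `link_task_wait_group` until all linker inputs are parsed, then resets the group for reuse. A standalone sketch of that wait-then-reset idiom using `std.Thread.Pool.spawnWg`; both phase functions are hypothetical:

const std = @import("std");

fn parseInput(id: usize) void {
    std.log.info("parsed linker input {d}", .{id});
}

fn codegenTask(id: usize) void {
    std.log.info("codegen task {d}", .{id});
}

pub fn main() !void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa_state.allocator() });
    defer pool.deinit();

    var wg: std.Thread.WaitGroup = .{};

    // Phase 1: parse every linker input.
    for (0..4) |i| pool.spawnWg(&wg, parseInput, .{i});
    wg.wait(); // the backend must not race against input parsing
    wg.reset(); // safe to reuse: no pending work remains

    // Phase 2: codegen tasks, now safe to run.
    for (0..4) |i| pool.spawnWg(&wg, codegenTask, .{i});
    wg.wait();
}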
@@ -3672,16 +3637,14 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
}
}
assert(nav.status == .resolved);
try comp.queueCodegenJob(tid, .{ .nav = nav_index });
comp.dispatchCodegenTask(tid, .{ .codegen_nav = nav_index });
},
.codegen_func => |func| {
// This call takes ownership of `func.air`.
try comp.queueCodegenJob(tid, .{ .func = .{
.func = func.func,
.air = func.air,
} });
comp.dispatchCodegenTask(tid, .{ .codegen_func = func });
},
.codegen_type => |ty| {
comp.dispatchCodegenTask(tid, .{ .codegen_type = ty });
},
.codegen_type => |ty| try comp.queueCodegenJob(tid, .{ .type = ty }),
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
@@ -3894,66 +3857,20 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
}
}
fn queueCodegenJob(comp: *Compilation, tid: usize, codegen_job: CodegenJob) !void {
if (InternPool.single_threaded or
!comp.zcu.?.backendSupportsFeature(.separate_thread))
return processOneCodegenJob(tid, comp, codegen_job);
{
comp.codegen_work.mutex.lock();
defer comp.codegen_work.mutex.unlock();
try comp.codegen_work.queue.writeItem(codegen_job);
}
comp.codegen_work.cond.signal();
}
fn codegenThread(tid: usize, comp: *Compilation) void {
comp.codegen_work.mutex.lock();
defer comp.codegen_work.mutex.unlock();
while (true) {
if (comp.codegen_work.queue.readItem()) |codegen_job| {
comp.codegen_work.mutex.unlock();
defer comp.codegen_work.mutex.lock();
processOneCodegenJob(tid, comp, codegen_job) catch |job_error| {
comp.codegen_work.job_error = job_error;
break;
};
continue;
}
if (comp.codegen_work.done) break;
comp.codegen_work.cond.wait(&comp.codegen_work.mutex);
/// The reason for the double-queue here is that the first queue ensures any
/// resolve_type_fully tasks are complete before this dispatch function is called.
fn dispatchCodegenTask(comp: *Compilation, tid: usize, link_task: link.Task) void {
if (comp.separateCodegenThreadOk()) {
comp.queueLinkTasks(&.{link_task});
} else {
link.doTask(comp, tid, link_task);
}
}
fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob) JobError!void {
switch (codegen_job) {
.nav => |nav_index| {
const named_frame = tracy.namedFrame("codegen_nav");
defer named_frame.end();
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
try pt.linkerUpdateNav(nav_index);
},
.func => |func| {
const named_frame = tracy.namedFrame("codegen_func");
defer named_frame.end();
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
// This call takes ownership of `func.air`.
try pt.linkerUpdateFunc(func.func, func.air);
},
.type => |ty| {
const named_frame = tracy.namedFrame("codegen_type");
defer named_frame.end();
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
try pt.linkerUpdateContainerType(ty);
},
}
fn separateCodegenThreadOk(comp: *const Compilation) bool {
if (InternPool.single_threaded) return false;
const zcu = comp.zcu orelse return true;
return zcu.backendSupportsFeature(.separate_thread);
}
fn workerDocsCopy(comp: *Compilation) void {
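
The decision in `dispatchCodegenTask` can be modeled in isolation: when a separate codegen thread is safe, the task goes onto the shared queue for the linker worker; otherwise it runs inline so the backend cannot race against Sema. An illustrative standalone model (the `Task` payloads are hypothetical stand-ins):

const std = @import("std");

const Task = union(enum) {
    codegen_nav: u32,
    codegen_func: u32,
    codegen_type: u32,
};

fn doTask(task: Task) void {
    switch (task) {
        .codegen_nav => |i| std.log.info("nav {d}", .{i}),
        .codegen_func => |i| std.log.info("func {d}", .{i}),
        .codegen_type => |i| std.log.info("type {d}", .{i}),
    }
}

fn dispatch(queue: *std.ArrayList(Task), separate_thread_ok: bool, task: Task) !void {
    if (separate_thread_ok) {
        // Drained later by the single linker worker thread.
        try queue.append(task);
    } else {
        // Run inline; the backend would otherwise race against analysis.
        doTask(task);
    }
}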
@@ -6465,17 +6382,11 @@ pub fn queueLinkTaskMode(comp: *Compilation, path: Path, output_mode: std.builti
/// Only valid to call during `update`. Automatically handles queuing up a
/// linker worker task if there is not already one.
pub fn queueLinkTasks(comp: *Compilation, tasks: []const link.File.Task) void {
const use_lld = build_options.have_llvm and comp.config.use_lld;
if (use_lld) return;
const target = comp.root_mod.resolved_target.result;
if (target.ofmt != .elf) return;
pub fn queueLinkTasks(comp: *Compilation, tasks: []const link.Task) void {
if (comp.link_task_queue.enqueue(comp.gpa, tasks) catch |err| switch (err) {
error.OutOfMemory => return comp.setAllocFailure(),
}) {
comp.thread_pool.spawnWg(&comp.work_queue_wait_group, link.File.flushTaskQueue, .{
comp.bin_file.?, comp.work_queue_progress_node,
});
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, link.flushTaskQueue, .{comp});
}
}

src/Sema.zig

@@ -2899,6 +2899,7 @@ fn zirStructDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -3149,6 +3150,7 @@ fn zirEnumDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@@ -3272,6 +3274,7 @@ fn zirUnionDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -3357,6 +3360,7 @@ fn zirOpaqueDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -22456,6 +22460,7 @@ fn reifyEnum(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@@ -22713,6 +22718,7 @@ fn reifyUnion(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@@ -22997,6 +23003,7 @@ fn reifyStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
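
Each of these `codegen_type` jobs leans on FIFO ordering: a `resolve_type_fully` job queued earlier is always popped first. A standalone demonstration of that property with `std.fifo`, using integers as stand-ins for jobs:

const std = @import("std");

pub fn main() !void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    var fifo = std.fifo.LinearFifo(u32, .Dynamic).init(gpa_state.allocator());
    defer fifo.deinit();
    try fifo.writeItem(1); // stand-in for resolve_type_fully
    try fifo.writeItem(2); // stand-in for codegen_type
    // Jobs pop in insertion order, so the codegen job sees a resolved type.
    while (fifo.readItem()) |job| std.log.info("job {d}", .{job});
}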

src/Zcu/PerThread.zig

@@ -845,6 +845,7 @@ fn ensureFuncBodyAnalyzedInner(
return .{ .ies_outdated = ies_outdated };
}
// This job depends on any resolve_type_fully jobs queued up before it.
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
@@ -1016,6 +1017,7 @@ fn createFileRootStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
@@ -1362,6 +1364,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
if (file.mod.strip) break :queue_codegen;
}
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_nav = nav_index });
}
@@ -2593,7 +2596,7 @@ pub fn populateTestFunctions(
}
}
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
@@ -3163,6 +3166,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index {
const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
if (result.new_nav.unwrap()) |nav| {
// This job depends on any resolve_type_fully jobs queued up before it.
try pt.zcu.comp.queueJob(.{ .codegen_nav = nav });
}
return result.index;
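
Narrowing `linkerUpdateNav` to `error{OutOfMemory}` lets its caller on the linker thread handle failure exhaustively, the way `doTask` does with `setAllocFailure`. A minimal standalone sketch of that narrowing pattern (both functions are hypothetical):

const std = @import("std");

fn updateNav(gpa: std.mem.Allocator) error{OutOfMemory}!void {
    const buf = try gpa.alloc(u8, 16); // can only fail with OutOfMemory
    defer gpa.free(buf);
}

fn setAllocFailure() void {
    std.log.err("allocation failure", .{});
}

pub fn main() void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    // The narrow error set makes this switch exhaustive.
    updateNav(gpa_state.allocator()) catch |err| switch (err) {
        error.OutOfMemory => setAllocFailure(),
    };
}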

src/glibc.zig

@@ -1222,7 +1222,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.glibc_so_files == null);
comp.glibc_so_files = so_files;
var task_buffer: [libs.len]link.File.Task = undefined;
var task_buffer: [libs.len]link.Task = undefined;
var task_buffer_i: usize = 0;
{
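
The glibc path collects its tasks into a stack buffer whose capacity is comptime-known, then enqueues the filled prefix in one call. A standalone sketch of that batching idiom (the names and queue type here are illustrative only):

const std = @import("std");

pub fn main() !void {
    var gpa_state: std.heap.GeneralPurposeAllocator(.{}) = .{};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    const libs = [_][]const u8{ "m", "pthread", "c", "dl", "rt", "util" };

    // Fill a fixed buffer sized by the comptime-known list length.
    var task_buffer: [libs.len][]const u8 = undefined;
    var task_buffer_i: usize = 0;
    for (libs) |lib| {
        if (std.mem.eql(u8, lib, "util")) continue; // skip some entries
        task_buffer[task_buffer_i] = lib;
        task_buffer_i += 1;
    }

    // Hand the filled prefix to the queue in a single call.
    var queue = std.ArrayList([]const u8).init(gpa);
    defer queue.deinit();
    try queue.appendSlice(task_buffer[0..task_buffer_i]);
    std.log.info("enqueued {d} tasks", .{queue.items.len});
}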

src/link.zig

@@ -370,9 +370,6 @@ pub const File = struct {
lock: ?Cache.Lock = null,
child_pid: ?std.process.Child.Id = null,
/// Ensure only 1 simultaneous call to `flushTaskQueue`.
task_queue_safety: std.debug.SafetyLock = .{},
pub const OpenOptions = struct {
symbol_count_hint: u64 = 32,
program_code_size_hint: u64 = 256 * 1024,
@@ -1085,6 +1082,8 @@ pub const File = struct {
}
pub fn loadInput(base: *File, input: Input) anyerror!void {
const use_lld = build_options.have_llvm and base.comp.config.use_lld;
if (use_lld) return;
switch (base.tag) {
inline .elf => |tag| {
dev.check(tag.devFeature());
@@ -1360,151 +1359,182 @@ pub const File = struct {
pub const Wasm = @import("link/Wasm.zig");
pub const NvPtx = @import("link/NvPtx.zig");
pub const Dwarf = @import("link/Dwarf.zig");
};
/// Does all the tasks in the queue. Runs in exactly one separate thread
/// from the rest of compilation. All tasks performed here are
/// single-threaded with respect to one another.
pub fn flushTaskQueue(base: *File, parent_prog_node: std.Progress.Node) void {
const comp = base.comp;
base.task_queue_safety.lock();
defer base.task_queue_safety.unlock();
const prog_node = parent_prog_node.start("Parse Linker Inputs", 0);
defer prog_node.end();
while (comp.link_task_queue.check()) |tasks| {
for (tasks) |task| doTask(base, task);
}
/// Does all the tasks in the queue. Runs in exactly one separate thread
/// from the rest of compilation. All tasks performed here are
/// single-threaded with respect to one another.
pub fn flushTaskQueue(tid: usize, comp: *Compilation) void {
comp.link_task_queue_safety.lock();
defer comp.link_task_queue_safety.unlock();
const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", 0);
defer prog_node.end();
while (comp.link_task_queue.check()) |tasks| {
for (tasks) |task| doTask(comp, tid, task);
}
}
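
`flushTaskQueue` is the single consumer: it repeatedly claims whatever batch has accumulated and runs the tasks in order, so they are single-threaded with respect to one another. A condensed sketch of the drain loop, reusing the simplified `ThreadSafeQueue` and the hypothetical `Task`/`doTask` from the earlier sketches on this page:

const std = @import("std");

// Condensed drain loop; `ThreadSafeQueue`, `Task`, and `doTask` refer to
// the simplified sketches shown earlier, not the compiler's real types.
fn flushTaskQueue(gpa: std.mem.Allocator, queue: *ThreadSafeQueue(Task)) void {
    // Exactly one thread runs this at a time (the real code guards it
    // with a SafetyLock), so tasks never race with one another.
    while (queue.check(gpa)) |tasks| {
        for (tasks) |task| doTask(task);
    }
}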
pub const Task = union(enum) {
/// Loads the objects, shared objects, and archives that are already
/// known from the command line.
load_explicitly_provided,
/// Loads the shared objects and archives by resolving
/// `target_util.libcFullLinkFlags()` against the host libc
/// installation.
load_host_libc,
/// Tells the linker to load an object file by path.
load_object: Path,
/// Tells the linker to load a static library by path.
load_archive: Path,
/// Tells the linker to load a shared library, possibly one that is a
/// GNU ld script.
load_dso: Path,
/// Tells the linker to load an input which could be an object file,
/// archive, or shared library.
load_input: Input,
pub const Task = union(enum) {
/// Loads the objects, shared objects, and archives that are already
/// known from the command line.
load_explicitly_provided,
/// Loads the shared objects and archives by resolving
/// `target_util.libcFullLinkFlags()` against the host libc
/// installation.
load_host_libc,
/// Tells the linker to load an object file by path.
load_object: Path,
/// Tells the linker to load a static library by path.
load_archive: Path,
/// Tells the linker to load a shared library, possibly one that is a
/// GNU ld script.
load_dso: Path,
/// Tells the linker to load an input which could be an object file,
/// archive, or shared library.
load_input: Input,
/// Write the constant value for a Decl to the output file.
codegen_nav: InternPool.Nav.Index,
/// Write the machine code for a function to the output file.
codegen_func: CodegenFunc,
codegen_type: InternPool.Index,
pub const CodegenFunc = struct {
/// This will either be a non-generic `func_decl` or a `func_instance`.
func: InternPool.Index,
/// This `Air` is owned by the `Job` and allocated with `gpa`.
/// It must be deinited when the job is processed.
air: Air,
};
};
fn doTask(base: *File, task: Task) void {
const comp = base.comp;
switch (task) {
.load_explicitly_provided => {
for (comp.link_inputs) |input| {
base.loadInput(input) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| switch (input) {
.dso => |dso| comp.link_diags.addParseError(dso.path, "failed to parse shared library: {s}", .{@errorName(e)}),
.object => |obj| comp.link_diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
.archive => |obj| comp.link_diags.addParseError(obj.path, "failed to parse archive: {s}", .{@errorName(e)}),
.res => |res| comp.link_diags.addParseError(res.path, "failed to parse Windows resource: {s}", .{@errorName(e)}),
.dso_exact => comp.link_diags.addError("failed to handle dso_exact: {s}", .{@errorName(e)}),
},
};
}
},
.load_host_libc => {
const target = comp.root_mod.resolved_target.result;
const flags = target_util.libcFullLinkFlags(target);
const crt_dir = comp.libc_installation.?.crt_dir.?;
const sep = std.fs.path.sep_str;
const diags = &comp.link_diags;
for (flags) |flag| {
assert(mem.startsWith(u8, flag, "-l"));
const lib_name = flag["-l".len..];
switch (comp.config.link_mode) {
.dynamic => {
const dso_path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.dynamicLibSuffix(),
}) catch return diags.setAllocFailure(),
);
base.openLoadDso(dso_path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |err| switch (err) {
error.FileNotFound => {
// Also try static.
const archive_path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
}) catch return diags.setAllocFailure(),
);
base.openLoadArchive(archive_path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |archive_err| switch (archive_err) {
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(dso_path, "failed to parse archive {}: {s}", .{ archive_path, @errorName(e) }),
};
},
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(dso_path, "failed to parse shared library: {s}", .{@errorName(e)}),
};
},
.static => {
const path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
}) catch return diags.setAllocFailure(),
);
// glibc sometimes makes even archive files GNU ld scripts.
base.openLoadArchive(path, .{
.preferred_mode = .static,
.search_strategy = .no_fallback,
}) catch |err| switch (err) {
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
};
},
}
}
},
.load_object => |path| {
base.openLoadObject(path) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| comp.link_diags.addParseError(path, "failed to parse object: {s}", .{@errorName(e)}),
};
},
.load_archive => |path| {
base.openLoadArchive(path, null) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| comp.link_diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
};
},
.load_dso => |path| {
base.openLoadDso(path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| comp.link_diags.addParseError(path, "failed to parse shared library: {s}", .{@errorName(e)}),
};
},
.load_input => |input| {
pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
const diags = &comp.link_diags;
switch (task) {
.load_explicitly_provided => if (comp.bin_file) |base| {
for (comp.link_inputs) |input| {
base.loadInput(input) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| {
if (input.path()) |path| {
comp.link_diags.addParseError(path, "failed to parse linker input: {s}", .{@errorName(e)});
} else {
comp.link_diags.addError("failed to {s}: {s}", .{ input.taskName(), @errorName(e) });
}
error.LinkFailure => return, // error reported via diags
else => |e| switch (input) {
.dso => |dso| diags.addParseError(dso.path, "failed to parse shared library: {s}", .{@errorName(e)}),
.object => |obj| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
.archive => |obj| diags.addParseError(obj.path, "failed to parse archive: {s}", .{@errorName(e)}),
.res => |res| diags.addParseError(res.path, "failed to parse Windows resource: {s}", .{@errorName(e)}),
.dso_exact => diags.addError("failed to handle dso_exact: {s}", .{@errorName(e)}),
},
};
},
}
}
},
.load_host_libc => if (comp.bin_file) |base| {
const target = comp.root_mod.resolved_target.result;
const flags = target_util.libcFullLinkFlags(target);
const crt_dir = comp.libc_installation.?.crt_dir.?;
const sep = std.fs.path.sep_str;
for (flags) |flag| {
assert(mem.startsWith(u8, flag, "-l"));
const lib_name = flag["-l".len..];
switch (comp.config.link_mode) {
.dynamic => {
const dso_path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.dynamicLibSuffix(),
}) catch return diags.setAllocFailure(),
);
base.openLoadDso(dso_path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |err| switch (err) {
error.FileNotFound => {
// Also try static.
const archive_path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
}) catch return diags.setAllocFailure(),
);
base.openLoadArchive(archive_path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |archive_err| switch (archive_err) {
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(dso_path, "failed to parse archive {}: {s}", .{ archive_path, @errorName(e) }),
};
},
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(dso_path, "failed to parse shared library: {s}", .{@errorName(e)}),
};
},
.static => {
const path = Path.initCwd(
std.fmt.allocPrint(comp.arena, "{s}" ++ sep ++ "{s}{s}{s}", .{
crt_dir, target.libPrefix(), lib_name, target.staticLibSuffix(),
}) catch return diags.setAllocFailure(),
);
// glibc sometimes makes even archive files GNU ld scripts.
base.openLoadArchive(path, .{
.preferred_mode = .static,
.search_strategy = .no_fallback,
}) catch |err| switch (err) {
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
};
},
}
}
},
.load_object => |path| if (comp.bin_file) |base| {
base.openLoadObject(path) catch |err| switch (err) {
error.LinkFailure => return, // error reported via diags
else => |e| diags.addParseError(path, "failed to parse object: {s}", .{@errorName(e)}),
};
},
.load_archive => |path| if (comp.bin_file) |base| {
base.openLoadArchive(path, null) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
};
},
.load_dso => |path| if (comp.bin_file) |base| {
base.openLoadDso(path, .{
.preferred_mode = .dynamic,
.search_strategy = .paths_first,
}) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| diags.addParseError(path, "failed to parse shared library: {s}", .{@errorName(e)}),
};
},
.load_input => |input| if (comp.bin_file) |base| {
base.loadInput(input) catch |err| switch (err) {
error.LinkFailure => return, // error reported via link_diags
else => |e| {
if (input.path()) |path| {
diags.addParseError(path, "failed to parse linker input: {s}", .{@errorName(e)});
} else {
diags.addError("failed to {s}: {s}", .{ input.taskName(), @errorName(e) });
}
},
};
},
.codegen_nav => |nav_index| {
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
pt.linkerUpdateNav(nav_index) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
},
.codegen_func => |func| {
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
// This call takes ownership of `func.air`.
pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
},
.codegen_type => |ty| {
const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
pt.linkerUpdateContainerType(ty) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
},
}
};
}
pub fn spawnLld(
comp: *Compilation,