Mirror of https://github.com/ziglang/zig.git, synced 2024-11-21 19:42:56 +00:00
Air: Fix mustLower() for atomic_load with inter-thread ordering.
This commit is contained in:
parent cc507c5024
commit f13011c843
@@ -1825,7 +1825,10 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         },
         .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
         .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
-        .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
+        .atomic_load => switch (data.atomic_load.order) {
+            .unordered, .monotonic => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
+            else => true, // Stronger memory orderings have inter-thread side effects.
+        },
     };
 }

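The effect of the mustLower() change above (in Air.zig), sketched as a standalone example that is not part of the commit: the loaded value is discarded, so Liveness marks the instruction as unused, but with .acquire or .seq_cst ordering the load still has an inter-thread effect and must be emitted anyway. Only .unordered and .monotonic loads through non-volatile pointers remain eligible for elision.

var flag: u32 = 0;

pub fn main() void {
    // Unused result: previously mustLower() only checked for a volatile
    // pointer here, so this acquire load could be dropped entirely.
    _ = @atomicLoad(u32, &flag, .acquire);

    // A monotonic load with an unused result may still be elided, which is
    // the case the new switch keeps.
    _ = @atomicLoad(u32, &flag, .monotonic);
}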
@@ -7792,48 +7792,56 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     const pt = func.pt;
     const zcu = pt.zcu;
     const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
-    const order: std.builtin.AtomicOrder = atomic_load.order;
-
-    const ptr_ty = func.typeOf(atomic_load.ptr);
-    const elem_ty = ptr_ty.childType(zcu);
-    const ptr_mcv = try func.resolveInst(atomic_load.ptr);
-
-    const bit_size = elem_ty.bitSize(zcu);
-    if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{});
-
-    const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
-    assert(result_mcv == .register); // should be less than 8 bytes
-
-    if (order == .seq_cst) {
-        _ = try func.addInst(.{
-            .tag = .fence,
-            .data = .{ .fence = .{
-                .pred = .rw,
-                .succ = .rw,
-            } },
-        });
-    }
-
-    try func.load(result_mcv, ptr_mcv, ptr_ty);
-
-    switch (order) {
-        // Don't guarnetee other memory operations to be ordered after the load.
-        .unordered => {},
-        .monotonic => {},
-        // Make sure all previous reads happen before any reading or writing accurs.
-        .seq_cst, .acquire => {
-            _ = try func.addInst(.{
-                .tag = .fence,
-                .data = .{ .fence = .{
-                    .pred = .r,
-                    .succ = .rw,
-                } },
-            });
-        },
-        else => unreachable,
-    }
-
-    return func.finishAir(inst, result_mcv, .{ atomic_load.ptr, .none, .none });
+    const result: MCValue = result: {
+        const order: std.builtin.AtomicOrder = atomic_load.order;
+
+        const ptr_ty = func.typeOf(atomic_load.ptr);
+        const elem_ty = ptr_ty.childType(zcu);
+        const ptr_mcv = try func.resolveInst(atomic_load.ptr);
+
+        const bit_size = elem_ty.bitSize(zcu);
+        if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{});
+
+        const unused = func.liveness.isUnused(inst);
+
+        if (order == .seq_cst) {
+            _ = try func.addInst(.{
+                .tag = .fence,
+                .data = .{ .fence = .{
+                    .pred = .rw,
+                    .succ = .rw,
+                } },
+            });
+        }
+
+        const result_mcv: MCValue = if (func.liveness.isUnused(inst))
+            .{ .register = .zero }
+        else
+            try func.allocRegOrMem(elem_ty, inst, true);
+        assert(result_mcv == .register); // should be less than 8 bytes
+
+        try func.load(result_mcv, ptr_mcv, ptr_ty);
+
+        switch (order) {
+            // Don't guarantee other memory operations to be ordered after the load.
+            .unordered, .monotonic => {},
+            // Make sure all previous reads happen before any reading or writing occurs.
+            .acquire, .seq_cst => {
+                _ = try func.addInst(.{
+                    .tag = .fence,
+                    .data = .{ .fence = .{
+                        .pred = .r,
+                        .succ = .rw,
+                    } },
+                });
+            },
+            else => unreachable,
+        }
+
+        break :result if (unused) .unreach else result_mcv;
+    };
+
+    return func.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
 }
 
 fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
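For reference, this is roughly the ordering-to-fence mapping the riscv64 lowering above implements, written out as a small sketch; Fence, Plan, and fencesFor are made-up names for illustration and are not part of the backend:

const std = @import("std");
const AtomicOrder = std.builtin.AtomicOrder;

const Fence = struct { pred: []const u8, succ: []const u8 };
const Plan = struct { pre: ?Fence, post: ?Fence };

// Maps a load's ordering to the fences emitted around it, mirroring the
// switch in the lowering above.
fn fencesFor(order: AtomicOrder) Plan {
    return switch (order) {
        // No inter-thread ordering: the plain load is enough, and an unused
        // result still allows the instruction to be skipped.
        .unordered, .monotonic => .{ .pre = null, .post = null },
        // Acquire: later reads and writes must not move before the load.
        .acquire => .{ .pre = null, .post = .{ .pred = "r", .succ = "rw" } },
        // Sequentially consistent additionally fences rw,rw before the load.
        .seq_cst => .{
            .pre = .{ .pred = "rw", .succ = "rw" },
            .post = .{ .pred = "r", .succ = "rw" },
        },
        // release/acq_rel are not valid orderings for an atomic load.
        .release, .acq_rel => unreachable,
    };
}

When the result is unused, only the destination changes: the load is still emitted, targeting the zero register, so the memory access and its fences are preserved.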
@@ -16596,23 +16596,29 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
 fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
     const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
-    const ptr_ty = self.typeOf(atomic_load.ptr);
-    const ptr_mcv = try self.resolveInst(atomic_load.ptr);
-    const ptr_lock = switch (ptr_mcv) {
-        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-        else => null,
-    };
-    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
-
-    const dst_mcv =
-        if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
-            ptr_mcv
-        else
-            try self.allocRegOrMem(inst, true);
-
-    try self.load(dst_mcv, ptr_ty, ptr_mcv);
-    return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
+    const result: MCValue = result: {
+        const ptr_ty = self.typeOf(atomic_load.ptr);
+        const ptr_mcv = try self.resolveInst(atomic_load.ptr);
+        const ptr_lock = switch (ptr_mcv) {
+            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+            else => null,
+        };
+        defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+        const unused = self.liveness.isUnused(inst);
+
+        const dst_mcv: MCValue = if (unused)
+            .{ .register = try self.register_manager.allocReg(null, self.regClassForType(ptr_ty.childType(self.pt.zcu))) }
+        else if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
+            ptr_mcv
+        else
+            try self.allocRegOrMem(inst, true);
+
+        try self.load(dst_mcv, ptr_ty, ptr_mcv);
+
+        break :result if (unused) .unreach else dst_mcv;
+    };
+
+    return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
 }
 
 fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
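The x86_64 lowering above never emits fences for atomic loads, since an ordinary x86-64 load already provides acquire semantics; its half of the fix only ensures an unused load still gets a destination register instead of being skipped. A rough end-to-end sketch, not part of the commit, of the pattern these backend changes keep correct, where the acquire load's value is deliberately ignored:

const std = @import("std");

var data: u32 = 0;
var ready: u32 = 0;

fn producer() void {
    data = 42;
    @atomicStore(u32, &ready, 1, .release);
}

pub fn main() !void {
    const t = try std.Thread.spawn(.{}, producer, .{});
    defer t.join();

    // Wait for the flag, then re-read it with acquire ordering and discard
    // the value; this unused load is exactly the case the backends now keep.
    while (@atomicLoad(u32, &ready, .monotonic) == 0) {}
    _ = @atomicLoad(u32, &ready, .acquire);

    std.debug.assert(data == 42);
}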