stage2: progress towards stage3

 * The `@bitCast` workaround is removed in favor of `@ptrCast` properly
   doing element casting for slice element types. This required an
   enhancement to both stage1 and stage2.
 * stage1 incorrectly accepts `.{}` where `{}` is required. stage2 code
   that abused this is fixed (see the sketch after this message).
 * Make some parameters comptime to support functions in switch
   expressions (as opposed to making them function pointers).
 * Avoid relying on local temporaries being mutable.
 * Add workarounds for cases where stage1 and stage2 disagree on
   function pointer types.
 * Work around a recursive formatting bug with a `@panic("TODO")`.
 * Remove unreachable `else` prongs for some inferred error sets.

All in an effort towards #89.
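
For illustration, a minimal sketch (not from this commit) of the `{}` versus
`.{}` distinction in the second bullet: `{}` is the sole value of type `void`,
while `.{}` is an anonymous struct literal, so a `void` union field must be
initialized with `{}`.

    const FnData = union(enum) {
        wasm: u32,
        nvptx: void,
    };

    test "a void union field takes {} rather than .{}" {
        // `{}` is a value of type void; `.{}` is an empty anonymous
        // struct literal, which stage2 correctly rejects here.
        const data: FnData = .{ .nvptx = {} };
        _ = data;
    }
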
Andrew Kelley 2022-04-14 10:12:45 -07:00
parent 0739770739
commit 2587474717
26 changed files with 156 additions and 91 deletions

View File

@@ -766,12 +766,10 @@ fn formatFloatValue(
} else if (comptime std.mem.eql(u8, fmt, "d")) {
formatFloatDecimal(value, options, buf_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
} else if (comptime std.mem.eql(u8, fmt, "x")) {
formatFloatHexadecimal(value, options, buf_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
} else {
@compileError("Unsupported format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");

View File

@@ -9,7 +9,6 @@ const os = std.os;
const fs = std.fs;
const mem = std.mem;
const meta = std.meta;
const trait = meta.trait;
const File = std.fs.File;
pub const Mode = enum {

View File

@@ -66,9 +66,7 @@ pub const Random = struct {
/// Returns a random value from an enum, evenly distributed.
pub fn enumValue(r: Random, comptime EnumType: type) EnumType {
if (comptime !std.meta.trait.is(.Enum)(EnumType)) {
@compileError("Random.enumValue requires an enum type, not a " ++ @typeName(EnumType));
}
comptime assert(@typeInfo(EnumType) == .Enum);
// We won't use int -> enum casting because enum elements can have
// arbitrary values. Instead we'll randomly pick one of the type's values.
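
The hunk above replaces a `std.meta.trait` check with a direct comptime
assertion on the type tag. A minimal sketch of that pattern (illustrative
names, using this era's `.Enum` tag spelling):

    const std = @import("std");

    fn enumFieldCount(comptime E: type) usize {
        // Assert the type tag directly at comptime instead of going
        // through std.meta.trait.
        comptime std.debug.assert(@typeInfo(E) == .Enum);
        return @typeInfo(E).Enum.fields.len;
    }

    test "comptime assertion on a type tag" {
        const Color = enum { red, green, blue };
        try std.testing.expect(enumFieldCount(Color) == 3);
    }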

View File

@@ -85,12 +85,12 @@ fn reserveExtra(astgen: *AstGen, size: usize) Allocator.Error!u32 {
}
fn appendRefs(astgen: *AstGen, refs: []const Zir.Inst.Ref) !void {
const coerced = @bitCast([]const u32, refs);
const coerced = @ptrCast([]const u32, refs);
return astgen.extra.appendSlice(astgen.gpa, coerced);
}
fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
const coerced = @bitCast([]const u32, refs);
const coerced = @ptrCast([]const u32, refs);
astgen.extra.appendSliceAssumeCapacity(coerced);
}
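
With this commit, `@ptrCast` reinterprets the slice element type itself
(`Zir.Inst.Ref` is an enum backed by `u32`, so the sizes match). A standalone
sketch, assuming this era's two-argument `@ptrCast`:

    const std = @import("std");

    test "@ptrCast a slice to a slice of same-size elements" {
        const Ref = enum(u32) { none, one, _ };
        var refs = [_]Ref{ .none, .one };
        const slice: []const Ref = &refs;
        // Previously this required @bitCast on the slice; now @ptrCast
        // performs the element-type cast directly.
        const words = @ptrCast([]const u32, slice);
        try std.testing.expect(words.len == 2);
        try std.testing.expect(words[1] == 1);
    }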

View File

@@ -454,7 +454,7 @@ fn analyzeInst(
const inst_data = inst_datas[inst].pl_op;
const callee = inst_data.operand;
const extra = a.air.extraData(Air.Call, inst_data.payload);
const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]);
if (args.len + 1 <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
buf[0] = callee;
@@ -495,7 +495,7 @@ fn analyzeInst(
const ty_pl = inst_datas[inst].ty_pl;
const aggregate_ty = a.air.getRefType(ty_pl.ty);
const len = @intCast(usize, aggregate_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
@@ -571,9 +571,9 @@ fn analyzeInst(
.assembly => {
const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
simple: {

View File

@@ -4593,7 +4593,7 @@ pub fn clearDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = .{} },
.nvptx => .{ .nvptx = {} },
};
}
if (decl.getInnerNamespace()) |namespace| {
@@ -4975,7 +4975,7 @@ pub fn allocateNewDecl(
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = .{} },
.nvptx => .{ .nvptx = {} },
},
.generation = 0,
.is_pub = false,

View File

@@ -14110,21 +14110,27 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
if (dest_ty.isSlice()) {
const dest_is_slice = dest_ty.isSlice();
const operand_is_slice = operand_ty.isSlice();
if (dest_is_slice and !operand_is_slice) {
return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{});
}
const ptr = if (operand_ty.isSlice())
const ptr = if (operand_is_slice and !dest_is_slice)
try sema.analyzeSlicePtr(block, operand_src, operand, operand_ty)
else
operand;
try sema.resolveTypeLayout(block, dest_ty_src, dest_ty.elemType2());
const dest_elem_ty = dest_ty.elemType2();
try sema.resolveTypeLayout(block, dest_ty_src, dest_elem_ty);
const dest_align = dest_ty.ptrAlignment(target);
try sema.resolveTypeLayout(block, operand_src, operand_ty.elemType2());
const operand_elem_ty = operand_ty.elemType2();
try sema.resolveTypeLayout(block, operand_src, operand_elem_ty);
const operand_align = operand_ty.ptrAlignment(target);
// If the destination is less aligned than the source, preserve the source alignment
var aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
// Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
if (dest_ty.zigTypeTag() == .Optional) {
var buf: Type.Payload.ElemType = undefined;
@@ -14138,6 +14144,16 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
};
if (dest_is_slice) {
const operand_elem_size = operand_elem_ty.abiSize(target);
const dest_elem_size = dest_elem_ty.abiSize(target);
if (operand_elem_size != dest_elem_size) {
// note that this is not implemented in stage1 so we should probably wait
// until that codebase is replaced before implementing this in stage2.
return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{});
}
}
return sema.coerceCompatiblePtrs(block, aligned_dest_ty, ptr, operand_src);
}
@@ -15743,7 +15759,7 @@ fn zirMinMax(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
comptime air_tag: Air.Inst.Tag,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
@@ -15763,7 +15779,7 @@ fn analyzeMinMax(
src: LazySrcLoc,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
air_tag: Air.Inst.Tag,
comptime air_tag: Air.Inst.Tag,
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
@@ -20976,7 +20992,7 @@ fn resolvePeerTypes(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
instructions: []Air.Inst.Ref,
instructions: []const Air.Inst.Ref,
candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
switch (instructions.len) {
@@ -22794,7 +22810,7 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
}
fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void {
const coerced = @bitCast([]const u32, refs);
const coerced = @ptrCast([]const u32, refs);
sema.air_extra.appendSliceAssumeCapacity(coerced);
}
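
`air_tag` becomes `comptime` above so that switch prongs can evaluate to
functions rather than function pointers, per the commit message. A minimal
sketch of why that matters (illustrative names):

    const std = @import("std");

    fn min2(a: i32, b: i32) i32 {
        return if (a < b) a else b;
    }
    fn max2(a: i32, b: i32) i32 {
        return if (a > b) a else b;
    }

    // With a comptime tag the switch is resolved at compile time, so each
    // prong may be a function body; a runtime tag would force the prongs
    // to coerce to function pointers.
    fn apply(comptime tag: enum { min, max }, a: i32, b: i32) i32 {
        const f = switch (tag) {
            .min => min2,
            .max => max2,
        };
        return f(a, b);
    }

    test "functions in switch prongs via a comptime parameter" {
        try std.testing.expect(apply(.min, 1, 2) == 1);
        try std.testing.expect(apply(.max, 1, 2) == 2);
    }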

View File

@@ -2398,7 +2398,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
const fn_ty = switch (ty.zigTypeTag()) {
@@ -2865,7 +2865,10 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// TODO track the new register / stack allocation
}
self.branch_stack.pop().deinit(self.gpa);
{
var item = self.branch_stack.pop();
item.deinit(self.gpa);
}
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -3162,9 +3165,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -3686,7 +3689,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.vectorLen();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});

View File

@@ -3144,7 +3144,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
const fn_ty = switch (ty.zigTypeTag()) {
@@ -3650,7 +3650,10 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// TODO track the new register / stack allocation
}
self.branch_stack.pop().deinit(self.gpa);
{
var item = self.branch_stack.pop();
item.deinit(self.gpa);
}
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -3951,9 +3954,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -4735,7 +4738,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.vectorLen();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for arm", .{});

View File

@@ -2,6 +2,7 @@
//! machine code
const Emit = @This();
const builtin = @import("builtin");
const std = @import("std");
const math = std.math;
const Mir = @import("Mir.zig");
@@ -622,12 +623,17 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
const ldr = switch (tag) {
.ldr_stack_argument => Instruction.ldr,
.ldrb_stack_argument => Instruction.ldrb,
.ldr_stack_argument => &Instruction.ldr,
.ldrb_stack_argument => &Instruction.ldrb,
else => unreachable,
};
try emit.writeInstruction(ldr(
const ldr_workaround = switch (builtin.zig_backend) {
.stage1 => ldr.*,
else => ldr,
};
try emit.writeInstruction(ldr_workaround(
cond,
r_stack_offset.rt,
.fp,
@@ -643,13 +649,18 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
const ldr = switch (tag) {
.ldrh_stack_argument => Instruction.ldrh,
.ldrsb_stack_argument => Instruction.ldrsb,
.ldrsh_stack_argument => Instruction.ldrsh,
.ldrh_stack_argument => &Instruction.ldrh,
.ldrsb_stack_argument => &Instruction.ldrsb,
.ldrsh_stack_argument => &Instruction.ldrsh,
else => unreachable,
};
try emit.writeInstruction(ldr(
const ldr_workaround = switch (builtin.zig_backend) {
.stage1 => ldr.*,
else => ldr,
};
try emit.writeInstruction(ldr_workaround(
cond,
r_stack_offset.rt,
.fp,
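
The `ldr_workaround` dance above exists because stage1 and stage2 disagree on
the type of `&Instruction.ldr`. A generic sketch of the same workaround
(illustrative functions):

    const std = @import("std");
    const builtin = @import("builtin");

    fn add1(x: i32) i32 {
        return x + 1;
    }
    fn sub1(x: i32) i32 {
        return x - 1;
    }

    fn pick(up: bool) i32 {
        // Taking addresses keeps both branches the same type under
        // stage2; stage1 then needs a dereference to call the result.
        const f = if (up) &add1 else &sub1;
        const f_workaround = switch (builtin.zig_backend) {
            .stage1 => f.*,
            else => f,
        };
        return f_workaround(1);
    }

    test "calling through a function pointer chosen at runtime" {
        try std.testing.expect(pick(true) == 2);
        try std.testing.expect(pick(false) == 0);
    }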

View File

@@ -1640,7 +1640,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const fn_ty = self.air.typeOf(pl_op.operand);
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
@@ -2075,9 +2075,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -2413,7 +2413,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.vectorLen();
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for riscv64", .{});

View File

@@ -2425,7 +2425,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
var highest_maybe: ?i32 = null;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
const values = try self.gpa.alloc(CaseValue, items.len);
@@ -3328,7 +3328,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
switch (result_ty.zigTypeTag()) {
.Vector => return self.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),

View File

@@ -3501,7 +3501,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
const fn_ty = switch (ty.zigTypeTag()) {
@@ -3684,7 +3684,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.ops = (Mir.Ops{
.flags = 0b01,
}).encode(),
.data = .{ .imm = @bitCast(i32, @intCast(u32, fn_got_addr)) },
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else return self.fail("TODO implement calling extern fn on plan9", .{});
} else {
@@ -4220,7 +4220,10 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// TODO track the new register / stack allocation
}
self.branch_stack.pop().deinit(self.gpa);
{
var item = self.branch_stack.pop();
item.deinit(self.gpa);
}
// We already took care of pl_op.operand earlier, so we're going
// to pass .none here
@@ -4562,7 +4565,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + items.len + case_body.len;
@@ -4615,7 +4618,10 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
if (switch_br.data.else_body_len > 0) {
const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
try self.branch_stack.append(.{});
defer self.branch_stack.pop().deinit(self.gpa);
defer {
var item = self.branch_stack.pop();
item.deinit(self.gpa);
}
const else_deaths = liveness.deaths.len - 1;
try self.ensureProcessDeathCapacity(liveness.deaths[else_deaths].len);
@@ -4705,9 +4711,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5975,7 +5981,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const abi_size = @intCast(u32, result_ty.abiSize(self.target.*));
const abi_align = result_ty.abiAlignment(self.target.*);
const result: MCValue = res: {

View File

@@ -220,6 +220,9 @@ fn formatIdent(
}
pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
if (builtin.zig_backend != .stage1) {
@panic("TODO");
}
return .{ .data = ident };
}
@@ -2310,7 +2313,6 @@ fn airWrapOp(
const val = -1 * std.math.pow(i64, 2, @intCast(i64, bits - 1));
break :blk std.fmt.bufPrint(&min_buf, "{d}", .{val}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
},
},
@@ -2336,7 +2338,6 @@ fn airWrapOp(
const val = std.math.pow(u64, 2, pow_bits) - 1;
break :blk std.fmt.bufPrint(&max_buf, "{}", .{val}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
},
};
@@ -2418,7 +2419,6 @@ fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue {
const val = -1 * std.math.pow(i65, 2, @intCast(i65, bits - 1));
break :blk std.fmt.bufPrint(&min_buf, "{d}", .{val}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
},
},
@@ -2444,7 +2444,6 @@ fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue {
const val = std.math.pow(u65, 2, pow_bits) - 1;
break :blk std.fmt.bufPrint(&max_buf, "{}", .{val}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
},
};
@@ -2702,7 +2701,7 @@ fn airCall(
}
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = f.air.typeOf(pl_op.operand);
const fn_ty = switch (callee_ty.zigTypeTag()) {
.Fn => callee_ty,
@@ -2959,7 +2958,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
var case_i: u32 = 0;
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = f.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @bitCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
const items = @ptrCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]);
const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -2990,9 +2989,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
const clobbers_len = @truncate(u31, extra.data.flags);
var extra_i: usize = extra.end;
const outputs = @bitCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, f.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
if (!is_volatile and f.liveness.isUnused(inst)) return CValue.none;
@@ -3860,7 +3859,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = f.air.getRefType(ty_pl.ty);
const len = vector_ty.vectorLen();
const elements = @bitCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);

View File

@@ -3657,7 +3657,7 @@ pub const FuncGen = struct {
fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*const llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.air.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
.Fn => callee_ty,
@@ -4037,7 +4037,7 @@ pub const FuncGen = struct {
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -4538,9 +4538,9 @@ pub const FuncGen = struct {
if (!is_volatile and self.liveness.isUnused(inst)) return null;
const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
if (outputs.len > 1) {
@@ -6660,7 +6660,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try self.dg.llvmType(result_ty);
const target = self.dg.module.getTarget();

View File

@@ -649,6 +649,11 @@ pub const File = struct {
}
}
pub const UpdateDeclExportsError = error{
OutOfMemory,
AnalysisFail,
};
/// May be called before or after updateDecl, but must be called after
/// allocateDeclIndexes for any given Decl.
pub fn updateDeclExports(
@@ -656,7 +661,7 @@ pub const File = struct {
module: *Module,
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {
) UpdateDeclExportsError!void {
log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name });
assert(decl.has_tv);
switch (base.tag) {
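
Pinning `updateDeclExports` to an explicit error set, rather than an inferred
`!void`, is what lets callers drop unreachable `else` prongs. A minimal sketch:

    pub const UpdateDeclExportsError = error{
        OutOfMemory,
        AnalysisFail,
    };

    fn updateDeclExportsSketch(ok: bool) UpdateDeclExportsError!void {
        if (!ok) return error.AnalysisFail;
    }

    test "callers can switch exhaustively on an explicit error set" {
        updateDeclExportsSketch(false) catch |err| switch (err) {
            error.OutOfMemory => unreachable,
            error.AnalysisFail => {},
        };
    }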

View File

@@ -89,8 +89,9 @@ pub fn deinit(self: *C) void {
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
const gpa = self.base.allocator;
if (self.decl_table.fetchSwapRemove(decl)) |*kv| {
kv.value.deinit(gpa);
if (self.decl_table.fetchSwapRemove(decl)) |kv| {
var decl_block = kv.value;
decl_block.deinit(gpa);
}
}

View File

@@ -2482,7 +2482,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = .{},
.none = {},
}, .{
.parent_atom_index = atom.local_sym_index,
});

View File

@@ -625,7 +625,6 @@ pub fn parseDataInCode(self: *Object, allocator: Allocator) !void {
while (true) {
const dice = reader.readStruct(macho.data_in_code_entry) catch |err| switch (err) {
error.EndOfStream => break,
else => |e| return e,
};
try self.data_in_code_entries.append(allocator, dice);
}

View File

@@ -40,7 +40,6 @@ pub fn getLibraryOffset(reader: anytype, target: std.Target) !u64 {
// fine because we can keep looking for one that might match.
const lib_arch = decodeArch(fat_arch.cputype, false) catch |err| switch (err) {
error.UnsupportedCpuArchitecture => continue,
else => |e| return e,
};
if (lib_arch == target.cpu.arch) {
// We have found a matching architecture!

View File

@@ -307,7 +307,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = .{} }, .{
}, &code_buffer, .{ .none = {} }, .{
.parent_atom_index = @intCast(u32, sym_index),
});
const code = switch (res) {

View File

@@ -2551,7 +2551,8 @@ fn emitSymbolTable(self: *Wasm, file: fs.File, arena: Allocator, symbol_table: *
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
};
try file.writevAll(&.{iovec});
var iovecs = [_]std.os.iovec_const{iovec};
try file.writevAll(&iovecs);
}
fn emitSegmentInfo(self: *Wasm, file: fs.File, arena: Allocator) !void {
@@ -2576,7 +2577,8 @@ fn emitSegmentInfo(self: *Wasm, file: fs.File, arena: Allocator) !void {
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
};
try file.writevAll(&.{iovec});
var iovecs = [_]std.os.iovec_const{iovec};
try file.writevAll(&iovecs);
}
fn getULEB128Size(uint_value: anytype) u32 {
@@ -2635,12 +2637,14 @@ fn emitCodeRelocations(
var buf: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &buf, count);
try payload.insertSlice(reloc_start, &buf);
const iovec: std.os.iovec_const = .{
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
var iovecs = [_]std.os.iovec_const{
.{
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
},
};
const header_offset = try reserveCustomSectionHeader(file);
try file.writevAll(&.{iovec});
try file.writevAll(&iovecs);
const size = @intCast(u32, payload.items.len);
try writeCustomSectionHeader(file, header_offset, size);
}
@@ -2694,12 +2698,14 @@ fn emitDataRelocations(
var buf: [5]u8 = undefined;
leb.writeUnsignedFixed(5, &buf, count);
try payload.insertSlice(reloc_start, &buf);
const iovec: std.os.iovec_const = .{
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
var iovecs = [_]std.os.iovec_const{
.{
.iov_base = payload.items.ptr,
.iov_len = payload.items.len,
},
};
const header_offset = try reserveCustomSectionHeader(file);
try file.writevAll(&.{iovec});
try file.writevAll(&iovecs);
const size = @intCast(u32, payload.items.len);
try writeCustomSectionHeader(file, header_offset, size);
}
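
The `iovecs` rewrites above are needed because `writevAll` takes a mutable
slice (it rewrites the entries to resume after partial writes), while
`&.{iovec}` points at a constant temporary. A sketch, assuming this era's
`std.os.iovec_const`:

    const std = @import("std");

    fn writePayload(file: std.fs.File, payload: []const u8) !void {
        // The array must be a mutable local so that &iovecs coerces to a
        // mutable slice for writevAll.
        var iovecs = [_]std.os.iovec_const{.{
            .iov_base = payload.ptr,
            .iov_len = payload.len,
        }};
        try file.writevAll(&iovecs);
    }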

View File

@@ -328,7 +328,7 @@ const Writer = struct {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = w.air.getRefType(ty_pl.ty);
const len = @intCast(usize, vector_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
try s.print("{}, [", .{vector_ty.fmtDebug()});
for (elements) |elem, i| {
@@ -533,9 +533,9 @@ const Writer = struct {
try s.writeAll(", volatile");
}
const outputs = @bitCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.outputs_len]);
const outputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
const inputs = @bitCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.inputs_len]);
const inputs = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
for (outputs) |output| {
@@ -604,7 +604,7 @@ const Writer = struct {
fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", [");
for (args) |arg, i| {
@@ -674,7 +674,7 @@ const Writer = struct {
while (case_i < switch_br.data.cases_len) : (case_i += 1) {
const case = w.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @bitCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]);
const items = @ptrCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]);
const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;

View File

@@ -23052,6 +23052,12 @@ static Stage1AirInst *ir_analyze_instruction_ptr_cast(IrAnalyze *ira, Stage1ZirI
if (type_is_invalid(src_type))
return ira->codegen->invalid_inst_gen;
// This logic is not quite right; this is just to get stage1 to accept valid code
// we use in the self-hosted compiler.
if (is_slice(dest_type) && is_slice(src_type)) {
return ir_analyze_bit_cast(ira, instruction->base.scope, instruction->base.source_node, ptr, dest_type);
}
bool keep_bigger_alignment = true;
return ir_analyze_ptr_cast(ira, instruction->base.scope, instruction->base.source_node, ptr,
instruction->ptr->source_node, dest_type, dest_type_value->source_node,

View File

@@ -1451,7 +1451,7 @@ pub const Type = extern union {
var duped_names = Module.ErrorSet.NameMap{};
try duped_names.ensureTotalCapacity(allocator, names.len);
for (names) |name| {
duped_names.putAssumeCapacityNoClobber(name, .{});
duped_names.putAssumeCapacityNoClobber(name, {});
}
return Tag.error_set_merged.create(allocator, duped_names);
},
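
`putAssumeCapacityNoClobber(name, {})` stores the `void` value into a
void-valued map, the usual way to model a set. A minimal sketch of the idiom:

    const std = @import("std");

    test "a void-valued map is a set" {
        const gpa = std.testing.allocator;
        var set = std.StringHashMap(void).init(gpa);
        defer set.deinit();
        // The value type is void, so the value to insert is `{}`.
        try set.put("OutOfMemory", {});
        try std.testing.expect(set.contains("OutOfMemory"));
    }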

View File

@@ -217,3 +217,19 @@ test "implicit optional pointer to optional anyopaque pointer" {
var z = @ptrCast(*[4]u8, y);
try expect(std.mem.eql(u8, z, "aoeu"));
}
test "@ptrCast slice to slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn foo(slice: []u32) []i32 {
return @ptrCast([]i32, slice);
}
};
var buf: [4]u32 = .{ 0, 0, 0, 0 };
const alias = S.foo(&buf);
alias[1] = 42;
try expect(buf[1] == 42);
try expect(alias.len == 4);
}