Compare commits

...

5 Commits

Author · SHA1 · Message · Date

David Rubin · fdce0bd75b · Merge bfedbf8dab into f845fa04a0 · 2024-11-21 05:37:16 +00:00

Alex Rønne Petersen · f845fa04a0 · std.debug: Gracefully handle process_vm_readv() EPERM in MemoryAccessor.read(). Closes #21815. · 2024-11-20 23:07:46 +01:00

Frank Denis · a5d4ad17b7 · crypto.keccak.State: add checks to prevent insecure transitions (#22020) · 2024-11-20 11:16:09 +01:00

    * crypto.keccak.State: don't unconditionally permute after a squeeze()

      Now, squeeze() behaves like absorb(). Namely,

          squeeze(x[0..t]);
          squeeze(x[t..n]); // with t <= n

      becomes equivalent to squeeze(x[0..n]) (a runnable sketch follows the commit list).

    * keccak: in debug mode, track transitions to prevent insecure ones.

      Fixes #22019

Shawn Gao · dafe1a910d · Append disabled LLVM CPU features after enabled ones · 2024-11-20 10:09:03 +01:00

David Rubin · bfedbf8dab · fix ptrFromInt · 2024-10-28 21:18:07 -07:00
5 changed files with 185 additions and 34 deletions
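
The squeeze() equivalence described in Frank Denis's commit message can be sketched as a test. This is a minimal sketch, assuming the State type defined in the Keccak file changed below (the type is private to that file, so the snippet only compiles alongside it); the new test "squeeze" in the diff verifies the same property with different split points:

    const std = @import("std");

    test "split squeeze equals one squeeze" {
        // Initialize and permute once so that squeezing is a legal transition.
        var st = State(800, 256, 22).init([_]u8{0x80} ** 100, 0x01);
        st.permute();

        var whole: [32]u8 = undefined;
        var parts: [32]u8 = undefined;

        var a = st;
        a.squeeze(whole[0..]); // squeeze(x[0..n])

        var b = st;
        b.squeeze(parts[0..10]); // squeeze(x[0..t])
        b.squeeze(parts[10..]); // squeeze(x[t..n]), with t <= n

        // Both call patterns must extract identical bytes.
        try std.testing.expectEqualSlices(u8, &whole, &parts);
    }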

View File

@@ -4,6 +4,7 @@ const assert = std.debug.assert;
 const math = std.math;
 const mem = std.mem;
 const native_endian = builtin.cpu.arch.endian();
+const mode = @import("builtin").mode;
 
 /// The Keccak-f permutation.
 pub fn KeccakF(comptime f: u11) type {
@@ -199,6 +200,46 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type
     comptime assert(f >= 200 and f <= 1600 and f % 200 == 0); // invalid state size
     comptime assert(capacity < f and capacity % 8 == 0); // invalid capacity size
 
+    // In debug mode, track transitions to prevent insecure ones.
+    const Op = enum { uninitialized, initialized, updated, absorb, squeeze };
+    const TransitionTracker = if (mode == .Debug) struct {
+        op: Op = .uninitialized,
+
+        fn to(tracker: *@This(), next_op: Op) void {
+            switch (next_op) {
+                .updated => {
+                    switch (tracker.op) {
+                        .uninitialized => @panic("cannot permute before initializing"),
+                        else => {},
+                    }
+                },
+                .absorb => {
+                    switch (tracker.op) {
+                        .squeeze => @panic("cannot absorb right after squeezing"),
+                        else => {},
+                    }
+                },
+                .squeeze => {
+                    switch (tracker.op) {
+                        .uninitialized => @panic("cannot squeeze before initializing"),
+                        .initialized => @panic("cannot squeeze right after initializing"),
+                        .absorb => @panic("cannot squeeze right after absorbing"),
+                        else => {},
+                    }
+                },
+                .uninitialized => @panic("cannot transition to uninitialized"),
+                .initialized => {},
+            }
+            tracker.op = next_op;
+        }
+    } else struct {
+        // No-op in non-debug modes.
+        inline fn to(tracker: *@This(), next_op: Op) void {
+            _ = tracker; // no-op
+            _ = next_op; // no-op
+        }
+    };
+
     return struct {
         const Self = @This();
 
@@ -215,67 +256,108 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type
         st: KeccakF(f) = .{},
 
+        transition: TransitionTracker = .{},
+
         /// Absorb a slice of bytes into the sponge.
-        pub fn absorb(self: *Self, bytes_: []const u8) void {
-            var bytes = bytes_;
+        pub fn absorb(self: *Self, bytes: []const u8) void {
+            self.transition.to(.absorb);
+            var i: usize = 0;
             if (self.offset > 0) {
                 const left = @min(rate - self.offset, bytes.len);
                 @memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
                 self.offset += left;
+                if (left == bytes.len) return;
                 if (self.offset == rate) {
+                    self.offset = 0;
                     self.st.addBytes(self.buf[0..]);
                     self.st.permuteR(rounds);
-                    self.offset = 0;
                 }
-                if (left == bytes.len) return;
-                bytes = bytes[left..];
+                i = left;
             }
-            while (bytes.len >= rate) {
-                self.st.addBytes(bytes[0..rate]);
+            while (i + rate < bytes.len) : (i += rate) {
+                self.st.addBytes(bytes[i..][0..rate]);
                 self.st.permuteR(rounds);
-                bytes = bytes[rate..];
             }
-            if (bytes.len > 0) {
-                @memcpy(self.buf[0..bytes.len], bytes);
-                self.offset = bytes.len;
+            const left = bytes.len - i;
+            if (left > 0) {
+                @memcpy(self.buf[0..left], bytes[i..][0..left]);
             }
+            self.offset = left;
         }
 
         /// Initialize the state from a slice of bytes.
-        pub fn init(bytes: [f / 8]u8) Self {
-            return .{ .st = KeccakF(f).init(bytes) };
+        pub fn init(bytes: [f / 8]u8, delim: u8) Self {
+            var st = Self{ .st = KeccakF(f).init(bytes), .delim = delim };
+            st.transition.to(.initialized);
+            return st;
         }
 
         /// Permute the state
         pub fn permute(self: *Self) void {
+            if (mode == .Debug) {
+                if (self.transition.op == .absorb and self.offset > 0) {
+                    @panic("cannot permute with pending input - call fillBlock() or pad() instead");
+                }
+            }
+            self.transition.to(.updated);
             self.st.permuteR(rounds);
             self.offset = 0;
         }
 
-        /// Align the input to the rate boundary.
+        /// Align the input to the rate boundary and permute.
         pub fn fillBlock(self: *Self) void {
+            self.transition.to(.absorb);
             self.st.addBytes(self.buf[0..self.offset]);
             self.st.permuteR(rounds);
             self.offset = 0;
+            self.transition.to(.updated);
         }
 
         /// Mark the end of the input.
         pub fn pad(self: *Self) void {
+            self.transition.to(.absorb);
             self.st.addBytes(self.buf[0..self.offset]);
+            if (self.offset == rate) {
+                self.st.permuteR(rounds);
+                self.offset = 0;
+            }
             self.st.addByte(self.delim, self.offset);
             self.st.addByte(0x80, rate - 1);
             self.st.permuteR(rounds);
             self.offset = 0;
+            self.transition.to(.updated);
         }
 
         /// Squeeze a slice of bytes from the sponge.
+        /// The function can be called multiple times.
         pub fn squeeze(self: *Self, out: []u8) void {
+            self.transition.to(.squeeze);
             var i: usize = 0;
-            while (i < out.len) : (i += rate) {
-                const left = @min(rate, out.len - i);
-                self.st.extractBytes(out[i..][0..left]);
+            if (self.offset == rate) {
+                self.st.permuteR(rounds);
+            } else if (self.offset > 0) {
+                @branchHint(.unlikely);
+                var buf: [rate]u8 = undefined;
+                self.st.extractBytes(buf[0..]);
+                const left = @min(rate - self.offset, out.len);
+                @memcpy(out[0..left], buf[self.offset..][0..left]);
+                self.offset += left;
+                if (left == out.len) return;
+                if (self.offset == rate) {
+                    self.offset = 0;
+                    self.st.permuteR(rounds);
+                }
+                i = left;
+            }
+            while (i + rate < out.len) : (i += rate) {
+                self.st.extractBytes(out[i..][0..rate]);
                 self.st.permuteR(rounds);
             }
+            const left = out.len - i;
+            if (left > 0) {
+                self.st.extractBytes(out[i..][0..left]);
+            }
+            self.offset = left;
         }
     };
 }
@@ -298,3 +380,26 @@ test "Keccak-f800" {
     };
     try std.testing.expectEqualSlices(u32, &st.st, &expected);
 }
+
+test "squeeze" {
+    var st = State(800, 256, 22).init([_]u8{0x80} ** 100, 0x01);
+
+    var out0: [15]u8 = undefined;
+    var out1: [out0.len]u8 = undefined;
+    st.permute();
+    var st0 = st;
+    st0.squeeze(out0[0..]);
+    var st1 = st;
+    st1.squeeze(out1[0 .. out1.len / 2]);
+    st1.squeeze(out1[out1.len / 2 ..]);
+    try std.testing.expectEqualSlices(u8, &out0, &out1);
+
+    var out2: [100]u8 = undefined;
+    var out3: [out2.len]u8 = undefined;
+    var st2 = st;
+    st2.squeeze(out2[0..]);
+    var st3 = st;
+    st3.squeeze(out3[0 .. out2.len / 2]);
+    st3.squeeze(out3[out2.len / 2 ..]);
+    try std.testing.expectEqualSlices(u8, &out2, &out3);
+}

View File

@@ -48,7 +48,8 @@ fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
     switch (linux.E.init(bytes_read)) {
         .SUCCESS => return bytes_read == buf.len,
         .FAULT => return false,
-        .INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
+        .INVAL, .SRCH => unreachable, // own pid is always valid
+        .PERM => {}, // Known to happen in containers.
         .NOMEM => {},
         .NOSYS => {}, // QEMU is known not to implement this syscall.
         else => unreachable, // unexpected
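
process_vm_readv() can fail with EPERM even for the calling process's own pid when a sandbox denies the syscall (seccomp filters in containers, hardened ptrace policies), which is why the arm above is downgraded from unreachable to a soft failure. A minimal sketch of the pattern, assuming the slice-based std.os.linux.process_vm_readv wrapper; readSelf is a hypothetical helper, not the actual MemoryAccessor API:

    const std = @import("std");
    const linux = std.os.linux;

    /// Try to read this process's own memory; report failure instead of
    /// asserting when the kernel denies access.
    fn readSelf(address: usize, buf: []u8) bool {
        var local: [1]std.posix.iovec = .{.{ .base = buf.ptr, .len = buf.len }};
        var remote: [1]std.posix.iovec_const = .{.{ .base = @ptrFromInt(address), .len = buf.len }};
        const rc = linux.process_vm_readv(linux.getpid(), local[0..], remote[0..], 0);
        return switch (linux.E.init(rc)) {
            .SUCCESS => rc == buf.len,
            // EPERM (containers), ENOSYS (QEMU), FAULT, ... : let the caller
            // fall back to another strategy, e.g. reading /proc/self/mem.
            else => false,
        };
    }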

View File

@@ -312,18 +312,29 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
     if (!options.global.use_llvm) break :b null;
 
     var buf = std.ArrayList(u8).init(arena);
+    var disabled_features = std.ArrayList(u8).init(arena);
+    defer disabled_features.deinit();
+
-    for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
-        const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
-        const is_enabled = target.cpu.features.isEnabled(index);
+    // Append disabled features after enabled ones, so that their effects aren't overwritten.
+    for (target.cpu.arch.allFeaturesList()) |feature| {
         if (feature.llvm_name) |llvm_name| {
-            const plus_or_minus = "-+"[@intFromBool(is_enabled)];
-            try buf.ensureUnusedCapacity(2 + llvm_name.len);
-            buf.appendAssumeCapacity(plus_or_minus);
-            buf.appendSliceAssumeCapacity(llvm_name);
-            buf.appendSliceAssumeCapacity(",");
+            const is_enabled = target.cpu.features.isEnabled(feature.index);
+
+            if (is_enabled) {
+                try buf.ensureUnusedCapacity(2 + llvm_name.len);
+                buf.appendAssumeCapacity('+');
+                buf.appendSliceAssumeCapacity(llvm_name);
+                buf.appendAssumeCapacity(',');
+            } else {
+                try disabled_features.ensureUnusedCapacity(2 + llvm_name.len);
+                disabled_features.appendAssumeCapacity('-');
+                disabled_features.appendSliceAssumeCapacity(llvm_name);
+                disabled_features.appendAssumeCapacity(',');
+            }
         }
     }
+    try buf.appendSlice(disabled_features.items);
 
     if (buf.items.len == 0) break :b "";
     assert(std.mem.endsWith(u8, buf.items, ","));
     buf.items[buf.items.len - 1] = 0;
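
LLVM resolves the feature string left to right, so a '-' entry can be silently undone by a later '+' entry that implies the feature it disables; emitting every disabled feature after all enabled ones guarantees the disables take effect. A toy illustration of the resulting layout (the feature names are made up for the example):

    const std = @import("std");

    test "disabled LLVM features are appended last" {
        const enabled = [_][]const u8{ "avx2", "fma" };
        const disabled = [_][]const u8{"avx512f"};

        var buf = std.ArrayList(u8).init(std.testing.allocator);
        defer buf.deinit();

        // Mirror of the loop above: '+' entries first, '-' entries
        // collected separately and appended at the end.
        for (enabled) |name| try buf.writer().print("+{s},", .{name});
        for (disabled) |name| try buf.writer().print("-{s},", .{name});

        // "-avx512f" now follows every '+' entry, so nothing re-enables it.
        try std.testing.expectEqualStrings("+avx2,+fma,-avx512f,", buf.items);
    }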

View File

@@ -23564,10 +23564,13 @@ fn ptrFromIntVal(
         return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(pt)});
 
     return switch (ptr_ty.zigTypeTag(zcu)) {
-        .optional => Value.fromInterned(try pt.intern(.{ .opt = .{
-            .ty = ptr_ty.toIntern(),
-            .val = if (addr == 0) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
-        } })),
+        .optional => val: {
+            const is_null: bool = addr == 0 and !ptr_ty.childType(zcu).isAllowzeroPtr(zcu);
+            break :val Value.fromInterned(try pt.intern(.{ .opt = .{
+                .ty = ptr_ty.toIntern(),
+                .val = if (is_null) .none else (try pt.ptrIntValue(ptr_ty.childType(zcu), addr)).toIntern(),
+            } }));
+        },
         .pointer => try pt.ptrIntValue(ptr_ty, addr),
         else => unreachable,
     };
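
The new is_null check encodes the rule that integer address 0 maps to null only when the payload pointer type cannot itself hold a zero address. A comptime sketch of the resulting semantics, which the behavior tests below exercise through their doTest helpers:

    const std = @import("std");

    comptime {
        // *u32 cannot be zero, so converting address 0 yields null.
        const p: ?*u32 = @ptrFromInt(0);
        std.debug.assert(p == null);

        // *allowzero u32 can be zero, so the optional stays non-null.
        const q: ?*allowzero u32 = @ptrFromInt(0);
        std.debug.assert(q != null);
        std.debug.assert(@intFromPtr(q.?) == 0);
    }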

View File

@@ -1,5 +1,6 @@
 const std = @import("std");
 const builtin = @import("builtin");
+const expect = std.testing.expect;
 const expectEqual = std.testing.expectEqual;
 
 test "casting integer address to function pointer" {
@@ -35,8 +36,15 @@ test "@ptrFromInt creates null pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    const ptr = @as(?*u32, @ptrFromInt(0));
-    try expectEqual(@as(?*u32, null), ptr);
+    const S = struct {
+        fn doTest(addr: usize) !void {
+            const ptr: ?*u32 = @ptrFromInt(addr);
+            try expectEqual(null, ptr);
+        }
+    };
+
+    try S.doTest(0);
+    comptime try S.doTest(0);
 }
 
 test "@ptrFromInt creates allowzero zero pointer" {
@@ -44,6 +52,29 @@ test "@ptrFromInt creates allowzero zero pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
-    const ptr = @as(*allowzero u32, @ptrFromInt(0));
-    try expectEqual(@as(usize, 0), @intFromPtr(ptr));
+    const S = struct {
+        fn doTest(addr: usize) !void {
+            const ptr: *allowzero const u32 = @ptrFromInt(addr);
+            try expectEqual(addr, @intFromPtr(ptr));
+        }
+    };
+
+    try S.doTest(0);
+    comptime try S.doTest(0);
+}
+
+test "@ptrFromInt creates optional allowzero zero pointer" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+    const S = struct {
+        fn doTest(addr: usize) !void {
+            const ptr: ?*allowzero const u32 = @ptrFromInt(addr);
+            try expect(ptr != null);
+        }
+    };
+
+    try S.doTest(0);
+    comptime try S.doTest(0);
 }