Mirror of https://github.com/ziglang/zig.git (synced 2024-11-21 11:32:24 +00:00)

Compare commits: b8d0e16412 ... 9c13b8c742 (10 commits)

- 9c13b8c742
- f845fa04a0
- a5d4ad17b7
- dafe1a910d
- acba2645f7
- 98c4e6ea45
- decfa371b5
- 8a17490efc
- a9dbd33061
- ed612f40af
@@ -669,7 +669,7 @@ fn mul(a: u8, b: u8) u8 {
return @as(u8, @truncate(s));
}

const cache_line_bytes = 64;
const cache_line_bytes = std.atomic.cache_line;

inline fn sbox_lookup(sbox: *align(64) const [256]u8, idx0: u8, idx1: u8, idx2: u8, idx3: u8) [4]u8 {
if (side_channels_mitigations == .none) {
@@ -683,8 +683,8 @@ inline fn sbox_lookup(sbox: *align(64) const [256]u8, idx0: u8, idx1: u8, idx2:
const stride = switch (side_channels_mitigations) {
.none => unreachable,
.basic => sbox.len / 4,
.medium => sbox.len / (sbox.len / cache_line_bytes) * 2,
.full => sbox.len / (sbox.len / cache_line_bytes),
.medium => @min(sbox.len, 2 * cache_line_bytes),
.full => @min(sbox.len, cache_line_bytes),
};
const of0 = idx0 % stride;
const of1 = idx1 % stride;
@@ -718,12 +718,11 @@ inline fn table_lookup(table: *align(64) const [4][256]u32, idx0: u8, idx1: u8,
table[3][idx3],
};
} else {
const table_bytes = @sizeOf(@TypeOf(table[0]));
const stride = switch (side_channels_mitigations) {
.none => unreachable,
.basic => table[0].len / 4,
.medium => table[0].len / (table_bytes / cache_line_bytes) * 2,
.full => table[0].len / (table_bytes / cache_line_bytes),
.medium => @max(1, @min(table[0].len, 2 * cache_line_bytes / 4)),
.full => @max(1, @min(table[0].len, cache_line_bytes / 4)),
};
const of0 = idx0 % stride;
const of1 = idx1 % stride;

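The two hunks above change how the lookup stride is derived for the mitigated S-box and round-table lookups: instead of dividing the table length by the number of cache lines it spans, the stride is clamped directly to one cache line's worth of entries (.full) or two (.medium), using the target's std.atomic.cache_line rather than a hard-coded 64. A self-contained sketch of the underlying idea (not part of the diff; coveredLookup is a hypothetical name and the constant-time select is simplified):

const std = @import("std");

// Read one byte from every stride-sized chunk of the table and keep only the
// value at the secret index, so the set of cache lines touched does not
// depend on the index.
fn coveredLookup(table: *align(64) const [256]u8, secret_idx: u8) u8 {
    const stride = @min(table.len, std.atomic.cache_line);
    const offset = secret_idx % stride;
    var result: u8 = 0;
    var base: usize = 0;
    while (base < table.len) : (base += stride) {
        const v = table[base + offset];
        const keep: u8 = @intFromBool(base + offset == secret_idx);
        result |= v & (@as(u8, 0) -% keep); // all-ones mask only for the real index
    }
    return result;
}

test "covered lookup matches direct indexing" {
    var table: [256]u8 align(64) = undefined;
    for (&table, 0..) |*e, i| e.* = @truncate(i *% 37 +% 5);
    for (0..256) |i| {
        try std.testing.expectEqual(table[i], coveredLookup(&table, @intCast(i)));
    }
}
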
@@ -4,6 +4,7 @@ const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const native_endian = builtin.cpu.arch.endian();
const mode = @import("builtin").mode;

/// The Keccak-f permutation.
pub fn KeccakF(comptime f: u11) type {
@@ -199,6 +200,46 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type
comptime assert(f >= 200 and f <= 1600 and f % 200 == 0); // invalid state size
comptime assert(capacity < f and capacity % 8 == 0); // invalid capacity size

// In debug mode, track transitions to prevent insecure ones.
const Op = enum { uninitialized, initialized, updated, absorb, squeeze };
const TransitionTracker = if (mode == .Debug) struct {
op: Op = .uninitialized,

fn to(tracker: *@This(), next_op: Op) void {
switch (next_op) {
.updated => {
switch (tracker.op) {
.uninitialized => @panic("cannot permute before initializing"),
else => {},
}
},
.absorb => {
switch (tracker.op) {
.squeeze => @panic("cannot absorb right after squeezing"),
else => {},
}
},
.squeeze => {
switch (tracker.op) {
.uninitialized => @panic("cannot squeeze before initializing"),
.initialized => @panic("cannot squeeze right after initializing"),
.absorb => @panic("cannot squeeze right after absorbing"),
else => {},
}
},
.uninitialized => @panic("cannot transition to uninitialized"),
.initialized => {},
}
tracker.op = next_op;
}
} else struct {
// No-op in non-debug modes.
inline fn to(tracker: *@This(), next_op: Op) void {
_ = tracker; // no-op
_ = next_op; // no-op
}
};

return struct {
const Self = @This();

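The TransitionTracker above is selected at comptime: a real state machine in Debug builds, and a field-less struct with an inline no-op to() otherwise, so the checks cost nothing in release modes. A standalone sketch of that pattern (not from the diff; Phase and Tracker are hypothetical names):

const std = @import("std");
const builtin = @import("builtin");

const Phase = enum { idle, running };

const Tracker = if (builtin.mode == .Debug) struct {
    phase: Phase = .idle,

    fn to(self: *@This(), next: Phase) void {
        if (self.phase == next) @panic("invalid phase transition");
        self.phase = next;
    }
} else struct {
    // Field-less, so it occupies no space and the call compiles away.
    inline fn to(self: *@This(), next: Phase) void {
        _ = self;
        _ = next;
    }
};

test "tracker costs nothing outside Debug builds" {
    var t = Tracker{};
    t.to(.running);
    try std.testing.expect(@sizeOf(Tracker) == if (builtin.mode == .Debug) @sizeOf(Phase) else 0);
}
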
@@ -215,67 +256,108 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type

st: KeccakF(f) = .{},

transition: TransitionTracker = .{},

/// Absorb a slice of bytes into the sponge.
pub fn absorb(self: *Self, bytes_: []const u8) void {
var bytes = bytes_;
pub fn absorb(self: *Self, bytes: []const u8) void {
self.transition.to(.absorb);
var i: usize = 0;
if (self.offset > 0) {
const left = @min(rate - self.offset, bytes.len);
@memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
self.offset += left;
if (left == bytes.len) return;
if (self.offset == rate) {
self.offset = 0;
self.st.addBytes(self.buf[0..]);
self.st.permuteR(rounds);
self.offset = 0;
}
if (left == bytes.len) return;
bytes = bytes[left..];
i = left;
}
while (bytes.len >= rate) {
self.st.addBytes(bytes[0..rate]);
while (i + rate < bytes.len) : (i += rate) {
self.st.addBytes(bytes[i..][0..rate]);
self.st.permuteR(rounds);
bytes = bytes[rate..];
}
if (bytes.len > 0) {
@memcpy(self.buf[0..bytes.len], bytes);
self.offset = bytes.len;
const left = bytes.len - i;
if (left > 0) {
@memcpy(self.buf[0..left], bytes[i..][0..left]);
}
self.offset = left;
}

/// Initialize the state from a slice of bytes.
pub fn init(bytes: [f / 8]u8) Self {
return .{ .st = KeccakF(f).init(bytes) };
pub fn init(bytes: [f / 8]u8, delim: u8) Self {
var st = Self{ .st = KeccakF(f).init(bytes), .delim = delim };
st.transition.to(.initialized);
return st;
}

/// Permute the state
pub fn permute(self: *Self) void {
if (mode == .Debug) {
if (self.transition.op == .absorb and self.offset > 0) {
@panic("cannot permute with pending input - call fillBlock() or pad() instead");
}
}
self.transition.to(.updated);
self.st.permuteR(rounds);
self.offset = 0;
}

/// Align the input to the rate boundary.
/// Align the input to the rate boundary and permute.
pub fn fillBlock(self: *Self) void {
self.transition.to(.absorb);
self.st.addBytes(self.buf[0..self.offset]);
self.st.permuteR(rounds);
self.offset = 0;
self.transition.to(.updated);
}

/// Mark the end of the input.
pub fn pad(self: *Self) void {
self.transition.to(.absorb);
self.st.addBytes(self.buf[0..self.offset]);
if (self.offset == rate) {
self.st.permuteR(rounds);
self.offset = 0;
}
self.st.addByte(self.delim, self.offset);
self.st.addByte(0x80, rate - 1);
self.st.permuteR(rounds);
self.offset = 0;
self.transition.to(.updated);
}

/// Squeeze a slice of bytes from the sponge.
/// The function can be called multiple times.
pub fn squeeze(self: *Self, out: []u8) void {
self.transition.to(.squeeze);
var i: usize = 0;
while (i < out.len) : (i += rate) {
const left = @min(rate, out.len - i);
self.st.extractBytes(out[i..][0..left]);
if (self.offset == rate) {
self.st.permuteR(rounds);
} else if (self.offset > 0) {
@branchHint(.unlikely);
var buf: [rate]u8 = undefined;
self.st.extractBytes(buf[0..]);
const left = @min(rate - self.offset, out.len);
@memcpy(out[0..left], buf[self.offset..][0..left]);
self.offset += left;
if (left == out.len) return;
if (self.offset == rate) {
self.offset = 0;
self.st.permuteR(rounds);
}
i = left;
}
while (i + rate < out.len) : (i += rate) {
self.st.extractBytes(out[i..][0..rate]);
self.st.permuteR(rounds);
}
const left = out.len - i;
if (left > 0) {
self.st.extractBytes(out[i..][0..left]);
}
self.offset = left;
}
};
}

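Taken together, the changes above give State an explicit lifecycle: init takes the domain-separation byte, absorb buffers partial blocks by index instead of re-slicing, pad finalizes, and squeeze can now be called multiple times, with the Debug-only tracker panicking on sequences such as squeezing directly after absorbing. A minimal usage sketch (not part of the diff), assuming this type is the one exposed as std.crypto.core.keccak.State:

const std = @import("std");

test "keccak sponge: init, absorb, pad, then squeeze incrementally" {
    const State = std.crypto.core.keccak.State;
    // f = 1600, capacity = 512, 24 rounds; initial state bytes are all zero,
    // 0x1f is a SHAKE-style domain-separation byte.
    var st = State(1600, 512, 24).init([_]u8{0} ** 200, 0x1f);
    st.absorb("hello, ");
    st.absorb("world");
    st.pad(); // squeezing right after absorb() would panic in Debug builds

    var out0: [32]u8 = undefined;
    var out1: [32]u8 = undefined;
    var st2 = st;
    st.squeeze(out0[0..]);
    st2.squeeze(out1[0..16]); // split squeeze() calls yield the same stream
    st2.squeeze(out1[16..]);
    try std.testing.expectEqualSlices(u8, &out0, &out1);
}
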
@@ -298,3 +380,26 @@ test "Keccak-f800" {
};
try std.testing.expectEqualSlices(u32, &st.st, &expected);
}

test "squeeze" {
var st = State(800, 256, 22).init([_]u8{0x80} ** 100, 0x01);

var out0: [15]u8 = undefined;
var out1: [out0.len]u8 = undefined;
st.permute();
var st0 = st;
st0.squeeze(out0[0..]);
var st1 = st;
st1.squeeze(out1[0 .. out1.len / 2]);
st1.squeeze(out1[out1.len / 2 ..]);
try std.testing.expectEqualSlices(u8, &out0, &out1);

var out2: [100]u8 = undefined;
var out3: [out2.len]u8 = undefined;
var st2 = st;
st2.squeeze(out2[0..]);
var st3 = st;
st3.squeeze(out3[0 .. out2.len / 2]);
st3.squeeze(out3[out2.len / 2 ..]);
try std.testing.expectEqualSlices(u8, &out2, &out3);
}

@@ -48,7 +48,8 @@ fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
switch (linux.E.init(bytes_read)) {
.SUCCESS => return bytes_read == buf.len,
.FAULT => return false,
.INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
.INVAL, .SRCH => unreachable, // own pid is always valid
.PERM => {}, // Known to happen in containers.
.NOMEM => {},
.NOSYS => {}, // QEMU is known not to implement this syscall.
else => unreachable, // unexpected

@@ -324,20 +324,49 @@ pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
return matches;
}

/// Read a struct from the stream.
/// Only packed and extern structs are supported, as they have a defined in-memory layout.
/// Packed structs must have a `@bitSizeOf` that is a multiple of eight.
pub fn readStruct(self: Self, comptime T: type) anyerror!T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
var res: [1]T = undefined;
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
switch (@typeInfo(T).@"struct".layout) {
.auto => @compileError("readStruct only supports packed and extern structs, " ++
"but the given type: " ++ @typeName(T) ++ " is a normal struct."),
.@"extern" => {
var res: [1]T = undefined;
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
},
.@"packed" => {
var bytes: [@divExact(@bitSizeOf(T), 8)]u8 = undefined;
try self.readNoEof(&bytes);
return @bitCast(bytes);
},
}
}

/// Read a struct having the specified endianness into the host endianness representation.
/// Only packed and extern structs are supported, as they have a defined in-memory layout.
/// Packed structs must have a `@bitSizeOf` that is a multiple of eight.
pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
var res = try self.readStruct(T);
if (native_endian != endian) {
mem.byteSwapAllFields(T, &res);
switch (@typeInfo(T).@"struct".layout) {
.auto => @compileError("readStructEndian only supports packed and extern structs, " ++
"but the given type: " ++ @typeName(T) ++ " is a normal struct."),
.@"extern" => {
var res = try self.readStruct(T);
if (native_endian != endian) {
mem.byteSwapAllFields(T, &res);
}
return res;
},
.@"packed" => {
var bytes: [@divExact(@bitSizeOf(T), 8)]u8 = undefined;
try self.readNoEof(&bytes);
if (native_endian != endian) {
mem.reverse(u8, &bytes);
}
return @bitCast(bytes);
},
}
return res;
}

/// Reads an integer with the same size as the given enum's tag type. If the integer matches

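The packed branches above rely on the fact that a packed struct whose `@bitSizeOf` is a multiple of eight can be @bitCast to and from a fixed-size byte array of its backing integer; those bytes follow the host's representation of that integer, which is why the endian-aware variants call mem.reverse when the requested endianness differs from the native one. A small standalone sketch of just that conversion (not from the diff):

const std = @import("std");

test "packed struct round-trips through a byte array via @bitCast" {
    const P = packed struct(u24) { a: u8, b: u8, c: u8 };
    const p = P{ .a = 1, .b = 2, .c = 3 };
    // Same number of bits on both sides, so @bitCast works in either direction.
    const bytes: [@divExact(@bitSizeOf(P), 8)]u8 = @bitCast(p);
    const q: P = @bitCast(bytes);
    try std.testing.expectEqual(p, q);
}
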
@@ -1,6 +1,7 @@
const builtin = @import("builtin");
const std = @import("../../std.zig");
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();

test "Reader" {
var buf = "a\x02".*;
@@ -370,3 +371,130 @@ test "readIntoBoundedBytes correctly reads into a provided bounded array" {
try reader.readIntoBoundedBytes(10000, &bounded_array);
try testing.expectEqualStrings(bounded_array.slice(), test_string);
}

test "readStructEndian reads packed structs without padding and in correct field order" {
const buf = [3]u8{ 11, 12, 13 };
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();

const PackedStruct = packed struct(u24) { a: u8, b: u8, c: u8 };

try testing.expectEqual(
PackedStruct{ .a = 11, .b = 12, .c = 13 },
reader.readStructEndian(PackedStruct, .little),
);
fis.reset();
try testing.expectEqual(
PackedStruct{ .a = 13, .b = 12, .c = 11 },
reader.readStructEndian(PackedStruct, .big),
);
}

test "readStruct reads packed structs without padding and in correct field order" {
const buf = [3]u8{ 11, 12, 13 };
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();

const PackedStruct = packed struct(u24) { a: u8, b: u8, c: u8 };

switch (native_endian) {
.little => {
try testing.expectEqual(
PackedStruct{ .a = 11, .b = 12, .c = 13 },
reader.readStruct(PackedStruct),
);
},
.big => {
try testing.expectEqual(
PackedStruct{ .a = 13, .b = 12, .c = 11 },
reader.readStruct(PackedStruct),
);
},
}
}

test "readStruct writeStruct round-trip with packed structs" {
var buf: [8]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();
const writer = fis.writer();

const PackedStruct = packed struct(u64) {
a: u16 = 123,
b: u16 = 245,
c: u16 = 456,
d: i13 = -345,
e: i3 = 2,
};

const expected_packed_struct = PackedStruct{};
try writer.writeStruct(expected_packed_struct);
fis.reset();
try testing.expectEqual(expected_packed_struct, try reader.readStruct(PackedStruct));
}

test "readStructEndian writeStructEndian round-trip with packed structs" {
var buf: [8]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();
const writer = fis.writer();

const PackedStruct = packed struct(u64) {
a: u13 = 123,
b: i7 = -24,
c: u20 = 83,
d: enum(i24) { val = 3452 } = .val,
};

const expected_packed_struct = PackedStruct{};
// round-trip big endian
try writer.writeStructEndian(expected_packed_struct, .big);
fis.reset();
try testing.expectEqual(expected_packed_struct, try reader.readStructEndian(PackedStruct, .big));
// round-trip little endian
fis.reset();
try writer.writeStructEndian(expected_packed_struct, .little);
fis.reset();
try testing.expectEqual(expected_packed_struct, try reader.readStructEndian(PackedStruct, .little));
}

test "readStruct a packed struct with endianness-affected types" {
const buf = [4]u8{ 0x12, 0x34, 0x56, 0x78 };
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();

const PackedStruct = packed struct(u32) { a: u16, b: u16 };

switch (native_endian) {
.little => {
try testing.expectEqual(
PackedStruct{ .a = 0x3412, .b = 0x7856 },
reader.readStruct(PackedStruct),
);
},
.big => {
try testing.expectEqual(
PackedStruct{ .a = 0x5678, .b = 0x1234 },
reader.readStruct(PackedStruct),
);
},
}
}

test "readStructEndian a packed struct with endianness-affected types" {
const buf = [4]u8{ 0x12, 0x34, 0x56, 0x78 };
var fis = std.io.fixedBufferStream(&buf);
const reader = fis.reader();

const PackedStruct = packed struct(u32) { a: u16, b: u16 };

try testing.expectEqual(
PackedStruct{ .a = 0x3412, .b = 0x7856 },
reader.readStructEndian(PackedStruct, .little),
);
fis.reset();
try testing.expectEqual(
PackedStruct{ .a = 0x5678, .b = 0x1234 },
reader.readStructEndian(PackedStruct, .big),
);
}

@@ -54,20 +54,48 @@ pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.built
return self.writeAll(&bytes);
}

/// Write a struct to the stream.
/// Only packed and extern structs are supported, as they have a defined in-memory layout.
/// Packed structs must have a `@bitSizeOf` that is a multiple of eight.
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
return self.writeAll(mem.asBytes(&value));
switch (@typeInfo(@TypeOf(value)).@"struct".layout) {
.auto => @compileError("writeStruct only supports packed and extern structs, " ++
"but the given type: " ++ @typeName(@TypeOf(value)) ++ " is a normal struct."),
.@"extern" => {
return try self.writeAll(mem.asBytes(&value));
},
.@"packed" => {
const bytes: [@divExact(@bitSizeOf(@TypeOf(value)), 8)]u8 = @bitCast(value);
try self.writeAll(&bytes);
},
}
}

/// Write a struct to the stream in the specified endianness.
/// Only packed and extern structs are supported, as they have a defined in-memory layout.
/// Packed structs must have a `@bitSizeOf` that is a multiple of eight.
pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
// TODO: make sure this value is not a reference type
if (native_endian == endian) {
return self.writeStruct(value);
} else {
var copy = value;
mem.byteSwapAllFields(@TypeOf(value), &copy);
return self.writeStruct(copy);
switch (@typeInfo(@TypeOf(value)).@"struct".layout) {
.auto => @compileError("writeStructEndian only supports packed and extern structs, " ++
"but the given type: " ++ @typeName(@TypeOf(value)) ++ " is a normal struct."),
.@"extern" => {
if (native_endian == endian) {
return try self.writeStruct(value);
} else {
var copy = value;
mem.byteSwapAllFields(@TypeOf(value), &copy);
return try self.writeStruct(copy);
}
},
.@"packed" => {
var bytes: [@divExact(@bitSizeOf(@TypeOf(value)), 8)]u8 = @bitCast(value);
if (native_endian != endian) {
mem.reverse(u8, &bytes);
}
return try self.writeAll(&bytes);
},
}
}

@@ -81,3 +109,7 @@ pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
if (n < buf.len) return;
}
}

test {
_ = @import("Writer/test.zig");
}

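For extern structs, writeStructEndian keeps the previous strategy of byte-swapping each field and writing the struct's memory directly; only packed structs go through the new byte-array path. A short sketch of the extern case (not from the diff; the Header layout is hypothetical):

const std = @import("std");

test "writeStructEndian with an extern struct" {
    const Header = extern struct { magic: u32, len: u16, flags: u16 };
    var buf: [8]u8 = undefined;
    var fis = std.io.fixedBufferStream(&buf);
    // Each field is emitted big-endian at its natural offset (0, 4, 6).
    try fis.writer().writeStructEndian(Header{ .magic = 0xDEADBEEF, .len = 8, .flags = 1 }, .big);
    try std.testing.expectEqualSlices(u8, &.{ 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x08, 0x00, 0x01 }, &buf);
}
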
lib/std/io/Writer/test.zig (new file, 63 lines)
@@ -0,0 +1,63 @@
const std = @import("../../std.zig");
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();

test "writeStruct writes packed structs without padding" {
var buf: [3]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const writer = fis.writer();

const PackedStruct = packed struct(u24) { a: u8, b: u8, c: u8 };

try writer.writeStruct(PackedStruct{ .a = 11, .b = 12, .c = 13 });
switch (native_endian) {
.little => {
try testing.expectEqualSlices(u8, &.{ 11, 12, 13 }, &buf);
},
.big => {
try testing.expectEqualSlices(u8, &.{ 13, 12, 11 }, &buf);
},
}
}

test "writeStructEndian writes packed structs without padding and in correct field order" {
var buf: [3]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const writer = fis.writer();

const PackedStruct = packed struct(u24) { a: u8, b: u8, c: u8 };

try writer.writeStructEndian(PackedStruct{ .a = 11, .b = 12, .c = 13 }, .little);
try testing.expectEqualSlices(u8, &.{ 11, 12, 13 }, &buf);
fis.reset();
try writer.writeStructEndian(PackedStruct{ .a = 11, .b = 12, .c = 13 }, .big);
try testing.expectEqualSlices(u8, &.{ 13, 12, 11 }, &buf);
}

test "writeStruct a packed struct with endianness-affected types" {
var buf: [4]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const writer = fis.writer();

const PackedStruct = packed struct(u32) { a: u16, b: u16 };

try writer.writeStruct(PackedStruct{ .a = 0x1234, .b = 0x5678 });
switch (native_endian) {
.little => try testing.expectEqualSlices(u8, &.{ 0x34, 0x12, 0x78, 0x56 }, &buf),
.big => try testing.expectEqualSlices(u8, &.{ 0x56, 0x78, 0x12, 0x34 }, &buf),
}
}

test "writeStructEndian a packed struct with endianness-affected types" {
var buf: [4]u8 = undefined;
var fis = std.io.fixedBufferStream(&buf);
const writer = fis.writer();

const PackedStruct = packed struct(u32) { a: u16, b: u16 };

try writer.writeStructEndian(PackedStruct{ .a = 0x1234, .b = 0x5678 }, .little);
try testing.expectEqualSlices(u8, &.{ 0x34, 0x12, 0x78, 0x56 }, &buf);
fis.reset();
try writer.writeStructEndian(PackedStruct{ .a = 0x1234, .b = 0x5678 }, .big);
try testing.expectEqualSlices(u8, &.{ 0x56, 0x78, 0x12, 0x34 }, &buf);
}

@@ -312,18 +312,29 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
if (!options.global.use_llvm) break :b null;

var buf = std.ArrayList(u8).init(arena);
for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
const is_enabled = target.cpu.features.isEnabled(index);
var disabled_features = std.ArrayList(u8).init(arena);
defer disabled_features.deinit();

// Append disabled features after enabled ones, so that their effects aren't overwritten.
for (target.cpu.arch.allFeaturesList()) |feature| {
if (feature.llvm_name) |llvm_name| {
const plus_or_minus = "-+"[@intFromBool(is_enabled)];
try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity(plus_or_minus);
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendSliceAssumeCapacity(",");
const is_enabled = target.cpu.features.isEnabled(feature.index);

if (is_enabled) {
try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity('+');
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendAssumeCapacity(',');
} else {
try disabled_features.ensureUnusedCapacity(2 + llvm_name.len);
disabled_features.appendAssumeCapacity('-');
disabled_features.appendSliceAssumeCapacity(llvm_name);
disabled_features.appendAssumeCapacity(',');
}
}
}

try buf.appendSlice(disabled_features.items);
if (buf.items.len == 0) break :b "";
assert(std.mem.endsWith(u8, buf.items, ","));
buf.items[buf.items.len - 1] = 0;

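The loop now builds the LLVM feature string in two passes: "+name," entries for enabled features first, then "-name," entries collected in disabled_features, so a later minus is not overwritten by what an earlier plus implies, with the final trailing comma replaced by a NUL terminator. A self-contained sketch of that ordering (not the compiler's code; llvmFeatureString is a hypothetical helper):

const std = @import("std");

fn llvmFeatureString(
    allocator: std.mem.Allocator,
    enabled: []const []const u8,
    disabled: []const []const u8,
) ![]u8 {
    var buf = std.ArrayList(u8).init(allocator);
    errdefer buf.deinit();
    for (enabled) |name| {
        try buf.append('+');
        try buf.appendSlice(name);
        try buf.append(',');
    }
    // Disabled features go last so their effect is not overwritten.
    for (disabled) |name| {
        try buf.append('-');
        try buf.appendSlice(name);
        try buf.append(',');
    }
    if (buf.items.len > 0) buf.items[buf.items.len - 1] = 0; // NUL-terminate for LLVM
    return try buf.toOwnedSlice();
}

test "disabled features are appended after enabled ones" {
    const s = try llvmFeatureString(std.testing.allocator, &.{ "sse2", "avx" }, &.{"soft-float"});
    defer std.testing.allocator.free(s);
    try std.testing.expectEqualSlices(u8, "+sse2,+avx,-soft-float\x00", s);
}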