Merge remote-tracking branch 'origin/master' into stage2-whole-file-astgen

Conflicts:
 * build.zig
 * lib/std/array_list.zig
 * lib/std/c/ast.zig
 * lib/std/c/parse.zig
 * lib/std/os/bits/linux.zig
Andrew Kelley 2021-05-05 10:48:22 -07:00
commit fc40d23723
54 changed files with 7621 additions and 2952 deletions

View File

@ -36,7 +36,7 @@ pub fn build(b: *Builder) !void {
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
const test_step = b.step("test", "Run all the tests");
const toolchain_step = b.step("test-toolchain", "Run the tests for the toolchain");
var test_stage2 = b.addTest("src/test.zig");
test_stage2.setBuildMode(mode);
@ -99,7 +99,7 @@ pub fn build(b: *Builder) !void {
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
test_step.dependOn(&exe.step);
toolchain_step.dependOn(&exe.step);
b.default_step.dependOn(&exe.step);
exe.addBuildOption(u32, "mem_leak_frames", mem_leak_frames);
@ -244,7 +244,7 @@ pub fn build(b: *Builder) !void {
const test_stage2_step = b.step("test-stage2", "Run the stage2 compiler tests");
test_stage2_step.dependOn(&test_stage2.step);
if (!skip_stage2_tests) {
test_step.dependOn(test_stage2_step);
toolchain_step.dependOn(test_stage2_step);
}
var chosen_modes: [4]builtin.Mode = undefined;
@ -268,33 +268,37 @@ pub fn build(b: *Builder) !void {
const modes = chosen_modes[0..chosen_mode_index];
// run stage1 `zig fmt` on this build.zig file just to make sure it works
test_step.dependOn(&fmt_build_zig.step);
toolchain_step.dependOn(&fmt_build_zig.step);
const fmt_step = b.step("test-fmt", "Run zig fmt against build.zig to make sure it works");
fmt_step.dependOn(&fmt_build_zig.step);
// TODO for the moment, skip wasm32-wasi until bugs are sorted out.
test_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
test_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/std.zig", "std", "Run the standard library tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/compiler_rt.zig", "compiler-rt", "Run the compiler_rt tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/c.zig", "minilibc", "Run the mini libc tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
test_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/compiler_rt.zig", "compiler-rt", "Run the compiler_rt tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
test_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/c.zig", "minilibc", "Run the mini libc tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
test_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
test_step.dependOn(tests.addCliTests(b, test_filter, modes));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
test_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
toolchain_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addCliTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addRuntimeSafetyTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
toolchain_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
}
// tests for this feature are disabled until we have the self-hosted compiler available
// test_step.dependOn(tests.addGenHTests(b, test_filter));
// toolchain_step.dependOn(tests.addGenHTests(b, test_filter));
if (!skip_compile_errors) {
test_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
}
const std_step = tests.addPkgTests(b, test_filter, "lib/std/std.zig", "std", "Run the standard library tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir);
const test_step = b.step("test", "Run all the tests");
test_step.dependOn(toolchain_step);
test_step.dependOn(std_step);
test_step.dependOn(docs_step);
}

View File

@ -65,7 +65,9 @@ make $JOBS install
cmake .. -DZIG_EXECUTABLE="$(pwd)/release/bin/zig"
make $JOBS install
release/bin/zig build test -Denable-qemu -Denable-wasmtime
for step in test-toolchain test-std docs; do
release/bin/zig build $step -Denable-qemu -Denable-wasmtime
done
# Look for HTML errors.
tidy -qe ../zig-cache/langref.html

View File

@ -18,7 +18,7 @@ tar xf "$CACHE_BASENAME.tar.xz"
ZIG="$PREFIX/bin/zig"
NATIVE_LIBC_TXT="$HOME/native_libc.txt"
$ZIG libc > "$NATIVE_LIBC_TXT"
$ZIG libc >"$NATIVE_LIBC_TXT"
export ZIG_LIBC="$NATIVE_LIBC_TXT"
export CC="$ZIG cc"
export CXX="$ZIG c++"
@ -55,7 +55,9 @@ make $JOBS install
cmake .. -DZIG_EXECUTABLE="$(pwd)/release/bin/zig" -DZIG_TARGET_MCPU="x86_64_v2"
make $JOBS install
release/bin/zig build test
for step in test-toolchain test-std docs; do
release/bin/zig build $step
done
if [ "${BUILD_REASON}" != "PullRequest" ]; then
mv ../LICENSE release/

View File

@ -26,20 +26,8 @@ cd %ZIGBUILDDIR%
cmake.exe .. -Thost=x64 -G"Visual Studio 16 2019" -A x64 "-DCMAKE_INSTALL_PREFIX=%ZIGINSTALLDIR%" "-DCMAKE_PREFIX_PATH=%ZIGPREFIXPATH%" -DCMAKE_BUILD_TYPE=Release -DZIG_OMIT_STAGE2=ON || exit /b
msbuild /maxcpucount /p:Configuration=Release INSTALL.vcxproj || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-behavior -Dskip-non-native || exit /b
REM Disabled to prevent OOM
REM "%ZIGINSTALLDIR%\bin\zig.exe" build test-stage2 -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-fmt -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-toolchain -Dskip-non-native -Dskip-stage2-tests || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-std -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-compiler-rt -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-compare-output -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-standalone -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-stack-traces -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-cli -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-asm-link -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-runtime-safety -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-translate-c -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build test-run-translated-c -Dskip-non-native || exit /b
"%ZIGINSTALLDIR%\bin\zig.exe" build docs || exit /b
set "PATH=%CD:~0,2%\msys64\usr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem"

View File

@ -4,7 +4,7 @@ set -x
set -e
sudo pkg update -fq
sudo pkg install -y cmake py37-s3cmd wget curl jq samurai
sudo pkg install -y cmake py38-s3cmd wget curl jq samurai
ZIGDIR="$(pwd)"
CACHE_BASENAME="zig+llvm+lld+clang-x86_64-freebsd-gnu-0.8.0-dev.1939+5a3ea9bec"

View File

@ -68,11 +68,30 @@ else switch (std.Target.current.os.tag) {
};
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
pub fn spinLoopHint() void {
pub fn spinLoopHint() callconv(.Inline) void {
switch (std.Target.current.cpu.arch) {
.i386, .x86_64 => asm volatile ("pause" ::: "memory"),
.arm, .aarch64 => asm volatile ("yield" ::: "memory"),
else => {},
.i386, .x86_64 => {
asm volatile ("pause" ::: "memory");
},
.arm, .armeb, .thumb, .thumbeb => {
// `yield` was introduced in v6k but is also available on v6m.
const can_yield = comptime std.Target.arm.featureSetHasAny(std.Target.current.cpu.features, .{ .has_v6k, .has_v6m });
if (can_yield) asm volatile ("yield" ::: "memory")
// Fallback.
else asm volatile ("" ::: "memory");
},
.aarch64, .aarch64_be, .aarch64_32 => {
asm volatile ("isb" ::: "memory");
},
.powerpc64, .powerpc64le => {
// No-op that serves as a `yield` hint.
asm volatile ("or 27, 27, 27" ::: "memory");
},
else => {
// Do nothing but prevent the compiler from optimizing away the
// spinning loop.
asm volatile ("" ::: "memory");
},
}
}
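A minimal usage sketch (not part of this diff): a busy-wait loop that polls a plain `bool` flag set by another thread, calling the hint above on every iteration so the core backs off while spinning. The helper name is illustrative only.

fn spinUntilSet(flag: *const bool) void {
    // Emit the architecture-specific pause/yield hint on every poll.
    while (!@atomicLoad(bool, flag, .Acquire)) {
        spinLoopHint();
    }
}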

View File

@ -50,7 +50,6 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
allocator: *Allocator,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub const SliceConst = if (alignment) |a| ([]align(a) const T) else []const T;
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: *Allocator) Self {
@ -141,7 +140,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
pub fn insertSlice(self: *Self, i: usize, items: SliceConst) !void {
pub fn insertSlice(self: *Self, i: usize, items: []const T) !void {
try self.ensureUnusedCapacity(items.len);
self.items.len += items.len;
@ -153,7 +152,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`.
/// Invalidates pointers if this ArrayList is resized.
pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: SliceConst) !void {
pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) !void {
const after_range = start + len;
const range = self.items[start..after_range];
@ -220,14 +219,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
pub fn appendSlice(self: *Self, items: SliceConst) !void {
pub fn appendSlice(self: *Self, items: []const T) !void {
try self.ensureUnusedCapacity(items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list, asserting the capacity is already
/// enough to store the new items. **Does not** invalidate pointers.
pub fn appendSliceAssumeCapacity(self: *Self, items: SliceConst) void {
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const oldlen = self.items.len;
const newlen = self.items.len + items.len;
self.items.len = newlen;
@ -438,7 +437,6 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
capacity: usize = 0,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub const SliceConst = if (alignment) |a| ([]align(a) const T) else []const T;
/// Initialize with capacity to hold at least num elements.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
@ -492,7 +490,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
/// higher indices to make room.
/// This operation is O(N).
pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: SliceConst) !void {
pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.items.len += items.len;
@ -504,7 +502,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`
/// Invalidates pointers if this ArrayList is resized.
pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: SliceConst) !void {
pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void {
var managed = self.toManaged(allocator);
try managed.replaceRange(start, len, new_items);
self.* = managed.toUnmanaged();
@ -552,14 +550,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
pub fn appendSlice(self: *Self, allocator: *Allocator, items: SliceConst) !void {
pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list, asserting the capacity is enough
/// to store the new items.
pub fn appendSliceAssumeCapacity(self: *Self, items: SliceConst) void {
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const oldlen = self.items.len;
const newlen = self.items.len + items.len;
@ -1150,15 +1148,31 @@ test "std.ArrayList/ArrayListUnmanaged: ArrayList(T) of struct T" {
}
}
test "std.ArrayList(u8) implements writer" {
var buffer = ArrayList(u8).init(std.testing.allocator);
defer buffer.deinit();
test "std.ArrayList(u8)/ArrayListAligned implements writer" {
const a = testing.allocator;
const x: i32 = 42;
const y: i32 = 1234;
try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
{
var buffer = ArrayList(u8).init(a);
defer buffer.deinit();
testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
const x: i32 = 42;
const y: i32 = 1234;
try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
var list = ArrayListAligned(u8, 2).init(a);
defer list.deinit();
const writer = list.writer();
try writer.writeAll("a");
try writer.writeAll("bc");
try writer.writeAll("d");
try writer.writeAll("efg");
testing.expectEqualSlices(u8, list.items, "abcdefg");
}
}
test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMemory" {
@ -1189,18 +1203,6 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
}
}
test "std.ArrayList.writer" {
var list = ArrayList(u8).init(std.testing.allocator);
defer list.deinit();
const writer = list.writer();
try writer.writeAll("a");
try writer.writeAll("bc");
try writer.writeAll("d");
try writer.writeAll("efg");
testing.expectEqualSlices(u8, list.items, "abcdefg");
}
test "std.ArrayList/ArrayListUnmanaged.addManyAsArray" {
const a = std.testing.allocator;
{
@ -1248,3 +1250,27 @@ test "std.ArrayList/ArrayListUnmanaged.toOwnedSliceSentinel" {
testing.expectEqualStrings(result, mem.spanZ(result.ptr));
}
}
test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
const a = testing.allocator;
{
var list = std.ArrayListAligned(u8, 8).init(a);
defer list.deinit();
try list.appendSlice(&.{ 0, 1, 2, 3 });
try list.insertSlice(2, &.{ 4, 5, 6, 7 });
try list.replaceRange(1, 3, &.{ 8, 9 });
testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
{
var list = std.ArrayListAlignedUnmanaged(u8, 8){};
defer list.deinit(a);
try list.appendSlice(a, &.{ 0, 1, 2, 3 });
try list.insertSlice(a, 2, &.{ 4, 5, 6, 7 });
try list.replaceRange(a, 1, 3, &.{ 8, 9 });
testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
}

View File

@ -191,6 +191,13 @@ pub const RunStep = struct {
child.stdout_behavior = stdIoActionToBehavior(self.stdout_action);
child.stderr_behavior = stdIoActionToBehavior(self.stderr_action);
if (self.builder.verbose) {
for (argv) |arg| {
warn("{s} ", .{arg});
}
warn("\n", .{});
}
child.spawn() catch |err| {
warn("Unable to spawn {s}: {s}\n", .{ argv[0], @errorName(err) });
return err;

View File

@ -166,7 +166,7 @@ pub const CallingConvention = enum {
APCS,
AAPCS,
AAPCSVFP,
SysV
SysV,
};
/// This data structure is used by the Zig language code generation and

View File

@ -10,8 +10,6 @@ const page_size = std.mem.page_size;
pub const tokenizer = @import("c/tokenizer.zig");
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const parse = @import("c/parse.zig").parse;
pub const ast = @import("c/ast.zig");
pub const builtins = @import("c/builtins.zig");
test {

View File

@ -1,678 +0,0 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const ArrayList = std.ArrayList;
const Token = std.c.Token;
const Source = std.c.tokenizer.Source;
pub const TokenIndex = usize;
pub const Tree = struct {
tokens: []Token,
sources: []Source,
root_node: *Node.Root,
arena_state: std.heap.ArenaAllocator.State,
gpa: *mem.Allocator,
msgs: []Msg,
pub fn deinit(self: *Tree) void {
self.arena_state.promote(self.gpa).deinit();
}
pub fn tokenSlice(tree: *Tree, token: TokenIndex) []const u8 {
return tree.tokens.at(token).slice();
}
pub fn tokenEql(tree: *Tree, a: TokenIndex, b: TokenIndex) bool {
const atok = tree.tokens.at(a);
const btok = tree.tokens.at(b);
return atok.eql(btok.*);
}
};
pub const Msg = struct {
kind: enum {
Error,
Warning,
Note,
},
inner: Error,
};
pub const Error = union(enum) {
InvalidToken: SingleTokenError("invalid token '{}'"),
ExpectedToken: ExpectedToken,
ExpectedExpr: SingleTokenError("expected expression, found '{}'"),
ExpectedTypeName: SingleTokenError("expected type name, found '{}'"),
ExpectedFnBody: SingleTokenError("expected function body, found '{}'"),
ExpectedDeclarator: SingleTokenError("expected declarator, found '{}'"),
ExpectedInitializer: SingleTokenError("expected initializer, found '{}'"),
ExpectedEnumField: SingleTokenError("expected enum field, found '{}'"),
ExpectedType: SingleTokenError("expected enum field, found '{}'"),
InvalidTypeSpecifier: InvalidTypeSpecifier,
InvalidStorageClass: SingleTokenError("invalid storage class, found '{}'"),
InvalidDeclarator: SimpleError("invalid declarator"),
DuplicateQualifier: SingleTokenError("duplicate type qualifier '{}'"),
DuplicateSpecifier: SingleTokenError("duplicate declaration specifier '{}'"),
MustUseKwToRefer: MustUseKwToRefer,
FnSpecOnNonFn: SingleTokenError("function specifier '{}' on non function"),
NothingDeclared: SimpleError("declaration doesn't declare anything"),
QualifierIgnored: SingleTokenError("qualifier '{}' ignored"),
pub fn render(self: *const Error, tree: *Tree, stream: anytype) !void {
switch (self.*) {
.InvalidToken => |*x| return x.render(tree, stream),
.ExpectedToken => |*x| return x.render(tree, stream),
.ExpectedExpr => |*x| return x.render(tree, stream),
.ExpectedTypeName => |*x| return x.render(tree, stream),
.ExpectedDeclarator => |*x| return x.render(tree, stream),
.ExpectedFnBody => |*x| return x.render(tree, stream),
.ExpectedInitializer => |*x| return x.render(tree, stream),
.ExpectedEnumField => |*x| return x.render(tree, stream),
.ExpectedType => |*x| return x.render(tree, stream),
.InvalidTypeSpecifier => |*x| return x.render(tree, stream),
.InvalidStorageClass => |*x| return x.render(tree, stream),
.InvalidDeclarator => |*x| return x.render(tree, stream),
.DuplicateQualifier => |*x| return x.render(tree, stream),
.DuplicateSpecifier => |*x| return x.render(tree, stream),
.MustUseKwToRefer => |*x| return x.render(tree, stream),
.FnSpecOnNonFn => |*x| return x.render(tree, stream),
.NothingDeclared => |*x| return x.render(tree, stream),
.QualifierIgnored => |*x| return x.render(tree, stream),
}
}
pub fn loc(self: *const Error) TokenIndex {
switch (self.*) {
.InvalidToken => |x| return x.token,
.ExpectedToken => |x| return x.token,
.ExpectedExpr => |x| return x.token,
.ExpectedTypeName => |x| return x.token,
.ExpectedDeclarator => |x| return x.token,
.ExpectedFnBody => |x| return x.token,
.ExpectedInitializer => |x| return x.token,
.ExpectedEnumField => |x| return x.token,
.ExpectedType => |*x| return x.token,
.InvalidTypeSpecifier => |x| return x.token,
.InvalidStorageClass => |x| return x.token,
.InvalidDeclarator => |x| return x.token,
.DuplicateQualifier => |x| return x.token,
.DuplicateSpecifier => |x| return x.token,
.MustUseKwToRefer => |*x| return x.name,
.FnSpecOnNonFn => |*x| return x.name,
.NothingDeclared => |*x| return x.name,
.QualifierIgnored => |*x| return x.name,
}
}
pub const ExpectedToken = struct {
token: TokenIndex,
expected_id: std.meta.Tag(Token.Id),
pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
const found_token = tree.tokens.at(self.token);
if (found_token.id == .Invalid) {
return stream.print("expected '{s}', found invalid bytes", .{self.expected_id.symbol()});
} else {
const token_name = found_token.id.symbol();
return stream.print("expected '{s}', found '{s}'", .{ self.expected_id.symbol(), token_name });
}
}
};
pub const InvalidTypeSpecifier = struct {
token: TokenIndex,
type_spec: *Node.TypeSpec,
pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
try stream.write("invalid type specifier '");
try type_spec.spec.print(tree, stream);
const token_name = tree.tokens.at(self.token).id.symbol();
return stream.print("{s}'", .{token_name});
}
};
pub const MustUseKwToRefer = struct {
kw: TokenIndex,
name: TokenIndex,
pub fn render(self: *const ExpectedToken, tree: *Tree, stream: anytype) !void {
return stream.print("must use '{s}' tag to refer to type '{s}'", .{ tree.slice(kw), tree.slice(name) });
}
};
fn SingleTokenError(comptime msg: []const u8) type {
return struct {
token: TokenIndex,
pub fn render(self: *const @This(), tree: *Tree, stream: anytype) !void {
const actual_token = tree.tokens.at(self.token);
return stream.print(msg, .{actual_token.id.symbol()});
}
};
}
fn SimpleError(comptime msg: []const u8) type {
return struct {
const ThisError = @This();
token: TokenIndex,
pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: anytype) !void {
return stream.write(msg);
}
};
}
};
pub const Type = struct {
pub const TypeList = ArrayList(*Type);
@"const": bool = false,
atomic: bool = false,
@"volatile": bool = false,
restrict: bool = false,
id: union(enum) {
Int: struct {
id: Id,
is_signed: bool,
pub const Id = enum {
Char,
Short,
Int,
Long,
LongLong,
};
},
Float: struct {
id: Id,
pub const Id = enum {
Float,
Double,
LongDouble,
};
},
Pointer: *Type,
Function: struct {
return_type: *Type,
param_types: TypeList,
},
Typedef: *Type,
Record: *Node.RecordType,
Enum: *Node.EnumType,
/// Special case for macro parameters that can be any type.
/// Only present if `retain_macros == true`.
Macro,
},
};
pub const Node = struct {
id: Id,
pub const Id = enum {
Root,
EnumField,
RecordField,
RecordDeclarator,
JumpStmt,
ExprStmt,
LabeledStmt,
CompoundStmt,
IfStmt,
SwitchStmt,
WhileStmt,
DoStmt,
ForStmt,
StaticAssert,
Declarator,
Pointer,
FnDecl,
Typedef,
VarDecl,
};
pub const Root = struct {
base: Node = Node{ .id = .Root },
decls: DeclList,
eof: TokenIndex,
pub const DeclList = ArrayList(*Node);
};
pub const DeclSpec = struct {
storage_class: union(enum) {
Auto: TokenIndex,
Extern: TokenIndex,
Register: TokenIndex,
Static: TokenIndex,
Typedef: TokenIndex,
None,
} = .None,
thread_local: ?TokenIndex = null,
type_spec: TypeSpec = TypeSpec{},
fn_spec: union(enum) {
Inline: TokenIndex,
Noreturn: TokenIndex,
None,
} = .None,
align_spec: ?struct {
alignas: TokenIndex,
expr: *Node,
rparen: TokenIndex,
} = null,
};
pub const TypeSpec = struct {
qual: TypeQual = TypeQual{},
spec: union(enum) {
/// error or default to int
None,
Void: TokenIndex,
Char: struct {
sign: ?TokenIndex = null,
char: TokenIndex,
},
Short: struct {
sign: ?TokenIndex = null,
short: TokenIndex = null,
int: ?TokenIndex = null,
},
Int: struct {
sign: ?TokenIndex = null,
int: ?TokenIndex = null,
},
Long: struct {
sign: ?TokenIndex = null,
long: TokenIndex,
longlong: ?TokenIndex = null,
int: ?TokenIndex = null,
},
Float: struct {
float: TokenIndex,
complex: ?TokenIndex = null,
},
Double: struct {
long: ?TokenIndex = null,
double: ?TokenIndex,
complex: ?TokenIndex = null,
},
Bool: TokenIndex,
Atomic: struct {
atomic: TokenIndex,
typename: *Node,
rparen: TokenIndex,
},
Enum: *EnumType,
Record: *RecordType,
Typedef: struct {
sym: TokenIndex,
sym_type: *Type,
},
pub fn print(self: *@This(), self: *const @This(), tree: *Tree, stream: anytype) !void {
switch (self.spec) {
.None => unreachable,
.Void => |index| try stream.write(tree.slice(index)),
.Char => |char| {
if (char.sign) |s| {
try stream.write(tree.slice(s));
try stream.writeByte(' ');
}
try stream.write(tree.slice(char.char));
},
.Short => |short| {
if (short.sign) |s| {
try stream.write(tree.slice(s));
try stream.writeByte(' ');
}
try stream.write(tree.slice(short.short));
if (short.int) |i| {
try stream.writeByte(' ');
try stream.write(tree.slice(i));
}
},
.Int => |int| {
if (int.sign) |s| {
try stream.write(tree.slice(s));
try stream.writeByte(' ');
}
if (int.int) |i| {
try stream.writeByte(' ');
try stream.write(tree.slice(i));
}
},
.Long => |long| {
if (long.sign) |s| {
try stream.write(tree.slice(s));
try stream.writeByte(' ');
}
try stream.write(tree.slice(long.long));
if (long.longlong) |l| {
try stream.writeByte(' ');
try stream.write(tree.slice(l));
}
if (long.int) |i| {
try stream.writeByte(' ');
try stream.write(tree.slice(i));
}
},
.Float => |float| {
try stream.write(tree.slice(float.float));
if (float.complex) |c| {
try stream.writeByte(' ');
try stream.write(tree.slice(c));
}
},
.Double => |double| {
if (double.long) |l| {
try stream.write(tree.slice(l));
try stream.writeByte(' ');
}
try stream.write(tree.slice(double.double));
if (double.complex) |c| {
try stream.writeByte(' ');
try stream.write(tree.slice(c));
}
},
.Bool => |index| try stream.write(tree.slice(index)),
.Typedef => |typedef| try stream.write(tree.slice(typedef.sym)),
else => try stream.print("TODO print {}", self.spec),
}
}
} = .None,
};
pub const EnumType = struct {
tok: TokenIndex,
name: ?TokenIndex,
body: ?struct {
lbrace: TokenIndex,
/// always EnumField
fields: FieldList,
rbrace: TokenIndex,
},
pub const FieldList = Root.DeclList;
};
pub const EnumField = struct {
base: Node = Node{ .id = .EnumField },
name: TokenIndex,
value: ?*Node,
};
pub const RecordType = struct {
tok: TokenIndex,
kind: enum {
Struct,
Union,
},
name: ?TokenIndex,
body: ?struct {
lbrace: TokenIndex,
/// RecordField or StaticAssert
fields: FieldList,
rbrace: TokenIndex,
},
pub const FieldList = Root.DeclList;
};
pub const RecordField = struct {
base: Node = Node{ .id = .RecordField },
type_spec: TypeSpec,
declarators: DeclaratorList,
semicolon: TokenIndex,
pub const DeclaratorList = Root.DeclList;
};
pub const RecordDeclarator = struct {
base: Node = Node{ .id = .RecordDeclarator },
declarator: ?*Declarator,
bit_field_expr: ?*Expr,
};
pub const TypeQual = struct {
@"const": ?TokenIndex = null,
atomic: ?TokenIndex = null,
@"volatile": ?TokenIndex = null,
restrict: ?TokenIndex = null,
};
pub const JumpStmt = struct {
base: Node = Node{ .id = .JumpStmt },
ltoken: TokenIndex,
kind: union(enum) {
Break,
Continue,
Return: ?*Node,
Goto: TokenIndex,
},
semicolon: TokenIndex,
};
pub const ExprStmt = struct {
base: Node = Node{ .id = .ExprStmt },
expr: ?*Expr,
semicolon: TokenIndex,
};
pub const LabeledStmt = struct {
base: Node = Node{ .id = .LabeledStmt },
kind: union(enum) {
Label: TokenIndex,
Case: TokenIndex,
Default: TokenIndex,
},
stmt: *Node,
};
pub const CompoundStmt = struct {
base: Node = Node{ .id = .CompoundStmt },
lbrace: TokenIndex,
statements: StmtList,
rbrace: TokenIndex,
pub const StmtList = Root.DeclList;
};
pub const IfStmt = struct {
base: Node = Node{ .id = .IfStmt },
@"if": TokenIndex,
cond: *Node,
body: *Node,
@"else": ?struct {
tok: TokenIndex,
body: *Node,
},
};
pub const SwitchStmt = struct {
base: Node = Node{ .id = .SwitchStmt },
@"switch": TokenIndex,
expr: *Expr,
rparen: TokenIndex,
stmt: *Node,
};
pub const WhileStmt = struct {
base: Node = Node{ .id = .WhileStmt },
@"while": TokenIndex,
cond: *Expr,
rparen: TokenIndex,
body: *Node,
};
pub const DoStmt = struct {
base: Node = Node{ .id = .DoStmt },
do: TokenIndex,
body: *Node,
@"while": TokenIndex,
cond: *Expr,
semicolon: TokenIndex,
};
pub const ForStmt = struct {
base: Node = Node{ .id = .ForStmt },
@"for": TokenIndex,
init: ?*Node,
cond: ?*Expr,
semicolon: TokenIndex,
incr: ?*Expr,
rparen: TokenIndex,
body: *Node,
};
pub const StaticAssert = struct {
base: Node = Node{ .id = .StaticAssert },
assert: TokenIndex,
expr: *Node,
semicolon: TokenIndex,
};
pub const Declarator = struct {
base: Node = Node{ .id = .Declarator },
pointer: ?*Pointer,
prefix: union(enum) {
None,
Identifer: TokenIndex,
Complex: struct {
lparen: TokenIndex,
inner: *Node,
rparen: TokenIndex,
},
},
suffix: union(enum) {
None,
Fn: struct {
lparen: TokenIndex,
params: Params,
rparen: TokenIndex,
},
Array: Arrays,
},
pub const Arrays = ArrayList(*Array);
pub const Params = ArrayList(*Param);
};
pub const Array = struct {
lbracket: TokenIndex,
inner: union(enum) {
Inferred,
Unspecified: TokenIndex,
Variable: struct {
asterisk: ?TokenIndex,
static: ?TokenIndex,
qual: TypeQual,
expr: *Expr,
},
},
rbracket: TokenIndex,
};
pub const Pointer = struct {
base: Node = Node{ .id = .Pointer },
asterisk: TokenIndex,
qual: TypeQual,
pointer: ?*Pointer,
};
pub const Param = struct {
kind: union(enum) {
Variable,
Old: TokenIndex,
Normal: struct {
decl_spec: *DeclSpec,
declarator: *Node,
},
},
};
pub const FnDecl = struct {
base: Node = Node{ .id = .FnDecl },
decl_spec: DeclSpec,
declarator: *Declarator,
old_decls: OldDeclList,
body: ?*CompoundStmt,
pub const OldDeclList = ArrayList(*Node);
};
pub const Typedef = struct {
base: Node = Node{ .id = .Typedef },
decl_spec: DeclSpec,
declarators: DeclaratorList,
semicolon: TokenIndex,
pub const DeclaratorList = Root.DeclList;
};
pub const VarDecl = struct {
base: Node = Node{ .id = .VarDecl },
decl_spec: DeclSpec,
initializers: Initializers,
semicolon: TokenIndex,
pub const Initializers = Root.DeclList;
};
pub const Initialized = struct {
base: Node = Node{ .id = Initialized },
declarator: *Declarator,
eq: TokenIndex,
init: Initializer,
};
pub const Initializer = union(enum) {
list: struct {
initializers: List,
rbrace: TokenIndex,
},
expr: *Expr,
pub const List = ArrayList(*Initializer);
};
pub const Macro = struct {
base: Node = Node{ .id = Macro },
kind: union(enum) {
Undef: []const u8,
Fn: struct {
params: []const []const u8,
expr: *Expr,
},
Expr: *Expr,
},
};
};
pub const Expr = struct {
id: Id,
ty: *Type,
value: union(enum) {
None,
},
pub const Id = enum {
Infix,
Literal,
};
pub const Infix = struct {
base: Expr = Expr{ .id = .Infix },
lhs: *Expr,
op_token: TokenIndex,
rhs: *Expr,
};
};

File diff suppressed because it is too large

View File

@ -662,14 +662,12 @@ test "lengths overflow" {
// malformed final dynamic block, tries to write 321 code lengths (MAXCODES is 316)
// f dy hlit hdist hclen 16 17 18 0 (18) x138 (18) x138 (18) x39 (16) x6
// 1 10 11101 11101 0000 010 010 010 010 (11) 1111111 (11) 1111111 (11) 0011100 (01) 11
const stream = [_]u8{
0b11101101, 0b00011101, 0b00100100, 0b11101001, 0b11111111, 0b11111111, 0b00111001, 0b00001110
};
const stream = [_]u8{ 0b11101101, 0b00011101, 0b00100100, 0b11101001, 0b11111111, 0b11111111, 0b00111001, 0b00001110 };
const reader = std.io.fixedBufferStream(&stream).reader();
var window: [0x8000]u8 = undefined;
var inflate = inflateStream(reader, &window);
var buf: [1]u8 = undefined;
std.testing.expectError(error.InvalidLength, inflate.read(&buf));
std.testing.expectError(error.InvalidLength, inflate.read(&buf));
}

View File

@ -67,6 +67,7 @@ pub const dh = struct {
pub const ecc = struct {
pub const Curve25519 = @import("crypto/25519/curve25519.zig").Curve25519;
pub const Edwards25519 = @import("crypto/25519/edwards25519.zig").Edwards25519;
pub const P256 = @import("crypto/pcurves/p256.zig").P256;
pub const Ristretto255 = @import("crypto/25519/ristretto255.zig").Ristretto255;
};
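Not part of the diff, just for orientation: with the export above, the new curve becomes reachable as `std.crypto.ecc.P256`. A minimal sketch (test name illustrative):

const std = @import("std");
const P256 = std.crypto.ecc.P256;

test "derive a public point from a random scalar" {
    // Pick a random scalar, multiply the base point by it, and serialize the result.
    const sk = P256.scalar.random(.Little);
    const pk = try P256.basePoint.mul(sk, .Little);
    _ = pk.toCompressedSec1();
}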

View File

@ -0,0 +1,284 @@
const std = @import("std");
const builtin = std.builtin;
const crypto = std.crypto;
const debug = std.debug;
const mem = std.mem;
const meta = std.meta;
const NonCanonicalError = crypto.errors.NonCanonicalError;
const NotSquareError = crypto.errors.NotSquareError;
/// Parameters to create a finite field type.
pub const FieldParams = struct {
fiat: type,
field_order: comptime_int,
field_bits: comptime_int,
saturated_bits: comptime_int,
encoded_length: comptime_int,
};
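The element type below stores values in the Montgomery domain. For reference (an assumption about the fiat-crypto generated backend, not stated in the diff): an element x is kept as x·R mod p with R = 2^(64 · limb count), so the Montgomery product of two stored values satisfies

(aR)(bR)R^{-1} \equiv (ab)R \pmod{p}

and results stay in the same representation; `toMontgomery`/`fromMontgomery` convert at the boundaries.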
/// A field element, internally stored in Montgomery domain.
pub fn Field(comptime params: FieldParams) type {
const fiat = params.fiat;
const Limbs = fiat.Limbs;
return struct {
const Fe = @This();
limbs: Limbs,
/// Field size.
pub const field_order = params.field_order;
/// Number of bits to represent the set of all elements.
pub const field_bits = params.field_bits;
/// Number of bits that can be saturated without overflowing.
pub const saturated_bits = params.saturated_bits;
/// Number of bytes required to encode an element.
pub const encoded_length = params.encoded_length;
/// Zero.
pub const zero: Fe = Fe{ .limbs = mem.zeroes(Limbs) };
/// One.
pub const one = comptime one: {
var fe: Fe = undefined;
fiat.setOne(&fe.limbs);
break :one fe;
};
/// Reject non-canonical encodings of an element.
pub fn rejectNonCanonical(s_: [encoded_length]u8, endian: builtin.Endian) NonCanonicalError!void {
var s = if (endian == .Little) s_ else orderSwap(s_);
const field_order_s = comptime fos: {
var fos: [encoded_length]u8 = undefined;
mem.writeIntLittle(std.meta.Int(.unsigned, encoded_length * 8), &fos, field_order);
break :fos fos;
};
if (crypto.utils.timingSafeCompare(u8, &s, &field_order_s, .Little) != .lt) {
return error.NonCanonical;
}
}
/// Swap the endianness of an encoded element.
pub fn orderSwap(s: [encoded_length]u8) [encoded_length]u8 {
var t = s;
for (s) |x, i| t[t.len - 1 - i] = x;
return t;
}
/// Unpack a field element.
pub fn fromBytes(s_: [encoded_length]u8, endian: builtin.Endian) NonCanonicalError!Fe {
var s = if (endian == .Little) s_ else orderSwap(s_);
try rejectNonCanonical(s, .Little);
var limbs_z: Limbs = undefined;
fiat.fromBytes(&limbs_z, s);
var limbs: Limbs = undefined;
fiat.toMontgomery(&limbs, limbs_z);
return Fe{ .limbs = limbs };
}
/// Pack a field element.
pub fn toBytes(fe: Fe, endian: builtin.Endian) [encoded_length]u8 {
var limbs_z: Limbs = undefined;
fiat.fromMontgomery(&limbs_z, fe.limbs);
var s: [encoded_length]u8 = undefined;
fiat.toBytes(&s, limbs_z);
return if (endian == .Little) s else orderSwap(s);
}
/// Element as an integer.
pub const IntRepr = meta.Int(.unsigned, params.field_bits);
/// Create a field element from an integer.
pub fn fromInt(comptime x: IntRepr) NonCanonicalError!Fe {
var s: [encoded_length]u8 = undefined;
mem.writeIntLittle(IntRepr, &s, x);
return fromBytes(s, .Little);
}
/// Return the field element as an integer.
pub fn toInt(fe: Fe) IntRepr {
const s = fe.toBytes(.Little);
return mem.readIntLittle(IntRepr, &s);
}
/// Return true if the field element is zero.
pub fn isZero(fe: Fe) bool {
var z: @TypeOf(fe.limbs[0]) = undefined;
fiat.nonzero(&z, fe.limbs);
return z == 0;
}
/// Return true if both field elements are equivalent.
pub fn equivalent(a: Fe, b: Fe) bool {
return a.sub(b).isZero();
}
/// Return true if the element is odd.
pub fn isOdd(fe: Fe) bool {
const s = fe.toBytes(.Little);
return @truncate(u1, s[0]) != 0;
}
/// Conditionally replace a field element with `a` if `c` is positive.
pub fn cMov(fe: *Fe, a: Fe, c: u1) void {
fiat.selectznz(&fe.limbs, c, fe.limbs, a.limbs);
}
/// Add field elements.
pub fn add(a: Fe, b: Fe) Fe {
var fe: Fe = undefined;
fiat.add(&fe.limbs, a.limbs, b.limbs);
return fe;
}
/// Subtract field elements.
pub fn sub(a: Fe, b: Fe) Fe {
var fe: Fe = undefined;
fiat.sub(&fe.limbs, a.limbs, b.limbs);
return fe;
}
/// Double a field element.
pub fn dbl(a: Fe) Fe {
var fe: Fe = undefined;
fiat.add(&fe.limbs, a.limbs, a.limbs);
return fe;
}
/// Multiply field elements.
pub fn mul(a: Fe, b: Fe) Fe {
var fe: Fe = undefined;
fiat.mul(&fe.limbs, a.limbs, b.limbs);
return fe;
}
/// Square a field element.
pub fn sq(a: Fe) Fe {
var fe: Fe = undefined;
fiat.square(&fe.limbs, a.limbs);
return fe;
}
/// Square a field element n times.
fn sqn(a: Fe, comptime n: comptime_int) Fe {
var i: usize = 0;
var fe = a;
while (i < n) : (i += 1) {
fe = fe.sq();
}
return fe;
}
/// Compute a^n.
pub fn pow(a: Fe, comptime T: type, comptime n: T) Fe {
var fe = one;
var x: T = n;
var t = a;
while (true) {
if (@truncate(u1, x) != 0) fe = fe.mul(t);
x >>= 1;
if (x == 0) break;
t = t.sq();
}
return fe;
}
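The loop above is plain square-and-multiply: writing the exponent in binary as n = \sum_i n_i 2^i, the result is

a^n = \prod_{i \,:\, n_i = 1} a^{2^i},

so each iteration consumes the lowest bit of the remaining exponent (multiplying into the accumulator when it is set) and squares t to advance to the next power of two.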
/// Negate a field element.
pub fn neg(a: Fe) Fe {
var fe: Fe = undefined;
fiat.opp(&fe.limbs, a.limbs);
return fe;
}
/// Return the inverse of a field element, or 0 if a=0.
// Field inversion from https://eprint.iacr.org/2021/549.pdf
pub fn invert(a: Fe) Fe {
const iterations = (49 * field_bits + 57) / 17;
const Word = @TypeOf(a.limbs[0]);
const XLimbs = [a.limbs.len + 1]Word;
var d: Word = 1;
var f: XLimbs = undefined;
fiat.msat(&f);
var g: XLimbs = undefined;
fiat.fromMontgomery(g[0..a.limbs.len], a.limbs);
g[g.len - 1] = 0;
var r: Limbs = undefined;
fiat.setOne(&r);
var v = mem.zeroes(Limbs);
var precomp: Limbs = undefined;
fiat.divstepPrecomp(&precomp);
var out1: Word = undefined;
var out2: XLimbs = undefined;
var out3: XLimbs = undefined;
var out4: Limbs = undefined;
var out5: Limbs = undefined;
var i: usize = 0;
while (i < iterations - iterations % 2) : (i += 2) {
fiat.divstep(&out1, &out2, &out3, &out4, &out5, d, f, g, v, r);
fiat.divstep(&d, &f, &g, &v, &r, out1, out2, out3, out4, out5);
}
if (iterations % 2 != 0) {
fiat.divstep(&out1, &out2, &out3, &out4, &out5, d, f, g, v, r);
mem.copy(Word, &v, &out4);
mem.copy(Word, &f, &out2);
}
var v_opp: Limbs = undefined;
fiat.opp(&v_opp, v);
fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (meta.bitCount(Word) - 1)), v, v_opp);
var fe: Fe = undefined;
fiat.mul(&fe.limbs, v, precomp);
return fe;
}
/// Return true if the field element is a square.
pub fn isSquare(x2: Fe) bool {
if (field_order == 115792089210356248762697446949407573530086143415290314195533631308867097853951) {
const t110 = x2.mul(x2.sq()).sq();
const t111 = x2.mul(t110);
const t111111 = t111.mul(x2.mul(t110).sqn(3));
const x15 = t111111.sqn(6).mul(t111111).sqn(3).mul(t111);
const x16 = x15.sq().mul(x2);
const x53 = x16.sqn(16).mul(x16).sqn(15);
const x47 = x15.mul(x53);
const ls = x47.mul(((x53.sqn(17).mul(x2)).sqn(143).mul(x47)).sqn(47)).sq().mul(x2);
return ls.equivalent(Fe.one);
} else {
const ls = x2.pow(std.meta.Int(.unsigned, field_bits), (field_order - 1) / 2); // Legendre symbol
return ls.equivalent(Fe.one);
}
}
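The generic branch is Euler's criterion: for an odd prime p and x not divisible by p,

x^{(p-1)/2} \equiv \begin{cases} +1 \pmod{p} & \text{if } x \text{ is a square} \\ -1 \pmod{p} & \text{otherwise,} \end{cases}

which is exactly the Legendre-symbol test the pow call computes.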
// x=x2^((field_order+1)/4) w/ field order=3 (mod 4).
fn uncheckedSqrt(x2: Fe) Fe {
comptime debug.assert(field_order % 4 == 3);
if (field_order == 115792089210356248762697446949407573530086143415290314195533631308867097853951) {
const t11 = x2.mul(x2.sq());
const t1111 = t11.mul(t11.sqn(2));
const t11111111 = t1111.mul(t1111.sqn(4));
const x16 = t11111111.sqn(8).mul(t11111111);
return x16.sqn(16).mul(x16).sqn(32).mul(x2).sqn(96).mul(x2).sqn(94);
} else {
return x2.pow(std.meta.Int(.unsigned, field_bits), (field_order + 1) / 4);
}
}
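Why the exponent (p+1)/4 yields a square root when p \equiv 3 \pmod{4}: if x2 = x^2 is a square, then

(x^2)^{(p+1)/4} = x^{(p+1)/2} = x \cdot x^{(p-1)/2} \equiv \pm x \pmod{p},

so the result squares back to x2; `sqrt` below checks that and returns error.NotSquare otherwise.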
/// Compute the square root of `x2`, returning `error.NotSquare` if `x2` was not a square.
pub fn sqrt(x2: Fe) NotSquareError!Fe {
const x = x2.uncheckedSqrt();
if (x.sq().equivalent(x2)) {
return x;
}
return error.NotSquare;
}
};
}

View File

@ -0,0 +1,412 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const builtin = std.builtin;
const crypto = std.crypto;
const mem = std.mem;
const meta = std.meta;
const EncodingError = crypto.errors.EncodingError;
const IdentityElementError = crypto.errors.IdentityElementError;
const NonCanonicalError = crypto.errors.NonCanonicalError;
const NotSquareError = crypto.errors.NotSquareError;
/// Group operations over P256.
pub const P256 = struct {
/// The underlying prime field.
pub const Fe = @import("p256/field.zig").Fe;
/// Field arithmetic mod the order of the main subgroup.
pub const scalar = @import("p256/scalar.zig");
x: Fe,
y: Fe,
z: Fe = Fe.one,
is_base: bool = false,
/// The P256 base point.
pub const basePoint = P256{
.x = try Fe.fromInt(48439561293906451759052585252797914202762949526041747995844080717082404635286),
.y = try Fe.fromInt(36134250956749795798585127919587881956611106672985015071877198253568414405109),
.z = Fe.one,
.is_base = true,
};
/// The P256 neutral element.
pub const identityElement = P256{ .x = Fe.zero, .y = Fe.one, .z = Fe.zero };
pub const B = try Fe.fromInt(41058363725152142129326129780047268409114441015993725554835256314039467401291);
/// Reject the neutral element.
pub fn rejectIdentity(p: P256) IdentityElementError!void {
if (p.x.isZero()) {
return error.IdentityElement;
}
}
/// Create a point from affine coordinates after checking that they match the curve equation.
pub fn fromAffineCoordinates(x: Fe, y: Fe) EncodingError!P256 {
const x3AxB = x.sq().mul(x).sub(x).sub(x).sub(x).add(B);
const yy = y.sq();
if (!x3AxB.equivalent(yy)) {
return error.InvalidEncoding;
}
const p: P256 = .{ .x = x, .y = y, .z = Fe.one };
return p;
}
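The check above is the short Weierstrass equation for P-256, whose curve parameter a is -3:

y^2 \equiv x^3 - 3x + b \pmod{p},

with b the constant `B` defined earlier; `recoverY` below solves the same equation for y given x.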
/// Create a point from serialized affine coordinates.
pub fn fromSerializedAffineCoordinates(xs: [32]u8, ys: [32]u8, endian: builtin.Endian) (NonCanonicalError || EncodingError)!P256 {
const x = try Fe.fromBytes(xs, endian);
const y = try Fe.fromBytes(ys, endian);
return fromAffineCoordinates(x, y);
}
/// Recover the Y coordinate from the X coordinate.
pub fn recoverY(x: Fe, is_odd: bool) NotSquareError!Fe {
const x3AxB = x.sq().mul(x).sub(x).sub(x).sub(x).add(B);
var y = try x3AxB.sqrt();
const yn = y.neg();
y.cMov(yn, @boolToInt(is_odd) ^ @boolToInt(y.isOdd()));
return y;
}
/// Deserialize a SEC1-encoded point.
pub fn fromSec1(s: []const u8) (EncodingError || NotSquareError || NonCanonicalError)!P256 {
if (s.len < 1) return error.InvalidEncoding;
const encoding_type = s[0];
const encoded = s[1..];
switch (encoding_type) {
0 => {
if (encoded.len != 0) return error.InvalidEncoding;
return P256.identityElement;
},
2, 3 => {
if (encoded.len != 32) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const y_is_odd = (encoding_type == 3);
const y = try recoverY(x, y_is_odd);
return P256{ .x = x, .y = y };
},
4 => {
if (encoded.len != 64) return error.InvalidEncoding;
const x = try Fe.fromBytes(encoded[0..32].*, .Big);
const y = try Fe.fromBytes(encoded[32..64].*, .Big);
return P256.fromAffineCoordinates(x, y);
},
else => return error.InvalidEncoding,
}
}
/// Serialize a point using the compressed SEC-1 format.
pub fn toCompressedSec1(p: P256) [33]u8 {
var out: [33]u8 = undefined;
const xy = p.affineCoordinates();
out[0] = if (xy.y.isOdd()) 3 else 2;
mem.copy(u8, out[1..], &xy.x.toBytes(.Big));
return out;
}
/// Serialize a point using the uncompressed SEC-1 format.
pub fn toUncompressedSec1(p: P256) [65]u8 {
var out: [65]u8 = undefined;
out[0] = 4;
const xy = p.affineCoordinates();
mem.copy(u8, out[1..33], &xy.x.toBytes(.Big));
mem.copy(u8, out[33..65], &xy.y.toBytes(.Big));
return out;
}
/// Return a random point.
pub fn random() P256 {
const n = scalar.random(.Little);
return basePoint.mul(n, .Little) catch unreachable;
}
/// Flip the sign of the X coordinate.
pub fn neg(p: P256) P256 {
return .{ .x = p.x, .y = p.y.neg(), .z = p.z };
}
/// Double a P256 point.
// Algorithm 6 from https://eprint.iacr.org/2015/1060.pdf
pub fn dbl(p: P256) P256 {
var t0 = p.x.sq();
var t1 = p.y.sq();
var t2 = p.z.sq();
var t3 = p.x.mul(p.y);
t3 = t3.dbl();
var Z3 = p.x.mul(p.z);
Z3 = Z3.add(Z3);
var Y3 = B.mul(t2);
Y3 = Y3.sub(Z3);
var X3 = Y3.dbl();
Y3 = X3.add(Y3);
X3 = t1.sub(Y3);
Y3 = t1.add(Y3);
Y3 = X3.mul(Y3);
X3 = X3.mul(t3);
t3 = t2.dbl();
t2 = t2.add(t3);
Z3 = B.mul(Z3);
Z3 = Z3.sub(t2);
Z3 = Z3.sub(t0);
t3 = Z3.dbl();
Z3 = Z3.add(t3);
t3 = t0.dbl();
t0 = t3.add(t0);
t0 = t0.sub(t2);
t0 = t0.mul(Z3);
Y3 = Y3.add(t0);
t0 = p.y.mul(p.z);
t0 = t0.dbl();
Z3 = t0.mul(Z3);
X3 = X3.sub(Z3);
Z3 = t0.mul(t1);
Z3 = Z3.dbl().dbl();
return .{
.x = X3,
.y = Y3,
.z = Z3,
};
}
/// Add P256 points, the second being specified using affine coordinates.
// Algorithm 5 from https://eprint.iacr.org/2015/1060.pdf
pub fn addMixed(p: P256, q: struct { x: Fe, y: Fe }) P256 {
var t0 = p.x.mul(q.x);
var t1 = p.y.mul(q.y);
var t3 = q.x.add(q.y);
var t4 = p.x.add(p.y);
t3 = t3.mul(t4);
t4 = t0.add(t1);
t3 = t3.sub(t4);
t4 = q.y.mul(p.z);
t4 = t4.add(p.y);
var Y3 = q.x.mul(p.z);
Y3 = Y3.add(p.x);
var Z3 = B.mul(p.z);
var X3 = Y3.sub(Z3);
Z3 = X3.dbl();
X3 = X3.add(Z3);
Z3 = t1.sub(X3);
X3 = t1.dbl();
Y3 = B.mul(Y3);
t1 = p.z.add(p.z);
var t2 = t1.add(p.z);
Y3 = Y3.sub(t2);
Y3 = Y3.sub(t0);
t1 = Y3.dbl();
Y3 = t1.add(Y3);
t1 = t0.dbl();
t0 = t1.add(t0);
t0 = t0.sub(t2);
t1 = t4.mul(Y3);
t2 = t0.mul(Y3);
Y3 = X3.mul(Z3);
Y3 = Y3.add(t2);
X3 = t3.mul(X3);
X3 = X3.sub(t1);
Z3 = t4.mul(Z3);
t1 = t3.mul(t0);
Z3 = Z3.add(t1);
return .{
.x = X3,
.y = Y3,
.z = Z3,
};
}
// Add P256 points.
// Algorithm 4 from https://eprint.iacr.org/2015/1060.pdf
pub fn add(p: P256, q: P256) P256 {
var t0 = p.x.mul(q.x);
var t1 = p.y.mul(q.y);
var t2 = p.z.mul(q.z);
var t3 = p.x.add(p.y);
var t4 = q.x.add(q.y);
t3 = t3.mul(t4);
t4 = t0.add(t1);
t3 = t3.sub(t4);
t4 = p.y.add(p.z);
var X3 = q.y.add(q.z);
t4 = t4.mul(X3);
X3 = t1.add(t2);
t4 = t4.sub(X3);
X3 = p.x.add(p.z);
var Y3 = q.x.add(q.z);
X3 = X3.mul(Y3);
Y3 = t0.add(t2);
Y3 = X3.sub(Y3);
var Z3 = B.mul(t2);
X3 = Y3.sub(Z3);
Z3 = X3.dbl();
X3 = X3.add(Z3);
Z3 = t1.sub(X3);
X3 = t1.add(X3);
Y3 = B.mul(Y3);
t1 = t2.dbl();
t2 = t1.add(t2);
Y3 = Y3.sub(t2);
Y3 = Y3.sub(t0);
t1 = Y3.dbl();
Y3 = t1.add(Y3);
t1 = t0.dbl();
t0 = t1.add(t0);
t0 = t0.sub(t2);
t1 = t4.mul(Y3);
t2 = t0.mul(Y3);
Y3 = X3.mul(Z3);
Y3 = Y3.add(t2);
X3 = t3.mul(X3);
X3 = X3.sub(t1);
Z3 = t4.mul(Z3);
t1 = t3.mul(t0);
Z3 = Z3.add(t1);
return .{
.x = X3,
.y = Y3,
.z = Z3,
};
}
// Subtract P256 points.
pub fn sub(p: P256, q: P256) P256 {
return p.add(q.neg());
}
/// Return affine coordinates.
pub fn affineCoordinates(p: P256) struct { x: Fe, y: Fe } {
const zinv = p.z.invert();
const ret = .{
.x = p.x.mul(zinv),
.y = p.y.mul(zinv),
};
return ret;
}
/// Return true if both coordinate sets represent the same point.
pub fn equivalent(a: P256, b: P256) bool {
if (a.sub(b).rejectIdentity()) {
return false;
} else |_| {
return true;
}
}
fn cMov(p: *P256, a: P256, c: u1) void {
p.x.cMov(a.x, c);
p.y.cMov(a.y, c);
p.z.cMov(a.z, c);
}
fn pcSelect(comptime n: usize, pc: [n]P256, b: u8) P256 {
var t = P256.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {
t.cMov(pc[i], @truncate(u1, (@as(usize, b ^ i) -% 1) >> 8));
}
return t;
}
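The selection above is branch-free: when b == i, b ^ i is 0 and the wrapping subtraction 0 -% 1 produces an all-ones word, so the truncated bit is 1 and cMov copies pc[i]; for any other i the xor is nonzero and below 256, the subtraction stays below 256, the shift by 8 gives 0, and the entry is skipped. The lookup therefore scans the whole table unconditionally and leaks nothing about b through branches.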
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
for (s) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
// Now, e[0..63] is between 0 and 15, e[63] is between 0 and 7
var carry: i8 = 0;
for (e[0..64]) |*x| {
x.* += carry;
carry = (x.* + 8) >> 4;
x.* -= carry * 16;
std.debug.assert(x.* >= -8 and x.* <= 8);
}
e[64] = carry;
// Now, e[*] is between -8 and 8, including e[64]
std.debug.assert(carry >= -8 and carry <= 8);
return e;
}
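The recoding above rewrites the 32-byte scalar in signed radix-16 form,

s = \sum_{i=0}^{64} e_i \, 16^i, \qquad e_i \in [-8, 8],

which is what lets pcMul get by with a table of the first eight multiples of the point: positive digits add pc[e_i], negative digits subtract pc[-e_i], and negating a point is cheap.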
fn pcMul(pc: [9]P256, s: [32]u8, comptime vartime: bool) IdentityElementError!P256 {
std.debug.assert(vartime);
const e = slide(s);
var q = P256.identityElement;
var pos = e.len - 1;
while (true) : (pos -= 1) {
const slot = e[pos];
if (slot > 0) {
q = q.add(pc[@intCast(usize, slot)]);
} else if (slot < 0) {
q = q.sub(pc[@intCast(usize, -slot)]);
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
}
try q.rejectIdentity();
return q;
}
fn pcMul16(pc: [16]P256, s: [32]u8, comptime vartime: bool) IdentityElementError!P256 {
var q = P256.identityElement;
var pos: usize = 252;
while (true) : (pos -= 4) {
const slot = @truncate(u4, (s[pos >> 3] >> @truncate(u3, pos)));
if (vartime) {
if (slot != 0) {
q = q.add(pc[slot]);
}
} else {
q = q.add(pcSelect(16, pc, slot));
}
if (pos == 0) break;
q = q.dbl().dbl().dbl().dbl();
}
try q.rejectIdentity();
return q;
}
fn precompute(p: P256, comptime count: usize) [1 + count]P256 {
var pc: [1 + count]P256 = undefined;
pc[0] = P256.identityElement;
pc[1] = p;
var i: usize = 2;
while (i <= count) : (i += 1) {
pc[i] = if (i % 2 == 0) pc[i / 2].dbl() else pc[i - 1].add(p);
}
return pc;
}
/// Multiply an elliptic curve point by a scalar.
/// Return error.IdentityElement if the result is the identity element.
pub fn mul(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const pc = if (p.is_base) precompute(P256.basePoint, 15) else pc: {
try p.rejectIdentity();
const xpc = precompute(p, 15);
break :pc xpc;
};
return pcMul16(pc, s, false);
}
/// Multiply an elliptic curve point by a *PUBLIC* scalar *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulPublic(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
const pc = if (p.is_base) precompute(P256.basePoint, 8) else pc: {
try p.rejectIdentity();
const xpc = precompute(p, 8);
break :pc xpc;
};
return pcMul(pc, s, true);
}
};
test "p256" {
_ = @import("tests.zig");
}

View File

@ -0,0 +1,18 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const common = @import("../common.zig");
const Field = common.Field;
pub const Fe = Field(.{
.fiat = @import("p256_64.zig"),
.field_order = 115792089210356248762697446949407573530086143415290314195533631308867097853951,
.field_bits = 256,
.saturated_bits = 255,
.encoded_length = 32,
});

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,231 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const builtin = std.builtin;
const common = @import("../common.zig");
const crypto = std.crypto;
const debug = std.debug;
const math = std.math;
const mem = std.mem;
const Field = common.Field;
const NonCanonicalError = std.crypto.errors.NonCanonicalError;
const NotSquareError = std.crypto.errors.NotSquareError;
/// Number of bytes required to encode a scalar.
pub const encoded_length = 32;
/// A compressed scalar, in canonical form.
pub const CompressedScalar = [encoded_length]u8;
const Fe = Field(.{
.fiat = @import("p256_scalar_64.zig"),
.field_order = 115792089210356248762697446949407573529996955224135760342422259061068512044369,
.field_bits = 256,
.saturated_bits = 255,
.encoded_length = encoded_length,
});
/// Reject a scalar whose encoding is not canonical.
pub fn rejectNonCanonical(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!void {
return Fe.rejectNonCanonical(s, endian);
}
/// Reduce a 48-byte scalar to the field size.
pub fn reduce48(s: [48]u8, endian: builtin.Endian) CompressedScalar {
return Scalar.fromBytes48(s, endian).toBytes(endian);
}
/// Reduce a 64-byte scalar to the field size.
pub fn reduce64(s: [64]u8, endian: builtin.Endian) CompressedScalar {
return ScalarDouble.fromBytes64(s, endian).toBytes(endian);
}
/// Return a*b (mod L)
pub fn mul(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return a*b+c (mod L)
pub fn mulAdd(a: CompressedScalar, b: CompressedScalar, c: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).add(try Scalar.fromBytes(c, endian)).toBytes(endian);
}
/// Return a+b (mod L)
pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).add(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return -s (mod L)
pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(s, endian)).neg().toBytes(endian);
}
/// Return (a-b) (mod L)
pub fn sub(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).sub(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return a random scalar
pub fn random(endian: builtin.Endian) CompressedScalar {
return Scalar.random().toBytes(endian);
}
/// A scalar in unpacked representation.
pub const Scalar = struct {
fe: Fe,
/// Zero.
pub const zero = Scalar{ .fe = Fe.zero };
/// One.
pub const one = Scalar{ .fe = Fe.one };
/// Unpack a serialized representation of a scalar.
pub fn fromBytes(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!Scalar {
return Scalar{ .fe = try Fe.fromBytes(s, endian) };
}
/// Reduce a 384-bit input to the field size.
pub fn fromBytes48(s: [48]u8, endian: builtin.Endian) Scalar {
const t = ScalarDouble.fromBytes(384, s, endian);
return t.reduce(384);
}
/// Reduce a 512-bit input to the field size.
pub fn fromBytes64(s: [64]u8, endian: builtin.Endian) Scalar {
const t = ScalarDouble.fromBytes(512, s, endian);
return t.reduce(512);
}
/// Pack a scalar into bytes.
pub fn toBytes(n: Scalar, endian: builtin.Endian) CompressedScalar {
return n.fe.toBytes(endian);
}
/// Return true if the scalar is zero.
pub fn isZero(n: Scalar) bool {
return n.fe.isZero();
}
/// Return true if a and b are equivalent.
pub fn equivalent(a: Scalar, b: Scalar) bool {
return a.fe.equivalent(b.fe);
}
/// Compute x+y (mod L)
pub fn add(x: Scalar, y: Scalar) Scalar {
return Scalar{ .fe = x.fe.add(y.fe) };
}
/// Compute x-y (mod L)
pub fn sub(x: Scalar, y: Scalar) Scalar {
return Scalar{ .fe = x.fe.sub(y.fe) };
}
/// Compute 2n (mod L)
pub fn dbl(n: Scalar) Scalar {
return Scalar{ .fe = n.fe.dbl() };
}
/// Compute x*y (mod L)
pub fn mul(x: Scalar, y: Scalar) Scalar {
return Scalar{ .fe = x.fe.mul(y.fe) };
}
/// Compute x^2 (mod L)
pub fn sq(n: Scalar) Scalar {
return Scalar{ .fe = n.fe.sq() };
}
/// Compute x^n (mod L)
pub fn pow(a: Scalar, comptime T: type, comptime n: T) Scalar {
return Scalar{ .fe = a.fe.pow(T, n) };
}
/// Compute -x (mod L)
pub fn neg(n: Scalar) Scalar {
return Scalar{ .fe = n.fe.neg() };
}
/// Compute x^-1 (mod L)
pub fn invert(n: Scalar) Scalar {
return Scalar{ .fe = n.fe.invert() };
}
/// Return true if n is a quadratic residue mod L.
pub fn isSquare(n: Scalar) bool {
return n.fe.isSquare();
}
/// Return the square root of n, or NotSquare if there is no solution.
pub fn sqrt(n: Scalar) NotSquareError!Scalar {
return Scalar{ .fe = try n.fe.sqrt() };
}
/// Return a random scalar < L.
pub fn random() Scalar {
var s: [48]u8 = undefined;
while (true) {
crypto.random.bytes(&s);
const n = Scalar.fromBytes48(s, .Little);
if (!n.isZero()) {
return n;
}
}
}
};
const ScalarDouble = struct {
x1: Fe,
x2: Fe,
x3: Fe,
fn fromBytes(comptime bits: usize, s_: [bits / 8]u8, endian: builtin.Endian) ScalarDouble {
debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 3);
var s = s_;
if (endian == .Big) {
for (s_) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
var b = [_]u8{0} ** encoded_length;
const len = math.min(s.len, 24);
mem.copy(u8, b[0..len], s[0..len]);
t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 24) {
var b = [_]u8{0} ** encoded_length;
const len = math.min(s.len - 24, 24);
mem.copy(u8, b[0..len], s[24..][0..len]);
t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
}
if (s_.len >= 48) {
var b = [_]u8{0} ** encoded_length;
const len = s.len - 48;
mem.copy(u8, b[0..len], s[48..][0..len]);
t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
}
return t;
}
fn reduce(expanded: ScalarDouble, comptime bits: usize) Scalar {
debug.assert(bits > 0 and bits <= Fe.saturated_bits * 3 and bits <= 512);
var fe = expanded.x1;
if (bits >= 192) {
const st1 = Fe.fromInt(1 << 192) catch unreachable;
fe = fe.add(expanded.x2.mul(st1));
if (bits >= 384) {
const st2 = st1.sq();
fe = fe.add(expanded.x3.mul(st2));
}
}
return Scalar{ .fe = fe };
}
};
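As far as the code suggests, ScalarDouble splits a wide little-endian input s into three limbs of at most 192 bits each, so that s = x1 + x2 * 2^192 + x3 * 2^384; reduce() then folds the upper limbs back into the field by multiplying them with 2^192 and 2^384 taken modulo L, and returns the result as a Scalar.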

View File

@ -0,0 +1,103 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const fmt = std.fmt;
const testing = std.testing;
const P256 = @import("p256.zig").P256;
test "p256 ECDH key exchange" {
const dha = P256.scalar.random(.Little);
const dhb = P256.scalar.random(.Little);
const dhA = try P256.basePoint.mul(dha, .Little);
const dhB = try P256.basePoint.mul(dhb, .Little);
const shareda = try dhA.mul(dhb, .Little);
const sharedb = try dhB.mul(dha, .Little);
testing.expect(shareda.equivalent(sharedb));
}
test "p256 point from affine coordinates" {
const xh = "6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296";
const yh = "4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5";
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
var ys: [32]u8 = undefined;
_ = try fmt.hexToBytes(&ys, yh);
var p = try P256.fromSerializedAffineCoordinates(xs, ys, .Big);
testing.expect(p.equivalent(P256.basePoint));
}
test "p256 test vectors" {
const expected = [_][]const u8{
"0000000000000000000000000000000000000000000000000000000000000000",
"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
"7cf27b188d034f7e8a52380304b51ac3c08969e277f21b35a60b48fc47669978",
"5ecbe4d1a6330a44c8f7ef951d4bf165e6c6b721efada985fb41661bc6e7fd6c",
"e2534a3532d08fbba02dde659ee62bd0031fe2db785596ef509302446b030852",
"51590b7a515140d2d784c85608668fdfef8c82fd1f5be52421554a0dc3d033ed",
"b01a172a76a4602c92d3242cb897dde3024c740debb215b4c6b0aae93c2291a9",
"8e533b6fa0bf7b4625bb30667c01fb607ef9f8b8a80fef5b300628703187b2a3",
"62d9779dbee9b0534042742d3ab54cadc1d238980fce97dbb4dd9dc1db6fb393",
"ea68d7b6fedf0b71878938d51d71f8729e0acb8c2c6df8b3d79e8a4b90949ee0",
};
var p = P256.identityElement;
for (expected) |xh| {
const x = p.affineCoordinates().x;
p = p.add(P256.basePoint);
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
}
}
test "p256 test vectors - doubling" {
const expected = [_][]const u8{
"6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296",
"7cf27b188d034f7e8a52380304b51ac3c08969e277f21b35a60b48fc47669978",
"e2534a3532d08fbba02dde659ee62bd0031fe2db785596ef509302446b030852",
"62d9779dbee9b0534042742d3ab54cadc1d238980fce97dbb4dd9dc1db6fb393",
};
var p = P256.basePoint;
for (expected) |xh| {
const x = p.affineCoordinates().x;
p = p.dbl();
var xs: [32]u8 = undefined;
_ = try fmt.hexToBytes(&xs, xh);
testing.expectEqualSlices(u8, &x.toBytes(.Big), &xs);
}
}
test "p256 compressed sec1 encoding/decoding" {
const p = P256.random();
const s = p.toCompressedSec1();
const q = try P256.fromSec1(&s);
testing.expect(p.equivalent(q));
}
test "p256 uncompressed sec1 encoding/decoding" {
const p = P256.random();
const s = p.toUncompressedSec1();
const q = try P256.fromSec1(&s);
testing.expect(p.equivalent(q));
}
test "p256 public key is the neutral element" {
const n = P256.scalar.Scalar.zero.toBytes(.Little);
const p = P256.random();
testing.expectError(error.IdentityElement, p.mul(n, .Little));
}
test "p256 public key is the neutral element (public verification)" {
const n = P256.scalar.Scalar.zero.toBytes(.Little);
const p = P256.random();
testing.expectError(error.IdentityElement, p.mulPublic(n, .Little));
}
test "p256 field element non-canonical encoding" {
const s = [_]u8{0xff} ** 32;
testing.expectError(error.NonCanonical, P256.Fe.fromBytes(s, .Little));
}

View File

@ -696,6 +696,11 @@ fn formatFloatValue(
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
} else if (comptime std.mem.eql(u8, fmt, "x")) {
formatFloatHexadecimal(value, options, buf_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
else => |e| return e,
};
} else {
@compileError("Unsupported format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");
}
@ -1023,6 +1028,112 @@ pub fn formatFloatScientific(
}
}
pub fn formatFloatHexadecimal(
value: anytype,
options: FormatOptions,
writer: anytype,
) !void {
if (math.signbit(value)) {
try writer.writeByte('-');
}
if (math.isNan(value)) {
return writer.writeAll("nan");
}
if (math.isInf(value)) {
return writer.writeAll("inf");
}
const T = @TypeOf(value);
const TU = std.meta.Int(.unsigned, std.meta.bitCount(T));
const mantissa_bits = math.floatMantissaBits(T);
const exponent_bits = math.floatExponentBits(T);
const mantissa_mask = (1 << mantissa_bits) - 1;
const exponent_mask = (1 << exponent_bits) - 1;
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const as_bits = @bitCast(TU, value);
var mantissa = as_bits & mantissa_mask;
var exponent: i32 = @truncate(u16, (as_bits >> mantissa_bits) & exponent_mask);
const is_denormal = exponent == 0 and mantissa != 0;
const is_zero = exponent == 0 and mantissa == 0;
if (is_zero) {
// Handle this case here to simplify the logic below.
try writer.writeAll("0x0");
if (options.precision) |precision| {
if (precision > 0) {
try writer.writeAll(".");
try writer.writeByteNTimes('0', precision);
}
} else {
try writer.writeAll(".0");
}
try writer.writeAll("p0");
return;
}
if (is_denormal) {
// Adjust the exponent for printing.
exponent += 1;
} else {
// Add the implicit 1.
mantissa |= 1 << mantissa_bits;
}
// Fill in zeroes to round the mantissa width to a multiple of 4.
if (T == f16) mantissa <<= 2 else if (T == f32) mantissa <<= 1;
const mantissa_digits = (mantissa_bits + 3) / 4;
if (options.precision) |precision| {
// Round if needed.
if (precision < mantissa_digits) {
// We always have at least 4 extra bits.
var extra_bits = (mantissa_digits - precision) * 4;
// The result LSB is the Guard bit; we need two more (Round and
// Sticky) to round the value.
while (extra_bits > 2) {
mantissa = (mantissa >> 1) | (mantissa & 1);
extra_bits -= 1;
}
// Round to nearest, tie to even.
mantissa |= @boolToInt(mantissa & 0b100 != 0);
mantissa += 1;
// Drop the excess bits.
mantissa >>= 2;
// Restore the alignment.
mantissa <<= @intCast(math.Log2Int(TU), (mantissa_digits - precision) * 4);
const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0;
// Prefer a normalized result in case of overflow.
if (overflow) {
mantissa >>= 1;
exponent += 1;
}
}
}
// +1 for the integer digit in front of the point.
var buf: [1 + mantissa_digits]u8 = undefined;
const N = formatIntBuf(&buf, mantissa, 16, false, .{ .fill = '0', .width = 1 + mantissa_digits });
try writer.writeAll("0x");
try writer.writeByte(buf[0]);
if (options.precision != @as(usize, 0))
try writer.writeAll(".");
const trimmed = mem.trimRight(u8, buf[1..], "0");
try writer.writeAll(trimmed);
// Add trailing zeros if explicitly requested.
if (options.precision) |precision| if (precision > 0) {
if (precision > trimmed.len)
try writer.writeByteNTimes('0', precision - trimmed.len);
};
try writer.writeAll("p");
try formatInt(exponent - exponent_bias, 10, false, .{}, writer);
}
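For illustration, a minimal sketch of reaching the hexadecimal float path above through the public std.fmt API; the value 3.0 and the buffer size are arbitrary choices:
const std = @import("std");
test "hexadecimal float formatting (illustrative sketch)" {
    var buf: [32]u8 = undefined;
    // 3.0 = 1.5 * 2^1, and 1.5 is 0x1.8, so the expected rendering is "0x1.8p1".
    const s = try std.fmt.bufPrint(&buf, "{x}", .{@as(f64, 3.0)});
    std.debug.assert(std.mem.eql(u8, s, "0x1.8p1"));
}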
/// Print a float of the format x.yyyyy where the number of y is specified by the precision argument.
/// By default floats are printed at full precision (no rounding).
pub fn formatFloatDecimal(
@ -1917,6 +2028,54 @@ test "float.special" {
try expectFmt("f64: -inf", "f64: {}", .{-math.inf_f64});
}
test "float.hexadecimal.special" {
try expectFmt("f64: nan", "f64: {x}", .{math.nan_f64});
// negative nan is not defined by IEEE 754,
// and ARM thus normalizes it to positive nan
if (builtin.arch != builtin.Arch.arm) {
try expectFmt("f64: -nan", "f64: {x}", .{-math.nan_f64});
}
try expectFmt("f64: inf", "f64: {x}", .{math.inf_f64});
try expectFmt("f64: -inf", "f64: {x}", .{-math.inf_f64});
try expectFmt("f64: 0x0.0p0", "f64: {x}", .{@as(f64, 0)});
try expectFmt("f64: -0x0.0p0", "f64: {x}", .{-@as(f64, 0)});
}
test "float.hexadecimal" {
try expectFmt("f16: 0x1.554p-2", "f16: {x}", .{@as(f16, 1.0 / 3.0)});
try expectFmt("f32: 0x1.555556p-2", "f32: {x}", .{@as(f32, 1.0 / 3.0)});
try expectFmt("f64: 0x1.5555555555555p-2", "f64: {x}", .{@as(f64, 1.0 / 3.0)});
try expectFmt("f128: 0x1.5555555555555555555555555555p-2", "f128: {x}", .{@as(f128, 1.0 / 3.0)});
try expectFmt("f16: 0x1.p-14", "f16: {x}", .{@as(f16, math.f16_min)});
try expectFmt("f32: 0x1.p-126", "f32: {x}", .{@as(f32, math.f32_min)});
try expectFmt("f64: 0x1.p-1022", "f64: {x}", .{@as(f64, math.f64_min)});
try expectFmt("f128: 0x1.p-16382", "f128: {x}", .{@as(f128, math.f128_min)});
try expectFmt("f16: 0x0.004p-14", "f16: {x}", .{@as(f16, math.f16_true_min)});
try expectFmt("f32: 0x0.000002p-126", "f32: {x}", .{@as(f32, math.f32_true_min)});
try expectFmt("f64: 0x0.0000000000001p-1022", "f64: {x}", .{@as(f64, math.f64_true_min)});
try expectFmt("f128: 0x0.0000000000000000000000000001p-16382", "f128: {x}", .{@as(f128, math.f128_true_min)});
try expectFmt("f16: 0x1.ffcp15", "f16: {x}", .{@as(f16, math.f16_max)});
try expectFmt("f32: 0x1.fffffep127", "f32: {x}", .{@as(f32, math.f32_max)});
try expectFmt("f64: 0x1.fffffffffffffp1023", "f64: {x}", .{@as(f64, math.f64_max)});
try expectFmt("f128: 0x1.ffffffffffffffffffffffffffffp16383", "f128: {x}", .{@as(f128, math.f128_max)});
}
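A quick sanity check on the 1/3 vectors above: 1/3 = (4/3) * 2^-2, and 4/3 has the infinite hexadecimal expansion 1.5555..., so every type prints 0x1.5...5p-2 cut to its mantissa width; the f32 result ends in 6 because its final digit rounds up.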
test "float.hexadecimal.precision" {
try expectFmt("f16: 0x1.5p-2", "f16: {x:.1}", .{@as(f16, 1.0 / 3.0)});
try expectFmt("f32: 0x1.555p-2", "f32: {x:.3}", .{@as(f32, 1.0 / 3.0)});
try expectFmt("f64: 0x1.55555p-2", "f64: {x:.5}", .{@as(f64, 1.0 / 3.0)});
try expectFmt("f128: 0x1.5555555p-2", "f128: {x:.7}", .{@as(f128, 1.0 / 3.0)});
try expectFmt("f16: 0x1.00000p0", "f16: {x:.5}", .{@as(f16, 1.0)});
try expectFmt("f32: 0x1.00000p0", "f32: {x:.5}", .{@as(f32, 1.0)});
try expectFmt("f64: 0x1.00000p0", "f64: {x:.5}", .{@as(f64, 1.0)});
try expectFmt("f128: 0x1.00000p0", "f128: {x:.5}", .{@as(f128, 1.0)});
}
test "float.decimal" {
try expectFmt("f64: 152314000000000000000000000000", "f64: {d}", .{@as(f64, 1.52314e+29)});
try expectFmt("f32: 0", "f32: {d}", .{@as(f32, 0.0)});

View File

@ -20,6 +20,7 @@ pub fn copysign(comptime T: type, x: T, y: T) T {
f16 => copysign16(x, y),
f32 => copysign32(x, y),
f64 => copysign64(x, y),
f128 => copysign128(x, y),
else => @compileError("copysign not implemented for " ++ @typeName(T)),
};
}
@ -51,10 +52,20 @@ fn copysign64(x: f64, y: f64) f64 {
return @bitCast(f64, h1 | h2);
}
fn copysign128(x: f128, y: f128) f128 {
const ux = @bitCast(u128, x);
const uy = @bitCast(u128, y);
const h1 = ux & (maxInt(u128) / 2);
const h2 = uy & (@as(u128, 1) << 127);
return @bitCast(f128, h1 | h2);
}
test "math.copysign" {
expect(copysign(f16, 1.0, 1.0) == copysign16(1.0, 1.0));
expect(copysign(f32, 1.0, 1.0) == copysign32(1.0, 1.0));
expect(copysign(f64, 1.0, 1.0) == copysign64(1.0, 1.0));
expect(copysign(f128, 1.0, 1.0) == copysign128(1.0, 1.0));
}
test "math.copysign16" {
@ -77,3 +88,10 @@ test "math.copysign64" {
expect(copysign64(-5.0, -1.0) == -5.0);
expect(copysign64(-5.0, 1.0) == 5.0);
}
test "math.copysign128" {
expect(copysign128(5.0, 1.0) == 5.0);
expect(copysign128(5.0, -1.0) == -5.0);
expect(copysign128(-5.0, -1.0) == -5.0);
expect(copysign128(-5.0, 1.0) == 5.0);
}

View File

@ -24,6 +24,10 @@ pub fn isFinite(x: anytype) bool {
const bits = @bitCast(u64, x);
return bits & (maxInt(u64) >> 1) < (0x7FF << 52);
},
f128 => {
const bits = @bitCast(u128, x);
return bits & (maxInt(u128) >> 1) < (0x7FFF << 112);
},
else => {
@compileError("isFinite not implemented for " ++ @typeName(T));
},
@ -37,10 +41,24 @@ test "math.isFinite" {
expect(isFinite(@as(f32, -0.0)));
expect(isFinite(@as(f64, 0.0)));
expect(isFinite(@as(f64, -0.0)));
expect(isFinite(@as(f128, 0.0)));
expect(isFinite(@as(f128, -0.0)));
expect(!isFinite(math.inf(f16)));
expect(!isFinite(-math.inf(f16)));
expect(!isFinite(math.inf(f32)));
expect(!isFinite(-math.inf(f32)));
expect(!isFinite(math.inf(f64)));
expect(!isFinite(-math.inf(f64)));
expect(!isFinite(math.inf(f128)));
expect(!isFinite(-math.inf(f128)));
expect(!isFinite(math.nan(f16)));
expect(!isFinite(-math.nan(f16)));
expect(!isFinite(math.nan(f32)));
expect(!isFinite(-math.nan(f32)));
expect(!isFinite(math.nan(f64)));
expect(!isFinite(-math.nan(f64)));
expect(!isFinite(math.nan(f128)));
expect(!isFinite(-math.nan(f128)));
}

View File

@ -14,6 +14,7 @@ pub fn signbit(x: anytype) bool {
f16 => signbit16(x),
f32 => signbit32(x),
f64 => signbit64(x),
f128 => signbit128(x),
else => @compileError("signbit not implemented for " ++ @typeName(T)),
};
}
@ -33,10 +34,16 @@ fn signbit64(x: f64) bool {
return bits >> 63 != 0;
}
fn signbit128(x: f128) bool {
const bits = @bitCast(u128, x);
return bits >> 127 != 0;
}
test "math.signbit" {
expect(signbit(@as(f16, 4.0)) == signbit16(4.0));
expect(signbit(@as(f32, 4.0)) == signbit32(4.0));
expect(signbit(@as(f64, 4.0)) == signbit64(4.0));
expect(signbit(@as(f128, 4.0)) == signbit128(4.0));
}
test "math.signbit16" {
@ -53,3 +60,8 @@ test "math.signbit64" {
expect(!signbit64(4.0));
expect(signbit64(-3.0));
}
test "math.signbit128" {
expect(!signbit128(4.0));
expect(signbit128(-3.0));
}

View File

@ -832,6 +832,13 @@ pub const SO_RCVTIMEO = 0x1006;
pub const SO_ERROR = 0x1007;
pub const SO_TYPE = 0x1008;
pub const SO_NREAD = 0x1020;
pub const SO_NKE = 0x1021;
pub const SO_NOSIGPIPE = 0x1022;
pub const SO_NOADDRERR = 0x1023;
pub const SO_NWRITE = 0x1024;
pub const SO_REUSESHAREUID = 0x1025;
fn wstatus(x: u32) u32 {
return x & 0o177;
}

View File

@ -5,11 +5,12 @@
// and substantial portions of the software.
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
const arch = std.Target.current.cpu.arch;
const arch = @import("builtin").target.cpu.arch;
usingnamespace @import("../bits.zig");
pub usingnamespace switch (arch) {
.mips, .mipsel => @import("linux/errno-mips.zig"),
.sparc, .sparcel, .sparcv9 => @import("linux/errno-sparc.zig"),
else => @import("linux/errno-generic.zig"),
};
@ -31,6 +32,7 @@ pub usingnamespace @import("linux/prctl.zig");
pub usingnamespace @import("linux/securebits.zig");
const is_mips = arch.isMIPS();
const is_ppc = arch.isPPC();
const is_ppc64 = arch.isPPC64();
const is_sparc = arch.isSPARC();
@ -247,40 +249,78 @@ else
pub const SIG_SETMASK = 2;
};
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGIOT = SIGABRT;
pub const SIGBUS = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGUSR1 = 10;
pub const SIGSEGV = 11;
pub const SIGUSR2 = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGSTKFLT = 16;
pub const SIGCHLD = 17;
pub const SIGCONT = 18;
pub const SIGSTOP = 19;
pub const SIGTSTP = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGURG = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGIO = 29;
pub const SIGPOLL = 29;
pub const SIGPWR = 30;
pub const SIGSYS = 31;
pub const SIGUNUSED = SIGSYS;
pub usingnamespace if (is_sparc) struct {
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGEMT = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGBUS = 10;
pub const SIGSEGV = 11;
pub const SIGSYS = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGURG = 16;
pub const SIGSTOP = 17;
pub const SIGTSTP = 18;
pub const SIGCONT = 19;
pub const SIGCHLD = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGPOLL = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGLOST = 29;
pub const SIGUSR1 = 30;
pub const SIGUSR2 = 31;
pub const SIGIOT = SIGABRT;
pub const SIGCLD = SIGCHLD;
pub const SIGPWR = SIGLOST;
pub const SIGIO = SIGPOLL;
} else struct {
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGIOT = SIGABRT;
pub const SIGBUS = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGUSR1 = 10;
pub const SIGSEGV = 11;
pub const SIGUSR2 = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGSTKFLT = 16;
pub const SIGCHLD = 17;
pub const SIGCONT = 18;
pub const SIGSTOP = 19;
pub const SIGTSTP = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGURG = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGIO = 29;
pub const SIGPOLL = 29;
pub const SIGPWR = 30;
pub const SIGSYS = 31;
pub const SIGUNUSED = SIGSYS;
};
pub const O_RDONLY = 0o0;
pub const O_WRONLY = 0o1;
@ -419,7 +459,39 @@ pub const AF_QIPCRTR = PF_QIPCRTR;
pub const AF_SMC = PF_SMC;
pub const AF_MAX = PF_MAX;
pub usingnamespace if (!is_mips)
pub usingnamespace if (is_mips)
struct {}
else if (is_ppc or is_ppc64)
struct {
pub const SO_DEBUG = 1;
pub const SO_REUSEADDR = 2;
pub const SO_TYPE = 3;
pub const SO_ERROR = 4;
pub const SO_DONTROUTE = 5;
pub const SO_BROADCAST = 6;
pub const SO_SNDBUF = 7;
pub const SO_RCVBUF = 8;
pub const SO_KEEPALIVE = 9;
pub const SO_OOBINLINE = 10;
pub const SO_NO_CHECK = 11;
pub const SO_PRIORITY = 12;
pub const SO_LINGER = 13;
pub const SO_BSDCOMPAT = 14;
pub const SO_REUSEPORT = 15;
pub const SO_RCVLOWAT = 16;
pub const SO_SNDLOWAT = 17;
pub const SO_RCVTIMEO = 18;
pub const SO_SNDTIMEO = 19;
pub const SO_PASSCRED = 20;
pub const SO_PEERCRED = 21;
pub const SO_ACCEPTCONN = 30;
pub const SO_PEERSEC = 31;
pub const SO_SNDBUFFORCE = 32;
pub const SO_RCVBUFFORCE = 33;
pub const SO_PROTOCOL = 38;
pub const SO_DOMAIN = 39;
}
else
struct {
pub const SO_DEBUG = 1;
pub const SO_REUSEADDR = 2;
@ -448,9 +520,7 @@ pub usingnamespace if (!is_mips)
pub const SO_RCVBUFFORCE = 33;
pub const SO_PROTOCOL = 38;
pub const SO_DOMAIN = 39;
}
else
struct {};
};
pub const SO_SECURITY_AUTHENTICATION = 22;
pub const SO_SECURITY_ENCRYPTION_TRANSPORT = 23;

View File

@ -9,6 +9,7 @@
const std = @import("../../../std.zig");
const linux = std.os.linux;
const socklen_t = linux.socklen_t;
const sockaddr = linux.sockaddr;
const iovec = linux.iovec;
const iovec_const = linux.iovec_const;
const uid_t = linux.uid_t;

View File

@ -3,6 +3,9 @@
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// These are MIPS ABI compatible.
pub const EPERM = 1;
pub const ENOENT = 2;
pub const ESRCH = 3;
@ -37,6 +40,7 @@ pub const EMLINK = 31;
pub const EPIPE = 32;
pub const EDOM = 33;
pub const ERANGE = 34;
pub const ENOMSG = 35;
pub const EIDRM = 36;
pub const ECHRNG = 37;

View File

@ -0,0 +1,144 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
// These match the SunOS error numbering scheme.
pub const EPERM = 1;
pub const ENOENT = 2;
pub const ESRCH = 3;
pub const EINTR = 4;
pub const EIO = 5;
pub const ENXIO = 6;
pub const E2BIG = 7;
pub const ENOEXEC = 8;
pub const EBADF = 9;
pub const ECHILD = 10;
pub const EAGAIN = 11;
pub const ENOMEM = 12;
pub const EACCES = 13;
pub const EFAULT = 14;
pub const ENOTBLK = 15;
pub const EBUSY = 16;
pub const EEXIST = 17;
pub const EXDEV = 18;
pub const ENODEV = 19;
pub const ENOTDIR = 20;
pub const EISDIR = 21;
pub const EINVAL = 22;
pub const ENFILE = 23;
pub const EMFILE = 24;
pub const ENOTTY = 25;
pub const ETXTBSY = 26;
pub const EFBIG = 27;
pub const ENOSPC = 28;
pub const ESPIPE = 29;
pub const EROFS = 30;
pub const EMLINK = 31;
pub const EPIPE = 32;
pub const EDOM = 33;
pub const ERANGE = 34;
pub const EWOULDBLOCK = EAGAIN;
pub const EINPROGRESS = 36;
pub const EALREADY = 37;
pub const ENOTSOCK = 38;
pub const EDESTADDRREQ = 39;
pub const EMSGSIZE = 40;
pub const EPROTOTYPE = 41;
pub const ENOPROTOOPT = 42;
pub const EPROTONOSUPPORT = 43;
pub const ESOCKTNOSUPPORT = 44;
pub const EOPNOTSUPP = 45;
pub const EPFNOSUPPORT = 46;
pub const EAFNOSUPPORT = 47;
pub const EADDRINUSE = 48;
pub const EADDRNOTAVAIL = 49;
pub const ENETDOWN = 50;
pub const ENETUNREACH = 51;
pub const ENETRESET = 52;
pub const ECONNABORTED = 53;
pub const ECONNRESET = 54;
pub const ENOBUFS = 55;
pub const EISCONN = 56;
pub const ENOTCONN = 57;
pub const ESHUTDOWN = 58;
pub const ETOOMANYREFS = 59;
pub const ETIMEDOUT = 60;
pub const ECONNREFUSED = 61;
pub const ELOOP = 62;
pub const ENAMETOOLONG = 63;
pub const EHOSTDOWN = 64;
pub const EHOSTUNREACH = 65;
pub const ENOTEMPTY = 66;
pub const EPROCLIM = 67;
pub const EUSERS = 68;
pub const EDQUOT = 69;
pub const ESTALE = 70;
pub const EREMOTE = 71;
pub const ENOSTR = 72;
pub const ETIME = 73;
pub const ENOSR = 74;
pub const ENOMSG = 75;
pub const EBADMSG = 76;
pub const EIDRM = 77;
pub const EDEADLK = 78;
pub const ENOLCK = 79;
pub const ENONET = 80;
pub const ERREMOTE = 81;
pub const ENOLINK = 82;
pub const EADV = 83;
pub const ESRMNT = 84;
pub const ECOMM = 85;
pub const EPROTO = 86;
pub const EMULTIHOP = 87;
pub const EDOTDOT = 88;
pub const EREMCHG = 89;
pub const ENOSYS = 90;
pub const ESTRPIPE = 91;
pub const EOVERFLOW = 92;
pub const EBADFD = 93;
pub const ECHRNG = 94;
pub const EL2NSYNC = 95;
pub const EL3HLT = 96;
pub const EL3RST = 97;
pub const ELNRNG = 98;
pub const EUNATCH = 99;
pub const ENOCSI = 100;
pub const EL2HLT = 101;
pub const EBADE = 102;
pub const EBADR = 103;
pub const EXFULL = 104;
pub const ENOANO = 105;
pub const EBADRQC = 106;
pub const EBADSLT = 107;
pub const EDEADLOCK = 108;
pub const EBFONT = 109;
pub const ELIBEXEC = 110;
pub const ENODATA = 111;
pub const ELIBBAD = 112;
pub const ENOPKG = 113;
pub const ELIBACC = 114;
pub const ENOTUNIQ = 115;
pub const ERESTART = 116;
pub const EUCLEAN = 117;
pub const ENOTNAM = 118;
pub const ENAVAIL = 119;
pub const EISNAM = 120;
pub const EREMOTEIO = 121;
pub const EILSEQ = 122;
pub const ELIBMAX = 123;
pub const ELIBSCN = 124;
pub const ENOMEDIUM = 125;
pub const EMEDIUMTYPE = 126;
pub const ECANCELED = 127;
pub const ENOKEY = 128;
pub const EKEYEXPIRED = 129;
pub const EKEYREVOKED = 130;
pub const EKEYREJECTED = 131;
pub const EOWNERDEAD = 132;
pub const ENOTRECOVERABLE = 133;
pub const ERFKILL = 134;
pub const EHWPOISON = 135;

View File

@ -376,6 +376,11 @@ pub const timespec = extern struct {
tv_nsec: isize,
};
pub const timeval = extern struct {
tv_sec: time_t,
tv_usec: i64,
};
pub const Flock = extern struct {
l_type: i16,
l_whence: i16,

View File

@ -22,6 +22,11 @@ pub const timespec = extern struct {
tv_nsec: c_long,
};
pub const timeval = extern struct {
tv_sec: c_long,
tv_usec: c_long,
};
pub const sig_atomic_t = c_int;
/// maximum signal number + 1

View File

@ -387,7 +387,7 @@ pub fn symlinkat(existing: [*:0]const u8, newfd: i32, newpath: [*:0]const u8) us
}
pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: u64) usize {
if (@hasField(SYS, "pread64")) {
if (@hasField(SYS, "pread64") and usize_bits < 64) {
const offset_halves = splitValue64(offset);
if (require_aligned_register_pair) {
return syscall6(
@ -410,8 +410,10 @@ pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: u64) usize {
);
}
} else {
// On some architectures (e.g. 64-bit SPARC), pread is called pread64.
const S = if (!@hasField(SYS, "pread") and @hasField(SYS, "pread64")) .pread64 else .pread;
return syscall4(
.pread,
S,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,
@ -451,7 +453,7 @@ pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
}
pub fn ftruncate(fd: i32, length: u64) usize {
if (@hasField(SYS, "ftruncate64")) {
if (@hasField(SYS, "ftruncate64") and usize_bits < 64) {
const length_halves = splitValue64(length);
if (require_aligned_register_pair) {
return syscall4(
@ -479,7 +481,7 @@ pub fn ftruncate(fd: i32, length: u64) usize {
}
pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: u64) usize {
if (@hasField(SYS, "pwrite64")) {
if (@hasField(SYS, "pwrite64") and usize_bits < 64) {
const offset_halves = splitValue64(offset);
if (require_aligned_register_pair) {
@ -503,8 +505,10 @@ pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: u64) usize {
);
}
} else {
// On some architectures (e.g. 64-bit SPARC), pwrite is called pwrite64.
const S = if (!@hasField(SYS, "pwrite") and @hasField(SYS, "pwrite64")) .pwrite64 else .pwrite;
return syscall4(
.pwrite,
S,
@bitCast(usize, @as(isize, fd)),
@ptrToInt(buf),
count,

View File

@ -617,23 +617,24 @@ fn clone() callconv(.Naked) void {
\\ # Shuffle the arguments
\\ mov 217, %%g1
\\ mov %%i2, %%o0
\\ sub %%i1, 2047, %%o1
\\ # Add some extra space for the initial frame
\\ sub %%i1, 176 + 2047, %%o1
\\ mov %%i4, %%o2
\\ mov %%i5, %%o3
\\ ldx [%%fp + 192 - 2*8 + 2047], %%o4
\\ ldx [%%fp + 0x8af], %%o4
\\ t 0x6d
\\ bcs,pn %%xcc, 2f
\\ nop
\\ # sparc64 returns the child pid in o0 and a flag telling
\\ # whether the process is the child in o1
\\ # The child pid is returned in o0 while o1 tells if this
\\ # process is the child (=1) or the parent (=0).
\\ brnz %%o1, 1f
\\ nop
\\ # This is the parent process, return the child pid
\\ # Parent process, return the child pid
\\ mov %%o0, %%i0
\\ ret
\\ restore
\\1:
\\ # This is the child process
\\ # Child process, call func(arg)
\\ mov %%g0, %%fp
\\ call %%g2
\\ mov %%g3, %%o0

View File

@ -1621,6 +1621,16 @@ pub const cpu = struct {
.apple_a7,
}),
};
pub const emag = CpuModel{
.name = "emag",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.v8a,
}),
};
pub const exynos_m1 = CpuModel{
.name = "exynos_m1",
.llvm_name = null,
@ -1867,4 +1877,12 @@ pub const cpu = struct {
.v8_2a,
}),
};
pub const xgene1 = CpuModel{
.name = "xgene1",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.perfmon,
.v8a,
}),
};
};

View File

@ -1 +1,23 @@
pub const os = @import("x/os/os.zig");
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
pub const os = struct {
pub const Socket = @import("x/os/Socket.zig");
pub usingnamespace @import("x/os/net.zig");
};
pub const net = struct {
pub const ip = @import("x/net/ip.zig");
pub const tcp = @import("x/net/tcp.zig");
};
test {
inline for (.{ os, net }) |module| {
std.testing.refAllDecls(module);
}
}

61
lib/std/x/net/ip.zig Normal file
View File

@ -0,0 +1,61 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../../std.zig");
const fmt = std.fmt;
const IPv4 = std.x.os.IPv4;
const IPv6 = std.x.os.IPv6;
const Socket = std.x.os.Socket;
/// A generic IP abstraction.
const ip = @This();
/// A union of all eligible types of IP addresses.
pub const Address = union(enum) {
ipv4: IPv4.Address,
ipv6: IPv6.Address,
/// Instantiate a new address with an IPv4 host and port.
pub fn initIPv4(host: IPv4, port: u16) Address {
return .{ .ipv4 = .{ .host = host, .port = port } };
}
/// Instantiate a new address with an IPv6 host and port.
pub fn initIPv6(host: IPv6, port: u16) Address {
return .{ .ipv6 = .{ .host = host, .port = port } };
}
/// Re-interpret a generic socket address into an IP address.
pub fn from(address: Socket.Address) ip.Address {
return switch (address) {
.ipv4 => |ipv4_address| .{ .ipv4 = ipv4_address },
.ipv6 => |ipv6_address| .{ .ipv6 = ipv6_address },
};
}
/// Re-interpret an IP address into a generic socket address.
pub fn into(self: ip.Address) Socket.Address {
return switch (self) {
.ipv4 => |ipv4_address| .{ .ipv4 = ipv4_address },
.ipv6 => |ipv6_address| .{ .ipv6 = ipv6_address },
};
}
/// Implements the `std.fmt.format` API.
pub fn format(
self: ip.Address,
comptime layout: []const u8,
opts: fmt.FormatOptions,
writer: anytype,
) !void {
switch (self) {
.ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
.ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
}
}
};
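For illustration, a minimal sketch of constructing and printing such an address; it assumes the file's imports shown above plus the `IPv4.localhost` constant from the companion os/net.zig added in this commit:
test "ip.Address formatting (illustrative sketch)" {
    const addr = ip.Address.initIPv4(IPv4.localhost, 8080);
    var buf: [32]u8 = undefined;
    const s = try fmt.bufPrint(&buf, "{}", .{addr});
    // The format() implementation above should render this as "127.0.0.1:8080".
    std.debug.assert(std.mem.eql(u8, s, "127.0.0.1:8080"));
}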

352
lib/std/x/net/tcp.zig Normal file
View File

@ -0,0 +1,352 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../../std.zig");
const os = std.os;
const ip = std.x.net.ip;
const fmt = std.fmt;
const mem = std.mem;
const builtin = std.builtin;
const testing = std.testing;
const IPv4 = std.x.os.IPv4;
const IPv6 = std.x.os.IPv6;
const Socket = std.x.os.Socket;
/// A generic TCP socket abstraction.
const tcp = @This();
/// A TCP client-address pair.
pub const Connection = struct {
client: tcp.Client,
address: ip.Address,
/// Enclose a TCP client and address into a client-address pair.
pub fn from(conn: Socket.Connection) tcp.Connection {
return .{
.client = tcp.Client.from(conn.socket),
.address = ip.Address.from(conn.address),
};
}
/// Unravel a TCP client-address pair into a socket-address pair.
pub fn into(self: tcp.Connection) Socket.Connection {
return .{
.socket = self.client.socket,
.address = self.address.into(),
};
}
/// Closes the underlying client of the connection.
pub fn deinit(self: tcp.Connection) void {
self.client.deinit();
}
};
/// Possible domains that a TCP client/listener may operate over.
pub const Domain = extern enum(u16) {
ip = os.AF_INET,
ipv6 = os.AF_INET6,
};
/// A TCP client.
pub const Client = struct {
socket: Socket,
/// Opens a new client.
pub fn init(domain: tcp.Domain, flags: u32) !Client {
return Client{
.socket = try Socket.init(
@enumToInt(domain),
os.SOCK_STREAM | flags,
os.IPPROTO_TCP,
),
};
}
/// Enclose a TCP client over an existing socket.
pub fn from(socket: Socket) Client {
return Client{ .socket = socket };
}
/// Closes the client.
pub fn deinit(self: Client) void {
self.socket.deinit();
}
/// Shutdown either the read side, write side, or all sides of the client's underlying socket.
pub fn shutdown(self: Client, how: os.ShutdownHow) !void {
return self.socket.shutdown(how);
}
/// Have the client attempt to connect to an address.
pub fn connect(self: Client, address: ip.Address) !void {
return self.socket.connect(address.into());
}
/// Read data from the socket into the buffer provided. It returns the
/// number of bytes read into the buffer provided.
pub fn read(self: Client, buf: []u8) !usize {
return self.socket.read(buf);
}
/// Read data from the socket into the buffer provided with a set of flags
/// specified. It returns the number of bytes read into the buffer provided.
pub fn recv(self: Client, buf: []u8, flags: u32) !usize {
return self.socket.recv(buf, flags);
}
/// Write a buffer of data provided to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn write(self: Client, buf: []const u8) !usize {
return self.socket.write(buf);
}
/// Writes multiple I/O vectors to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn writev(self: Client, buffers: []const os.iovec_const) !usize {
return self.socket.writev(buffers);
}
/// Write a buffer of data provided to the socket with a set of flags specified.
/// It returns the number of bytes that are written to the socket.
pub fn send(self: Client, buf: []const u8, flags: u32) !usize {
return self.socket.send(buf, flags);
}
/// Writes multiple I/O vectors with a prepended message header to the socket
/// with a set of flags specified. It returns the number of bytes that are
/// written to the socket.
pub fn sendmsg(self: Client, msg: os.msghdr_const, flags: u32) !usize {
return self.socket.sendmsg(msg, flags);
}
/// Query and return the latest cached error on the client's underlying socket.
pub fn getError(self: Client) !void {
return self.socket.getError();
}
/// Query the read buffer size of the client's underlying socket.
pub fn getReadBufferSize(self: Client) !u32 {
return self.socket.getReadBufferSize();
}
/// Query the write buffer size of the client's underlying socket.
pub fn getWriteBufferSize(self: Client) !u32 {
return self.socket.getWriteBufferSize();
}
/// Query the address that the client's socket is locally bound to.
pub fn getLocalAddress(self: Client) !ip.Address {
return ip.Address.from(try self.socket.getLocalAddress());
}
/// Disable Nagle's algorithm on a TCP socket. It returns `error.UnsupportedSocketOption` if
/// the host does not support disabling Nagle's algorithm on sockets.
pub fn setNoDelay(self: Client, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_NODELAY")) {
const bytes = mem.asBytes(&@as(usize, @boolToInt(enabled)));
return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_NODELAY, bytes);
}
return error.UnsupportedSocketOption;
}
/// Set the write buffer size of the socket.
pub fn setWriteBufferSize(self: Client, size: u32) !void {
return self.socket.setWriteBufferSize(size);
}
/// Set the read buffer size of the socket.
pub fn setReadBufferSize(self: Client, size: u32) !void {
return self.socket.setReadBufferSize(size);
}
/// Set a timeout on the socket that is to occur if no messages are successfully written
/// to its bound destination after a specified number of milliseconds. A subsequent write
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setWriteTimeout(self: Client, milliseconds: usize) !void {
return self.socket.setWriteTimeout(milliseconds);
}
/// Set a timeout on the socket that is to occur if no messages are successfully read
/// from its bound destination after a specified number of milliseconds. A subsequent
/// read from the socket will thereafter return `error.WouldBlock` should the timeout be
/// exceeded.
pub fn setReadTimeout(self: Client, milliseconds: usize) !void {
return self.socket.setReadTimeout(milliseconds);
}
};
/// A TCP listener.
pub const Listener = struct {
socket: Socket,
/// Opens a new listener.
pub fn init(domain: tcp.Domain, flags: u32) !Listener {
return Listener{
.socket = try Socket.init(
@enumToInt(domain),
os.SOCK_STREAM | flags,
os.IPPROTO_TCP,
),
};
}
/// Closes the listener.
pub fn deinit(self: Listener) void {
self.socket.deinit();
}
/// Shuts down the listener's underlying socket. Once shutdown has been called,
/// any subsequent or currently pending call to accept() will return
/// an error.
pub fn shutdown(self: Listener) !void {
return self.socket.shutdown(.recv);
}
/// Binds the listener's socket to an address.
pub fn bind(self: Listener, address: ip.Address) !void {
return self.socket.bind(address.into());
}
/// Start listening for incoming connections.
pub fn listen(self: Listener, max_backlog_size: u31) !void {
return self.socket.listen(max_backlog_size);
}
/// Accept a pending incoming connection queued to the kernel backlog
/// of the listener's socket.
pub fn accept(self: Listener, flags: u32) !tcp.Connection {
return tcp.Connection.from(try self.socket.accept(flags));
}
/// Query and return the latest cached error on the listener's underlying socket.
pub fn getError(self: Listener) !void {
return self.socket.getError();
}
/// Query the address that the listener's socket is locally bound to.
pub fn getLocalAddress(self: Listener) !ip.Address {
return ip.Address.from(try self.socket.getLocalAddress());
}
/// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
/// the host does not support sockets listening on the same address.
pub fn setReuseAddress(self: Listener, enabled: bool) !void {
return self.socket.setReuseAddress(enabled);
}
/// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
/// the host does not support sockets listening on the same port.
pub fn setReusePort(self: Listener, enabled: bool) !void {
return self.socket.setReusePort(enabled);
}
/// Enables TCP Fast Open (RFC 7413) on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not
/// support TCP Fast Open.
pub fn setFastOpen(self: Listener, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_FASTOPEN")) {
return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns
/// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK.
pub fn setQuickACK(self: Listener, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_QUICKACK")) {
return os.setsockopt(self.socket.fd, os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Set a timeout on the listener that is to occur if no new incoming connections come in
/// after a specified number of milliseconds. A subsequent accept call to the listener
/// will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setAcceptTimeout(self: Listener, milliseconds: usize) !void {
return self.socket.setReadTimeout(milliseconds);
}
};
test "tcp: create client/listener pair" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const listener = try tcp.Listener.init(.ip, os.SOCK_CLOEXEC);
defer listener.deinit();
try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
try listener.listen(128);
var binded_address = try listener.getLocalAddress();
switch (binded_address) {
.ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
.ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
}
const client = try tcp.Client.init(.ip, os.SOCK_CLOEXEC);
defer client.deinit();
try client.connect(binded_address);
const conn = try listener.accept(os.SOCK_CLOEXEC);
defer conn.deinit();
}
test "tcp/client: set read timeout of 1 millisecond on blocking client" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const listener = try tcp.Listener.init(.ip, os.SOCK_CLOEXEC);
defer listener.deinit();
try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
try listener.listen(128);
var binded_address = try listener.getLocalAddress();
switch (binded_address) {
.ipv4 => |*ipv4| ipv4.host = IPv4.localhost,
.ipv6 => |*ipv6| ipv6.host = IPv6.localhost,
}
const client = try tcp.Client.init(.ip, os.SOCK_CLOEXEC);
defer client.deinit();
try client.connect(binded_address);
try client.setReadTimeout(1);
const conn = try listener.accept(os.SOCK_CLOEXEC);
defer conn.deinit();
var buf: [1]u8 = undefined;
testing.expectError(error.WouldBlock, client.read(&buf));
}
test "tcp/listener: bind to unspecified ipv4 address" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const listener = try tcp.Listener.init(.ip, os.SOCK_CLOEXEC);
defer listener.deinit();
try listener.bind(ip.Address.initIPv4(IPv4.unspecified, 0));
try listener.listen(128);
const address = try listener.getLocalAddress();
testing.expect(address == .ipv4);
}
test "tcp/listener: bind to unspecified ipv6 address" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const listener = try tcp.Listener.init(.ipv6, os.SOCK_CLOEXEC);
defer listener.deinit();
try listener.bind(ip.Address.initIPv6(IPv6.unspecified, 0));
try listener.listen(128);
const address = try listener.getLocalAddress();
testing.expect(address == .ipv6);
}

View File

@ -1,18 +1,110 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../../std.zig");
const net = @import("net.zig");
const os = std.os;
const fmt = std.fmt;
const mem = std.mem;
const net = std.net;
const time = std.time;
const builtin = std.builtin;
const testing = std.testing;
/// A generic socket abstraction.
const Socket = @This();
/// A socket-address pair.
pub const Connection = struct {
socket: Socket,
address: net.Address,
address: Socket.Address,
/// Enclose a socket and address into a socket-address pair.
pub fn from(socket: Socket, address: Socket.Address) Socket.Connection {
return .{ .socket = socket, .address = address };
}
};
/// A generic socket address abstraction. It is safe to directly access and modify
/// the fields of a `Socket.Address`.
pub const Address = union(enum) {
ipv4: net.IPv4.Address,
ipv6: net.IPv6.Address,
/// Instantiate a new address with an IPv4 host and port.
pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address {
return .{ .ipv4 = .{ .host = host, .port = port } };
}
/// Instantiate a new address with an IPv6 host and port.
pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address {
return .{ .ipv6 = .{ .host = host, .port = port } };
}
/// Parses a `sockaddr` into a generic socket address.
pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address {
switch (address.family) {
os.AF_INET => {
const info = @ptrCast(*const os.sockaddr_in, address);
const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) };
const port = mem.bigToNative(u16, info.port);
return Socket.Address.initIPv4(host, port);
},
os.AF_INET6 => {
const info = @ptrCast(*const os.sockaddr_in6, address);
const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id };
const port = mem.bigToNative(u16, info.port);
return Socket.Address.initIPv6(host, port);
},
else => unreachable,
}
}
/// Encodes a generic socket address into an extern union that may be reliably
/// casted into a `sockaddr` which may be passed into socket syscalls.
pub fn toNative(self: Socket.Address) extern union {
ipv4: os.sockaddr_in,
ipv6: os.sockaddr_in6,
} {
return switch (self) {
.ipv4 => |address| .{
.ipv4 = .{
.addr = @bitCast(u32, address.host.octets),
.port = mem.nativeToBig(u16, address.port),
},
},
.ipv6 => |address| .{
.ipv6 = .{
.addr = address.host.octets,
.port = mem.nativeToBig(u16, address.port),
.scope_id = address.host.scope_id,
.flowinfo = 0,
},
},
};
}
/// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
pub fn getNativeSize(self: Socket.Address) u32 {
return switch (self) {
.ipv4 => @sizeOf(os.sockaddr_in),
.ipv6 => @sizeOf(os.sockaddr_in6),
};
}
/// Implements the `std.fmt.format` API.
pub fn format(
self: Socket.Address,
comptime layout: []const u8,
opts: fmt.FormatOptions,
writer: anytype,
) !void {
switch (self) {
.ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
.ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
}
}
};
/// The underlying handle of a socket.
@ -23,19 +115,24 @@ pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket {
return Socket{ .fd = try os.socket(domain, socket_type, protocol) };
}
/// Enclose a socket abstraction over an existing socket file descriptor.
pub fn from(fd: os.socket_t) Socket {
return Socket{ .fd = fd };
}
/// Closes the socket.
pub fn deinit(self: Socket) void {
os.closeSocket(self.fd);
}
/// Shutdown either the read side, or write side, or the entirety of a socket.
/// Shutdown either the read side, write side, or all sides of the socket.
pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
return os.shutdown(self.fd, how);
}
/// Binds the socket to an address.
pub fn bind(self: Socket, address: net.Address) !void {
return os.bind(self.fd, &address.any, address.getOsSockLen());
pub fn bind(self: Socket, address: Socket.Address) !void {
return os.bind(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
}
/// Start listening for incoming connections on the socket.
@ -44,8 +141,8 @@ pub fn listen(self: Socket, max_backlog_size: u31) !void {
}
/// Have the socket attempt to connect to an address.
pub fn connect(self: Socket, address: net.Address) !void {
return os.connect(self.fd, &address.any, address.getOsSockLen());
pub fn connect(self: Socket, address: Socket.Address) !void {
return os.connect(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
}
/// Accept a pending incoming connection queued to the kernel backlog
@ -54,12 +151,10 @@ pub fn accept(self: Socket, flags: u32) !Socket.Connection {
var address: os.sockaddr = undefined;
var address_len: u32 = @sizeOf(os.sockaddr);
const fd = try os.accept(self.fd, &address, &address_len, flags);
const socket = Socket{ .fd = try os.accept(self.fd, &address, &address_len, flags) };
const socket_address = Socket.Address.fromNative(@alignCast(4, &address));
return Connection{
.socket = Socket{ .fd = fd },
.address = net.Address.initPosix(@alignCast(4, &address)),
};
return Socket.Connection.from(socket, socket_address);
}
/// Read data from the socket into the buffer provided. It returns the
@ -100,11 +195,11 @@ pub fn sendmsg(self: Socket, msg: os.msghdr_const, flags: u32) !usize {
}
/// Query the address that the socket is locally bound to.
pub fn getLocalAddress(self: Socket) !net.Address {
pub fn getLocalAddress(self: Socket) !Socket.Address {
var address: os.sockaddr = undefined;
var address_len: u32 = @sizeOf(os.sockaddr);
try os.getsockname(self.fd, &address, &address_len);
return net.Address.initPosix(@alignCast(4, &address));
return Socket.Address.fromNative(@alignCast(4, &address));
}
/// Query and return the latest cached error on the socket.
@ -164,33 +259,6 @@ pub fn setReusePort(self: Socket, enabled: bool) !void {
return error.UnsupportedSocketOption;
}
/// Disable Nagle's algorithm on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not support
/// sockets disabling Nagle's algorithm.
pub fn setNoDelay(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_NODELAY")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_NODELAY, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Enables TCP Fast Open (RFC 7413) on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not
/// support TCP Fast Open.
pub fn setFastOpen(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_FASTOPEN")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns
/// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK.
pub fn setQuickACK(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_QUICKACK")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Set the write buffer size of the socket.
pub fn setWriteBufferSize(self: Socket, size: u32) !void {
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size));
@ -206,8 +274,8 @@ pub fn setReadBufferSize(self: Socket, size: u32) !void {
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void {
const timeout = os.timeval{
.tv_sec = @intCast(isize, milliseconds / time.ms_per_s),
.tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms),
.tv_sec = @intCast(i32, milliseconds / time.ms_per_s),
.tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms),
};
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&timeout));
@ -219,58 +287,9 @@ pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void {
/// exceeded.
pub fn setReadTimeout(self: Socket, milliseconds: usize) !void {
const timeout = os.timeval{
.tv_sec = @intCast(isize, milliseconds / time.ms_per_s),
.tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms),
.tv_sec = @intCast(i32, milliseconds / time.ms_per_s),
.tv_usec = @intCast(i32, (milliseconds % time.ms_per_s) * time.us_per_ms),
};
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&timeout));
}
test {
testing.refAllDecls(@This());
}
test "socket/linux: set read timeout of 1 millisecond on blocking socket" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer a.deinit();
try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
try a.listen(128);
const binded_address = try a.getLocalAddress();
const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer b.deinit();
try b.connect(binded_address);
try b.setReadTimeout(1);
const ab = try a.accept(os.SOCK_CLOEXEC);
defer ab.socket.deinit();
var buf: [1]u8 = undefined;
testing.expectError(error.WouldBlock, b.read(&buf));
}
test "socket/linux: create non-blocking socket pair" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer a.deinit();
try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
try a.listen(128);
const binded_address = try a.getLocalAddress();
const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer b.deinit();
testing.expectError(error.WouldBlock, b.connect(binded_address));
try b.getError();
const ab = try a.accept(os.SOCK_NONBLOCK | os.SOCK_CLOEXEC);
defer ab.socket.deinit();
}

567
lib/std/x/os/net.zig Normal file
View File

@ -0,0 +1,567 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("../../std.zig");
const os = std.os;
const fmt = std.fmt;
const mem = std.mem;
const math = std.math;
const builtin = std.builtin;
const testing = std.testing;
/// Resolves a network interface name into a scope/zone ID. It returns
/// an error if either resolution fails, or if the interface name is
/// too long.
pub fn resolveScopeID(name: []const u8) !u32 {
if (comptime @hasDecl(os, "IFNAMESIZE")) {
if (name.len >= os.IFNAMESIZE - 1) return error.NameTooLong;
const fd = try os.socket(os.AF_UNIX, os.SOCK_DGRAM, 0);
defer os.closeSocket(fd);
var f: os.ifreq = undefined;
mem.copy(u8, &f.ifrn.name, name);
f.ifrn.name[name.len] = 0;
try os.ioctl_SIOCGIFINDEX(fd, &f);
return @bitCast(u32, f.ifru.ivalue);
}
return error.Unsupported;
}
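A hedged sketch of calling resolveScopeID; the interface name "lo" is only an example, and the lookup can only succeed on hosts that expose IFNAMESIZE (e.g. Linux) and actually have such an interface:
test "resolveScopeID (illustrative sketch)" {
    // Skip rather than fail when the host cannot resolve the example interface.
    const scope_id = resolveScopeID("lo") catch return error.SkipZigTest;
    _ = scope_id;
}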
/// An IPv4 address comprised of 4 bytes.
pub const IPv4 = extern struct {
/// An IPv4 host-port pair.
pub const Address = extern struct {
host: IPv4,
port: u16,
};
/// Octets of an IPv4 address designating the local host.
pub const localhost_octets = [_]u8{ 127, 0, 0, 1 };
/// The IPv4 address of the local host.
pub const localhost: IPv4 = .{ .octets = localhost_octets };
/// Octets of an unspecified IPv4 address.
pub const unspecified_octets = [_]u8{0} ** 4;
/// An unspecified IPv4 address.
pub const unspecified: IPv4 = .{ .octets = unspecified_octets };
/// Octets of a broadcast IPv4 address.
pub const broadcast_octets = [_]u8{255} ** 4;
/// An IPv4 broadcast address.
pub const broadcast: IPv4 = .{ .octets = broadcast_octets };
/// The prefix octet pattern of a link-local IPv4 address.
pub const link_local_prefix = [_]u8{ 169, 254 };
/// The prefix octet patterns of IPv4 addresses intended for
/// documentation.
pub const documentation_prefixes = [_][]const u8{
&[_]u8{ 192, 0, 2 },
&[_]u8{ 198, 51, 100 },
&[_]u8{ 203, 0, 113 },
};
octets: [4]u8,
/// Returns whether or not the two addresses are equal to, less than, or
/// greater than each other.
pub fn cmp(self: IPv4, other: IPv4) math.Order {
return mem.order(u8, &self.octets, &other.octets);
}
/// Returns true if both addresses are semantically equivalent.
pub fn eql(self: IPv4, other: IPv4) bool {
return mem.eql(u8, &self.octets, &other.octets);
}
/// Returns true if the address is a loopback address.
pub fn isLoopback(self: IPv4) bool {
return self.octets[0] == 127;
}
/// Returns true if the address is an unspecified IPv4 address.
pub fn isUnspecified(self: IPv4) bool {
return mem.eql(u8, &self.octets, &unspecified_octets);
}
/// Returns true if the address is a private IPv4 address.
pub fn isPrivate(self: IPv4) bool {
return self.octets[0] == 10 or
(self.octets[0] == 172 and self.octets[1] >= 16 and self.octets[1] <= 31) or
(self.octets[0] == 192 and self.octets[1] == 168);
}
/// Returns true if the address is a link-local IPv4 address.
pub fn isLinkLocal(self: IPv4) bool {
return mem.startsWith(u8, &self.octets, &link_local_prefix);
}
/// Returns true if the address is a multicast IPv4 address.
pub fn isMulticast(self: IPv4) bool {
return self.octets[0] >= 224 and self.octets[0] <= 239;
}
/// Returns true if the address is an IPv4 broadcast address.
pub fn isBroadcast(self: IPv4) bool {
return mem.eql(u8, &self.octets, &broadcast_octets);
}
/// Returns true if the address is in a range designated for documentation. Refer
/// to IETF RFC 5737 for more details.
pub fn isDocumentation(self: IPv4) bool {
inline for (documentation_prefixes) |prefix| {
if (mem.startsWith(u8, &self.octets, prefix)) {
return true;
}
}
return false;
}
/// Implements the `std.fmt.format` API.
pub fn format(
self: IPv4,
comptime layout: []const u8,
opts: fmt.FormatOptions,
writer: anytype,
) !void {
if (comptime layout.len != 0 and layout[0] != 's') {
@compileError("Unsupported format specifier for IPv4 type '" ++ layout ++ "'.");
}
try fmt.format(writer, "{}.{}.{}.{}", .{
self.octets[0],
self.octets[1],
self.octets[2],
self.octets[3],
});
}
/// Set of possible errors that may be encountered when parsing an IPv4
/// address.
pub const ParseError = error{
UnexpectedEndOfOctet,
TooManyOctets,
OctetOverflow,
UnexpectedToken,
IncompleteAddress,
};
/// Parses an arbitrary IPv4 address.
pub fn parse(buf: []const u8) ParseError!IPv4 {
var octets: [4]u8 = undefined;
var octet: u8 = 0;
var index: u8 = 0;
var saw_any_digits: bool = false;
for (buf) |c| {
switch (c) {
'.' => {
if (!saw_any_digits) return error.UnexpectedEndOfOctet;
if (index == 3) return error.TooManyOctets;
octets[index] = octet;
index += 1;
octet = 0;
saw_any_digits = false;
},
'0'...'9' => {
saw_any_digits = true;
octet = math.mul(u8, octet, 10) catch return error.OctetOverflow;
octet = math.add(u8, octet, c - '0') catch return error.OctetOverflow;
},
else => return error.UnexpectedToken,
}
}
if (index == 3 and saw_any_digits) {
octets[index] = octet;
return IPv4{ .octets = octets };
}
return error.IncompleteAddress;
}
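For illustration, a small sketch of the parser above; the literal comes from the 192.0.2.0/24 documentation range:
test "IPv4.parse (illustrative sketch)" {
    const addr = try IPv4.parse("192.0.2.1");
    testing.expect(addr.isDocumentation());
    testing.expect(!addr.isPrivate());
    testing.expect(addr.eql(IPv4{ .octets = [4]u8{ 192, 0, 2, 1 } }));
}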
/// Maps the address to its IPv6 equivalent. In most cases, you would
/// want to map the address to its IPv6 equivalent rather than directly
/// re-interpreting the address.
pub fn mapToIPv6(self: IPv4) IPv6 {
var octets: [16]u8 = undefined;
mem.copy(u8, octets[0..12], &IPv6.v4_mapped_prefix);
mem.copy(u8, octets[12..], &self.octets);
return IPv6{ .octets = octets, .scope_id = IPv6.no_scope_id };
}
/// Directly re-interprets the address to its IPv6 equivalent. In most
/// cases, you would want to map the address to its IPv6 equivalent rather
/// than directly re-interpreting the address.
pub fn toIPv6(self: IPv4) IPv6 {
var octets: [16]u8 = undefined;
mem.set(u8, octets[0..12], 0);
mem.copy(u8, octets[12..], &self.octets);
return IPv6{ .octets = octets, .scope_id = IPv6.no_scope_id };
}
};
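An illustrative sketch of the difference between the two conversions above, using the localhost constant defined in this struct:
test "IPv4 to IPv6 conversion (illustrative sketch)" {
    // Mapping keeps the ::ffff: prefix, so the result is still recognized as IPv4-mapped.
    testing.expect(IPv4.localhost.mapToIPv6().mapsToIPv4());
    // A raw re-interpretation drops that prefix and is no longer seen as IPv4-mapped.
    testing.expect(!IPv4.localhost.toIPv6().mapsToIPv4());
}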
/// An IPv6 address comprising 16 bytes of address data and 4 bytes
/// of scope ID, 20 bytes in total.
pub const IPv6 = extern struct {
/// An IPv6 host-port pair.
pub const Address = extern struct {
host: IPv6,
port: u16,
};
/// Octets of an IPv6 address designating the local host.
pub const localhost_octets = [_]u8{0} ** 15 ++ [_]u8{0x01};
/// The IPv6 address of the local host.
pub const localhost: IPv6 = .{
.octets = localhost_octets,
.scope_id = no_scope_id,
};
/// Octets of an unspecified IPv6 address.
pub const unspecified_octets = [_]u8{0} ** 16;
/// An unspecified IPv6 address.
pub const unspecified: IPv6 = .{
.octets = unspecified_octets,
.scope_id = no_scope_id,
};
/// The prefix of an IPv6 address that is mapped to an IPv4 address.
pub const v4_mapped_prefix = [_]u8{0} ** 10 ++ [_]u8{0xFF} ** 2;
/// A marker value used to designate an IPv6 address with no
/// associated scope ID.
pub const no_scope_id = math.maxInt(u32);
octets: [16]u8,
scope_id: u32,
/// Returns the ordering between the two addresses: equal, less than, or
/// greater than.
pub fn cmp(self: IPv6, other: IPv6) math.Order {
return switch (mem.order(u8, &self.octets, &other.octets)) {
.eq => math.order(self.scope_id, other.scope_id),
else => |order| order,
};
}
/// Returns true if both addresses are semantically equivalent.
pub fn eql(self: IPv6, other: IPv6) bool {
return self.scope_id == other.scope_id and mem.eql(u8, &self.octets, &other.octets);
}
/// Returns true if the address is an unspecified IPv6 address.
pub fn isUnspecified(self: IPv6) bool {
return mem.eql(u8, &self.octets, &unspecified_octets);
}
/// Returns true if the address is a loopback address.
pub fn isLoopback(self: IPv6) bool {
return mem.eql(u8, self.octets[0..3], &[_]u8{ 0, 0, 0 }) and
mem.eql(u8, self.octets[12..], &[_]u8{ 0, 0, 0, 1 });
}
/// Returns true if the address maps to an IPv4 address.
pub fn mapsToIPv4(self: IPv6) bool {
return mem.startsWith(u8, &self.octets, &v4_mapped_prefix);
}
/// Returns the IPv4 address that this address maps to, or null if the
/// address is not an IPv4-mapped address.
pub fn toIPv4(self: IPv6) ?IPv4 {
if (!self.mapsToIPv4()) return null;
return IPv4{ .octets = self.octets[12..][0..4].* };
}
/// Returns true if the address is a multicast IPv6 address.
pub fn isMulticast(self: IPv6) bool {
return self.octets[0] == 0xFF;
}
/// Returns true if the address is a unicast link-local IPv6 address.
pub fn isLinkLocal(self: IPv6) bool {
return self.octets[0] == 0xFE and self.octets[1] & 0xC0 == 0x80;
}
/// Returns true if the address is a deprecated unicast site-local
/// IPv6 address. Refer to IETF RFC 3879 for more details on why
/// such addresses are deprecated.
pub fn isSiteLocal(self: IPv6) bool {
return self.octets[0] == 0xFE and self.octets[1] & 0xC0 == 0xC0;
}
/// IPv6 multicast address scopes.
pub const Scope = enum(u8) {
interface = 1,
link = 2,
realm = 3,
admin = 4,
site = 5,
organization = 8,
global = 14,
unknown = 0xFF,
};
/// Returns the multicast scope of the address.
pub fn scope(self: IPv6) Scope {
if (!self.isMulticast()) return .unknown;
return switch (self.octets[1] & 0x0F) {
1 => .interface,
2 => .link,
3 => .realm,
4 => .admin,
5 => .site,
8 => .organization,
14 => .global,
else => .unknown,
};
}
/// Implements the `std.fmt.format` API. Specifying 'x' or 's' formats the
/// address using lower-cased hexadecimal octets, while 'X' or 'S' formats it
/// using upper-cased hexadecimal octets.
///
/// The default specifier is 'x'.
pub fn format(
self: IPv6,
comptime layout: []const u8,
opts: fmt.FormatOptions,
writer: anytype,
) !void {
comptime const specifier = &[_]u8{if (layout.len == 0) 'x' else switch (layout[0]) {
'x', 'X' => |specifier| specifier,
's' => 'x',
'S' => 'X',
else => @compileError("Unsupported format specifier for IPv6 type '" ++ layout ++ "'."),
}};
if (mem.startsWith(u8, &self.octets, &v4_mapped_prefix)) {
return fmt.format(writer, "::{" ++ specifier ++ "}{" ++ specifier ++ "}:{}.{}.{}.{}", .{
0xFF,
0xFF,
self.octets[12],
self.octets[13],
self.octets[14],
self.octets[15],
});
}
const zero_span = span: {
var i: usize = 0;
while (i < self.octets.len) : (i += 2) {
if (self.octets[i] == 0 and self.octets[i + 1] == 0) break;
} else break :span .{ .from = 0, .to = 0 };
const from = i;
while (i < self.octets.len) : (i += 2) {
if (self.octets[i] != 0 or self.octets[i + 1] != 0) break;
}
break :span .{ .from = from, .to = i };
};
var i: usize = 0;
while (i != 16) : (i += 2) {
if (zero_span.from != zero_span.to and i == zero_span.from) {
try writer.writeAll("::");
} else if (i >= zero_span.from and i < zero_span.to) {} else {
if (i != 0 and i != zero_span.to) try writer.writeAll(":");
const val = @as(u16, self.octets[i]) << 8 | self.octets[i + 1];
try fmt.formatIntValue(val, specifier, .{}, writer);
}
}
if (self.scope_id != no_scope_id and self.scope_id != 0) {
try fmt.format(writer, "%{d}", .{self.scope_id});
}
}
/// Set of possible errors that may be encountered when parsing an IPv6
/// address.
pub const ParseError = error{
MalformedV4Mapping,
BadScopeID,
} || IPv4.ParseError;
/// Parses an arbitrary IPv6 address, including link-local addresses.
pub fn parse(buf: []const u8) ParseError!IPv6 {
if (mem.lastIndexOfScalar(u8, buf, '%')) |index| {
const ip_slice = buf[0..index];
const scope_id_slice = buf[index + 1 ..];
if (scope_id_slice.len == 0) return error.BadScopeID;
const scope_id: u32 = switch (scope_id_slice[0]) {
'0'...'9' => fmt.parseInt(u32, scope_id_slice, 10),
else => resolveScopeID(scope_id_slice),
} catch return error.BadScopeID;
return parseWithScopeID(ip_slice, scope_id);
}
return parseWithScopeID(buf, no_scope_id);
}
/// Parses an IPv6 address with a pre-specified scope ID. Presumes that the
/// textual address carries no scope ID suffix (e.g. `%eth0`) of its own.
pub fn parseWithScopeID(buf: []const u8, scope_id: u32) ParseError!IPv6 {
var octets: [16]u8 = undefined;
var octet: u16 = 0;
var tail: [16]u8 = undefined;
var out: []u8 = &octets;
var index: u8 = 0;
var saw_any_digits: bool = false;
var abbrv: bool = false;
for (buf) |c, i| {
switch (c) {
':' => {
if (!saw_any_digits) {
if (abbrv) return error.UnexpectedToken;
if (i != 0) abbrv = true;
mem.set(u8, out[index..], 0);
out = &tail;
index = 0;
continue;
}
if (index == 14) return error.TooManyOctets;
out[index] = @truncate(u8, octet >> 8);
index += 1;
out[index] = @truncate(u8, octet);
index += 1;
octet = 0;
saw_any_digits = false;
},
'.' => {
if (!abbrv or out[0] != 0xFF and out[1] != 0xFF) {
return error.MalformedV4Mapping;
}
const start_index = mem.lastIndexOfScalar(u8, buf[0..i], ':').? + 1;
const v4 = try IPv4.parse(buf[start_index..]);
octets[10] = 0xFF;
octets[11] = 0xFF;
mem.copy(u8, octets[12..], &v4.octets);
return IPv6{ .octets = octets, .scope_id = scope_id };
},
else => {
saw_any_digits = true;
const digit = fmt.charToDigit(c, 16) catch return error.UnexpectedToken;
octet = math.mul(u16, octet, 16) catch return error.OctetOverflow;
octet = math.add(u16, octet, digit) catch return error.OctetOverflow;
},
}
}
if (!saw_any_digits and !abbrv) {
return error.IncompleteAddress;
}
if (index == 14) {
out[14] = @truncate(u8, octet >> 8);
out[15] = @truncate(u8, octet);
} else {
out[index] = @truncate(u8, octet >> 8);
index += 1;
out[index] = @truncate(u8, octet);
index += 1;
mem.copy(u8, octets[16 - index ..], out[0..index]);
}
return IPv6{ .octets = octets, .scope_id = scope_id };
}
};
test {
testing.refAllDecls(@This());
}
test "ip: convert to and from ipv6" {
try testing.expectFmt("::7f00:1", "{}", .{IPv4.localhost.toIPv6()});
testing.expect(!IPv4.localhost.toIPv6().mapsToIPv4());
try testing.expectFmt("::ffff:127.0.0.1", "{}", .{IPv4.localhost.mapToIPv6()});
testing.expect(IPv4.localhost.mapToIPv6().mapsToIPv4());
testing.expect(IPv4.localhost.toIPv6().toIPv4() == null);
try testing.expectFmt("127.0.0.1", "{}", .{IPv4.localhost.mapToIPv6().toIPv4()});
}
test "ipv4: parse & format" {
const cases = [_][]const u8{
"0.0.0.0",
"255.255.255.255",
"1.2.3.4",
"123.255.0.91",
"127.0.0.1",
};
for (cases) |case| {
try testing.expectFmt(case, "{}", .{try IPv4.parse(case)});
}
}
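// Editor's note: hedged sketch (not in the original commit) of the ParseError cases
// declared above; it assumes `std.testing.expectError` with its pre-0.9, void-returning
// signature, matching how `testing.expect` is used elsewhere in this file.
test "ipv4: parse error sketch" {
    testing.expectError(error.IncompleteAddress, IPv4.parse("10.0.0"));
    testing.expectError(error.OctetOverflow, IPv4.parse("10.0.0.999"));
    testing.expectError(error.UnexpectedToken, IPv4.parse("10.0.0.x"));
    testing.expectError(error.TooManyOctets, IPv4.parse("10.0.0.1.2"));
}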
test "ipv6: parse & format" {
const inputs = [_][]const u8{
"FF01:0:0:0:0:0:0:FB",
"FF01::Fb",
"::1",
"::",
"2001:db8::",
"::1234:5678",
"2001:db8::1234:5678",
"::ffff:123.5.123.5",
};
const outputs = [_][]const u8{
"ff01::fb",
"ff01::fb",
"::1",
"::",
"2001:db8::",
"::1234:5678",
"2001:db8::1234:5678",
"::ffff:123.5.123.5",
};
for (inputs) |input, i| {
try testing.expectFmt(outputs[i], "{}", .{try IPv6.parse(input)});
}
}
test "ipv6: parse & format addresses with scope ids" {
if (!@hasDecl(os, "IFNAMESIZE")) return error.SkipZigTest;
const inputs = [_][]const u8{
"FF01::FB%lo",
};
const outputs = [_][]const u8{
"ff01::fb%1",
};
for (inputs) |input, i| {
try testing.expectFmt(outputs[i], "{}", .{try IPv6.parse(input)});
}
}
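// Editor's note: hedged sketch (not in the original commit) combining the IPv4-mapped
// helpers, the multicast scope query, and the upper-case format specifier shown above;
// it exercises only declarations from this file.
test "ipv6: mapped address and scope sketch" {
    const mapped = try IPv6.parse("::ffff:10.0.0.1");
    testing.expect(mapped.mapsToIPv4());
    try testing.expectFmt("10.0.0.1", "{}", .{mapped.toIPv4()});
    const all_nodes = try IPv6.parse("ff02::1");
    testing.expect(all_nodes.isMulticast());
    testing.expect(all_nodes.scope() == .link);
    try testing.expectFmt("FF02::1", "{X}", .{all_nodes});
}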

View File

@ -1,9 +0,0 @@
const std = @import("../../std.zig");
const testing = std.testing;
pub const Socket = @import("Socket.zig");
test {
testing.refAllDecls(@This());
}

View File

@ -1979,20 +1979,26 @@ pub const Tree = struct {
// asm ("foo" :: [_] "" (y) : "a", "b");
const last_input = result.inputs[result.inputs.len - 1];
const rparen = tree.lastToken(last_input);
if (token_tags[rparen + 1] == .colon and
token_tags[rparen + 2] == .string_literal)
var i = rparen + 1;
// Allow a (useless) comma right after the closing parenthesis.
if (token_tags[i] == .comma) i += 1;
if (token_tags[i] == .colon and
token_tags[i + 1] == .string_literal)
{
result.first_clobber = rparen + 2;
result.first_clobber = i + 1;
}
} else {
// asm ("foo" : [_] "" (x) :: "a", "b");
const last_output = result.outputs[result.outputs.len - 1];
const rparen = tree.lastToken(last_output);
if (token_tags[rparen + 1] == .colon and
token_tags[rparen + 2] == .colon and
token_tags[rparen + 3] == .string_literal)
var i = rparen + 1;
// Allow a (useless) comma right after the closing parenthesis.
if (token_tags[i] == .comma) i += 1;
if (token_tags[i] == .colon and
token_tags[i + 1] == .colon and
token_tags[i + 2] == .string_literal)
{
result.first_clobber = rparen + 3;
result.first_clobber = i + 2;
}
}

View File

@ -4,6 +4,38 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
test "zig fmt: preserves clobbers in inline asm with stray comma" {
try testTransform(
\\fn foo() void {
\\ asm volatile (""
\\ : [_] "" (-> type),
\\ :
\\ : "clobber"
\\ );
\\ asm volatile (""
\\ :
\\ : [_] "" (type),
\\ : "clobber"
\\ );
\\}
\\
,
\\fn foo() void {
\\ asm volatile (""
\\ : [_] "" (-> type)
\\ :
\\ : "clobber"
\\ );
\\ asm volatile (""
\\ :
\\ : [_] "" (type)
\\ : "clobber"
\\ );
\\}
\\
);
}
test "zig fmt: respect line breaks in struct field value declaration" {
try testCanonical(
\\const Foo = struct {

View File

@ -53,7 +53,7 @@ const SparcCpuinfoImpl = struct {
// At the moment we only support 64bit SPARC systems.
assert(self.is_64bit);
const model = self.model orelse Target.Cpu.Model.generic(arch);
const model = self.model orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
@ -65,7 +65,7 @@ const SparcCpuinfoImpl = struct {
const SparcCpuinfoParser = CpuinfoParser(SparcCpuinfoImpl);
test "cpuinfo: SPARC" {
try testParser(SparcCpuinfoParser, &Target.sparc.cpu.niagara2,
try testParser(SparcCpuinfoParser, .sparcv9, &Target.sparc.cpu.niagara2,
\\cpu : UltraSparc T2 (Niagara2)
\\fpu : UltraSparc T2 integrated FPU
\\pmu : niagara2
@ -119,7 +119,7 @@ const PowerpcCpuinfoImpl = struct {
}
fn finalize(self: *const PowerpcCpuinfoImpl, arch: Target.Cpu.Arch) ?Target.Cpu {
const model = self.model orelse Target.Cpu.Model.generic(arch);
const model = self.model orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
@ -131,13 +131,13 @@ const PowerpcCpuinfoImpl = struct {
const PowerpcCpuinfoParser = CpuinfoParser(PowerpcCpuinfoImpl);
test "cpuinfo: PowerPC" {
try testParser(PowerpcCpuinfoParser, &Target.powerpc.cpu.@"970",
try testParser(PowerpcCpuinfoParser, .powerpc, &Target.powerpc.cpu.@"970",
\\processor : 0
\\cpu : PPC970MP, altivec supported
\\clock : 1250.000000MHz
\\revision : 1.1 (pvr 0044 0101)
);
try testParser(PowerpcCpuinfoParser, &Target.powerpc.cpu.pwr8,
try testParser(PowerpcCpuinfoParser, .powerpc64le, &Target.powerpc.cpu.pwr8,
\\processor : 0
\\cpu : POWER8 (raw), altivec supported
\\clock : 2926.000000MHz
@ -145,9 +145,275 @@ test "cpuinfo: PowerPC" {
);
}
fn testParser(parser: anytype, expected_model: *const Target.Cpu.Model, input: []const u8) !void {
const ArmCpuinfoImpl = struct {
cores: [4]CoreInfo = undefined,
core_no: usize = 0,
have_fields: usize = 0,
const CoreInfo = struct {
architecture: u8 = 0,
implementer: u8 = 0,
variant: u8 = 0,
part: u16 = 0,
is_really_v6: bool = false,
};
const cpu_models = struct {
// Shorthands to simplify the tables below.
const A32 = Target.arm.cpu;
const A64 = Target.aarch64.cpu;
const E = struct {
part: u16,
variant: ?u8 = null, // null matches any variant
m32: ?*const Target.Cpu.Model = null,
m64: ?*const Target.Cpu.Model = null,
};
// implementer = 0x41
const ARM = [_]E{
E{ .part = 0x926, .m32 = &A32.arm926ej_s, .m64 = null },
E{ .part = 0xb02, .m32 = &A32.mpcore, .m64 = null },
E{ .part = 0xb36, .m32 = &A32.arm1136j_s, .m64 = null },
E{ .part = 0xb56, .m32 = &A32.arm1156t2_s, .m64 = null },
E{ .part = 0xb76, .m32 = &A32.arm1176jz_s, .m64 = null },
E{ .part = 0xc05, .m32 = &A32.cortex_a5, .m64 = null },
E{ .part = 0xc07, .m32 = &A32.cortex_a7, .m64 = null },
E{ .part = 0xc08, .m32 = &A32.cortex_a8, .m64 = null },
E{ .part = 0xc09, .m32 = &A32.cortex_a9, .m64 = null },
E{ .part = 0xc0d, .m32 = &A32.cortex_a17, .m64 = null },
E{ .part = 0xc0f, .m32 = &A32.cortex_a15, .m64 = null },
E{ .part = 0xc0e, .m32 = &A32.cortex_a17, .m64 = null },
E{ .part = 0xc14, .m32 = &A32.cortex_r4, .m64 = null },
E{ .part = 0xc15, .m32 = &A32.cortex_r5, .m64 = null },
E{ .part = 0xc17, .m32 = &A32.cortex_r7, .m64 = null },
E{ .part = 0xc18, .m32 = &A32.cortex_r8, .m64 = null },
E{ .part = 0xc20, .m32 = &A32.cortex_m0, .m64 = null },
E{ .part = 0xc21, .m32 = &A32.cortex_m1, .m64 = null },
E{ .part = 0xc23, .m32 = &A32.cortex_m3, .m64 = null },
E{ .part = 0xc24, .m32 = &A32.cortex_m4, .m64 = null },
E{ .part = 0xc27, .m32 = &A32.cortex_m7, .m64 = null },
E{ .part = 0xc60, .m32 = &A32.cortex_m0plus, .m64 = null },
E{ .part = 0xd01, .m32 = &A32.cortex_a32, .m64 = null },
E{ .part = 0xd03, .m32 = &A32.cortex_a53, .m64 = &A64.cortex_a53 },
E{ .part = 0xd04, .m32 = &A32.cortex_a35, .m64 = &A64.cortex_a35 },
E{ .part = 0xd05, .m32 = &A32.cortex_a55, .m64 = &A64.cortex_a55 },
E{ .part = 0xd07, .m32 = &A32.cortex_a57, .m64 = &A64.cortex_a57 },
E{ .part = 0xd08, .m32 = &A32.cortex_a72, .m64 = &A64.cortex_a72 },
E{ .part = 0xd09, .m32 = &A32.cortex_a73, .m64 = &A64.cortex_a73 },
E{ .part = 0xd0a, .m32 = &A32.cortex_a75, .m64 = &A64.cortex_a75 },
E{ .part = 0xd0b, .m32 = &A32.cortex_a76, .m64 = &A64.cortex_a76 },
E{ .part = 0xd0c, .m32 = &A32.neoverse_n1, .m64 = null },
E{ .part = 0xd0d, .m32 = &A32.cortex_a77, .m64 = &A64.cortex_a77 },
E{ .part = 0xd13, .m32 = &A32.cortex_r52, .m64 = null },
E{ .part = 0xd20, .m32 = &A32.cortex_m23, .m64 = null },
E{ .part = 0xd21, .m32 = &A32.cortex_m33, .m64 = null },
E{ .part = 0xd41, .m32 = &A32.cortex_a78, .m64 = &A64.cortex_a78 },
E{ .part = 0xd4b, .m32 = &A32.cortex_a78c, .m64 = &A64.cortex_a78c },
E{ .part = 0xd44, .m32 = &A32.cortex_x1, .m64 = &A64.cortex_x1 },
E{ .part = 0xd02, .m64 = &A64.cortex_a34 },
E{ .part = 0xd06, .m64 = &A64.cortex_a65 },
E{ .part = 0xd43, .m64 = &A64.cortex_a65ae },
};
// implementer = 0x42
const Broadcom = [_]E{
E{ .part = 0x516, .m64 = &A64.thunderx2t99 },
};
// implementer = 0x43
const Cavium = [_]E{
E{ .part = 0x0a0, .m64 = &A64.thunderx },
E{ .part = 0x0a2, .m64 = &A64.thunderxt81 },
E{ .part = 0x0a3, .m64 = &A64.thunderxt83 },
E{ .part = 0x0a1, .m64 = &A64.thunderxt88 },
E{ .part = 0x0af, .m64 = &A64.thunderx2t99 },
};
// implementer = 0x46
const Fujitsu = [_]E{
E{ .part = 0x001, .m64 = &A64.a64fx },
};
// implementer = 0x48
const HiSilicon = [_]E{
E{ .part = 0xd01, .m64 = &A64.tsv110 },
};
// implementer = 0x4e
const Nvidia = [_]E{
E{ .part = 0x004, .m64 = &A64.carmel },
};
// implementer = 0x50
const Ampere = [_]E{
E{ .part = 0x000, .variant = 3, .m64 = &A64.emag },
E{ .part = 0x000, .m64 = &A64.xgene1 },
};
// implementer = 0x51
const Qualcomm = [_]E{
E{ .part = 0x06f, .m32 = &A32.krait },
E{ .part = 0x201, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x205, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x211, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x800, .m64 = &A64.cortex_a73, .m32 = &A64.cortex_a73 },
E{ .part = 0x801, .m64 = &A64.cortex_a73, .m32 = &A64.cortex_a73 },
E{ .part = 0x802, .m64 = &A64.cortex_a75, .m32 = &A64.cortex_a75 },
E{ .part = 0x803, .m64 = &A64.cortex_a75, .m32 = &A64.cortex_a75 },
E{ .part = 0x804, .m64 = &A64.cortex_a76, .m32 = &A64.cortex_a76 },
E{ .part = 0x805, .m64 = &A64.cortex_a76, .m32 = &A64.cortex_a76 },
E{ .part = 0xc00, .m64 = &A64.falkor },
E{ .part = 0xc01, .m64 = &A64.saphira },
};
fn isKnown(core: CoreInfo, is_64bit: bool) ?*const Target.Cpu.Model {
const models = switch (core.implementer) {
0x41 => &ARM,
0x42 => &Broadcom,
0x43 => &Cavium,
0x46 => &Fujitsu,
0x48 => &HiSilicon,
0x50 => &Ampere,
0x51 => &Qualcomm,
else => return null,
};
for (models) |model| {
if (model.part == core.part and
(model.variant == null or model.variant.? == core.variant))
return if (is_64bit) model.m64 else model.m32;
}
return null;
}
};
fn addOne(self: *ArmCpuinfoImpl) void {
if (self.have_fields == 4 and self.core_no < self.cores.len) {
if (self.core_no > 0) {
// Deduplicate the core info.
for (self.cores[0..self.core_no]) |it| {
if (std.meta.eql(it, self.cores[self.core_no]))
return;
}
}
self.core_no += 1;
}
}
fn line_hook(self: *ArmCpuinfoImpl, key: []const u8, value: []const u8) !bool {
const info = &self.cores[self.core_no];
if (mem.eql(u8, key, "processor")) {
// Handle both old-style and new-style cpuinfo formats.
// The former prints a sequence of "processor: N" lines for each
// core and then the info for the core that's executing this code(!),
// while the latter prints the info for each core right after its
// "processor" key.
self.have_fields = 0;
self.cores[self.core_no] = .{};
} else if (mem.eql(u8, key, "CPU implementer")) {
info.implementer = try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU architecture")) {
// "AArch64" on older kernels.
info.architecture = if (mem.startsWith(u8, value, "AArch64"))
8
else
try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU variant")) {
info.variant = try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU part")) {
info.part = try fmt.parseInt(u16, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "model name")) {
// ARMv6 cores report "CPU architecture" equal to 7.
if (mem.indexOf(u8, value, "(v6l)")) |_| {
info.is_really_v6 = true;
}
} else if (mem.eql(u8, key, "CPU revision")) {
// This field is always the last one for each CPU section.
_ = self.addOne();
}
return true;
}
fn finalize(self: *ArmCpuinfoImpl, arch: Target.Cpu.Arch) ?Target.Cpu {
if (self.core_no == 0) return null;
const is_64bit = switch (arch) {
.aarch64, .aarch64_be, .aarch64_32 => true,
else => false,
};
var known_models: [self.cores.len]?*const Target.Cpu.Model = undefined;
for (self.cores[0..self.core_no]) |core, i| {
known_models[i] = cpu_models.isKnown(core, is_64bit);
}
// XXX We pick the first core on big.LITTLE systems, hopefully the
// LITTLE one.
const model = known_models[0] orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
.features = model.features,
};
}
};
const ArmCpuinfoParser = CpuinfoParser(ArmCpuinfoImpl);
test "cpuinfo: ARM" {
try testParser(ArmCpuinfoParser, .arm, &Target.arm.cpu.arm1176jz_s,
\\processor : 0
\\model name : ARMv6-compatible processor rev 7 (v6l)
\\BogoMIPS : 997.08
\\Features : half thumb fastmult vfp edsp java tls
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x0
\\CPU part : 0xb76
\\CPU revision : 7
);
try testParser(ArmCpuinfoParser, .arm, &Target.arm.cpu.cortex_a7,
\\processor : 0
\\model name : ARMv7 Processor rev 3 (v7l)
\\BogoMIPS : 18.00
\\Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x0
\\CPU part : 0xc07
\\CPU revision : 3
\\
\\processor : 4
\\model name : ARMv7 Processor rev 3 (v7l)
\\BogoMIPS : 90.00
\\Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x2
\\CPU part : 0xc0f
\\CPU revision : 3
);
try testParser(ArmCpuinfoParser, .aarch64, &Target.aarch64.cpu.cortex_a72,
\\processor : 0
\\BogoMIPS : 108.00
\\Features : fp asimd evtstrm crc32 cpuid
\\CPU implementer : 0x41
\\CPU architecture: 8
\\CPU variant : 0x0
\\CPU part : 0xd08
\\CPU revision : 3
);
}
fn testParser(
parser: anytype,
arch: Target.Cpu.Arch,
expected_model: *const Target.Cpu.Model,
input: []const u8,
) !void {
var fbs = io.fixedBufferStream(input);
const result = try parser.parse(.powerpc, fbs.reader());
const result = try parser.parse(arch, fbs.reader());
testing.expectEqual(expected_model, result.?.model);
testing.expect(expected_model.features.eql(result.?.features));
}
@ -186,6 +452,9 @@ pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
const current_arch = std.Target.current.cpu.arch;
switch (current_arch) {
.arm, .armeb, .thumb, .thumbeb, .aarch64, .aarch64_be, .aarch64_32 => {
return ArmCpuinfoParser.parse(current_arch, f.reader()) catch null;
},
.sparcv9 => {
return SparcCpuinfoParser.parse(current_arch, f.reader()) catch null;
},

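For reference, a minimal hedged sketch (not part of this commit) of how the lookup tables above resolve a decoded /proc/cpuinfo core entry to a CPU model. It assumes it lives in the same file as ArmCpuinfoImpl (so its non-pub declarations are visible) and uses the pre-0.9, void-returning std.testing helpers, as the existing tests do.

test "cpuinfo: ARM model lookup sketch" {
    const core = ArmCpuinfoImpl.CoreInfo{
        .architecture = 8,
        .implementer = 0x41, // ARM Ltd.
        .variant = 0,
        .part = 0xd08, // Cortex-A72
    };
    const model = ArmCpuinfoImpl.cpu_models.isKnown(core, true) orelse unreachable;
    testing.expectEqual(&Target.aarch64.cpu.cortex_a72, model);
}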
View File

@ -239,7 +239,6 @@ pub const CObject = struct {
pub fn destroy(em: *ErrorMsg, gpa: *Allocator) void {
gpa.free(em.msg);
gpa.destroy(em);
em.* = undefined;
}
};

View File

@ -43,17 +43,13 @@ dwarf_debug_str_index: ?u16 = null,
dwarf_debug_line_index: ?u16 = null,
dwarf_debug_ranges_index: ?u16 = null,
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
symbols: std.ArrayListUnmanaged(*Symbol) = .{},
initializers: std.ArrayListUnmanaged(*Symbol) = .{},
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
locals: std.StringArrayHashMapUnmanaged(Symbol) = .{},
stabs: std.ArrayListUnmanaged(Stab) = .{},
tu_path: ?[]const u8 = null,
tu_mtime: ?u64 = null,
initializers: std.ArrayListUnmanaged(CppStatic) = .{},
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
pub const Section = struct {
inner: macho.section_64,
code: []u8,
@ -71,23 +67,6 @@ pub const Section = struct {
}
};
const CppStatic = struct {
symbol: u32,
target_addr: u64,
};
const Stab = struct {
tag: Tag,
symbol: u32,
size: ?u64 = null,
const Tag = enum {
function,
global,
static,
};
};
const DebugInfo = struct {
inner: dwarf.DwarfInfo,
debug_info: []u8,
@ -169,14 +148,12 @@ pub fn deinit(self: *Object) void {
}
self.sections.deinit(self.allocator);
for (self.locals.items()) |*entry| {
entry.value.deinit(self.allocator);
for (self.symbols.items) |sym| {
sym.deinit(self.allocator);
self.allocator.destroy(sym);
}
self.locals.deinit(self.allocator);
self.symbols.deinit(self.allocator);
self.symtab.deinit(self.allocator);
self.strtab.deinit(self.allocator);
self.stabs.deinit(self.allocator);
self.data_in_code_entries.deinit(self.allocator);
self.initializers.deinit(self.allocator);
@ -222,9 +199,9 @@ pub fn parse(self: *Object) !void {
}
try self.readLoadCommands(reader);
try self.parseSymbols();
try self.parseSections();
if (self.symtab_cmd_index != null) try self.parseSymtab();
if (self.data_in_code_cmd_index != null) try self.readDataInCode();
try self.parseDataInCode();
try self.parseInitializers();
try self.parseDebugInfo();
}
@ -298,9 +275,10 @@ pub fn readLoadCommands(self: *Object, reader: anytype) !void {
}
pub fn parseSections(self: *Object) !void {
log.debug("parsing sections in {s}", .{self.name.?});
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
log.debug("parsing sections in {s}", .{self.name.?});
try self.sections.ensureCapacity(self.allocator, seg.sections.items.len);
for (seg.sections.items) |sect| {
@ -327,6 +305,7 @@ pub fn parseSections(self: *Object) !void {
self.arch.?,
section.code,
mem.bytesAsSlice(macho.relocation_info, raw_relocs),
self.symbols.items,
);
}
@ -344,60 +323,70 @@ pub fn parseInitializers(self: *Object) !void {
const relocs = section.relocs orelse unreachable;
try self.initializers.ensureCapacity(self.allocator, relocs.len);
for (relocs) |rel| {
self.initializers.appendAssumeCapacity(.{
.symbol = rel.target.symbol,
.target_addr = undefined,
});
self.initializers.appendAssumeCapacity(rel.target.symbol);
}
mem.reverse(CppStatic, self.initializers.items);
for (self.initializers.items) |initializer| {
const sym = self.symtab.items[initializer.symbol];
const sym_name = self.getString(sym.n_strx);
log.debug(" | {s}", .{sym_name});
}
mem.reverse(*Symbol, self.initializers.items);
}
pub fn parseSymtab(self: *Object) !void {
const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
pub fn parseSymbols(self: *Object) !void {
const index = self.symtab_cmd_index orelse return;
const symtab_cmd = self.load_commands.items[index].Symtab;
var symtab = try self.allocator.alloc(u8, @sizeOf(macho.nlist_64) * symtab_cmd.nsyms);
defer self.allocator.free(symtab);
_ = try self.file.?.preadAll(symtab, symtab_cmd.symoff);
const slice = @alignCast(@alignOf(macho.nlist_64), mem.bytesAsSlice(macho.nlist_64, symtab));
try self.symtab.appendSlice(self.allocator, slice);
var strtab = try self.allocator.alloc(u8, symtab_cmd.strsize);
defer self.allocator.free(strtab);
_ = try self.file.?.preadAll(strtab, symtab_cmd.stroff);
try self.strtab.appendSlice(self.allocator, strtab);
for (self.symtab.items) |sym, sym_id| {
if (Symbol.isStab(sym) or Symbol.isUndef(sym)) continue;
for (slice) |sym| {
if (Symbol.isStab(sym)) {
log.err("TODO handle stabs embedded within object files", .{});
return error.HandleStabsInObjects;
}
const sym_name = self.getString(sym.n_strx);
const tag: Symbol.Tag = tag: {
if (Symbol.isLocal(sym)) {
if (self.arch.? == .aarch64 and mem.startsWith(u8, sym_name, "l")) continue;
break :tag .local;
}
if (Symbol.isWeakDef(sym)) {
break :tag .weak;
}
break :tag .strong;
};
const sym_name = mem.spanZ(@ptrCast([*:0]const u8, strtab.ptr + sym.n_strx));
const name = try self.allocator.dupe(u8, sym_name);
try self.locals.putNoClobber(self.allocator, name, .{
.tag = tag,
.name = name,
.address = 0,
.section = 0,
.index = @intCast(u32, sym_id),
});
const symbol: *Symbol = symbol: {
if (Symbol.isSect(sym)) {
const linkage: Symbol.Regular.Linkage = linkage: {
if (!Symbol.isExt(sym)) break :linkage .translation_unit;
if (Symbol.isWeakDef(sym) or Symbol.isPext(sym)) break :linkage .linkage_unit;
break :linkage .global;
};
const regular = try self.allocator.create(Symbol.Regular);
errdefer self.allocator.destroy(regular);
regular.* = .{
.base = .{
.@"type" = .regular,
.name = name,
},
.linkage = linkage,
.address = sym.n_value,
.section = sym.n_sect - 1,
.weak_ref = Symbol.isWeakRef(sym),
.file = self,
};
break :symbol &regular.base;
}
const undef = try self.allocator.create(Symbol.Unresolved);
errdefer self.allocator.destroy(undef);
undef.* = .{
.base = .{
.@"type" = .unresolved,
.name = name,
},
.file = self,
};
break :symbol &undef.base;
};
try self.symbols.append(self.allocator, symbol);
}
}
@ -429,38 +418,31 @@ pub fn parseDebugInfo(self: *Object) !void {
break :mtime @intCast(u64, @divFloor(stat.mtime, 1_000_000_000));
};
for (self.locals.items()) |entry, index| {
const local = entry.value;
const source_sym = self.symtab.items[local.index.?];
const size = blk: for (debug_info.inner.func_list.items) |func| {
if (func.pc_range) |range| {
if (source_sym.n_value >= range.start and source_sym.n_value < range.end) {
break :blk range.end - range.start;
for (self.symbols.items) |sym| {
if (sym.cast(Symbol.Regular)) |reg| {
const size: u64 = blk: for (debug_info.inner.func_list.items) |func| {
if (func.pc_range) |range| {
if (reg.address >= range.start and reg.address < range.end) {
break :blk range.end - range.start;
}
}
}
} else null;
const tag: Stab.Tag = tag: {
if (size != null) break :tag .function;
switch (local.tag) {
.weak, .strong => break :tag .global,
else => break :tag .static,
}
};
} else 0;
try self.stabs.append(self.allocator, .{
.tag = tag,
.size = size,
.symbol = @intCast(u32, index),
});
reg.stab = .{
.kind = kind: {
if (size > 0) break :kind .function;
switch (reg.linkage) {
.translation_unit => break :kind .static,
else => break :kind .global,
}
},
.size = size,
};
}
}
}
pub fn getString(self: *const Object, str_off: u32) []const u8 {
assert(str_off < self.strtab.items.len);
return mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr + str_off));
}
pub fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
const sect = seg.sections.items[index];
var buffer = try allocator.alloc(u8, sect.size);
@ -468,7 +450,7 @@ pub fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
return buffer;
}
pub fn readDataInCode(self: *Object) !void {
pub fn parseDataInCode(self: *Object) !void {
const index = self.data_in_code_cmd_index orelse return;
const data_in_code = self.load_commands.items[index].LinkeditData;

View File

@ -2,31 +2,113 @@ const Symbol = @This();
const std = @import("std");
const macho = std.macho;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const Allocator = mem.Allocator;
const Object = @import("Object.zig");
pub const Tag = enum {
local,
weak,
strong,
import,
undef,
pub const Type = enum {
regular,
proxy,
unresolved,
};
tag: Tag,
/// Symbol type.
@"type": Type,
/// Symbol name. Owned slice.
name: []u8,
address: u64,
section: u8,
/// Index of file where to locate this symbol.
/// Depending on context, this is either an object file, or a dylib.
file: ?u16 = null,
/// Symbol this symbol is an alias of, if any.
alias: ?*Symbol = null,
/// Index of this symbol within the file's symbol table.
index: ?u32 = null,
/// Index in GOT table for indirection.
got_index: ?u32 = null,
pub fn deinit(self: *Symbol, allocator: *Allocator) void {
allocator.free(self.name);
/// Index in stubs table for late binding.
stubs_index: ?u32 = null,
pub const Regular = struct {
base: Symbol,
/// Linkage type.
linkage: Linkage,
/// Symbol address.
address: u64,
/// Section ID where the symbol resides.
section: u8,
/// Whether the symbol is a weak ref.
weak_ref: bool,
/// Object file in which this symbol is defined.
file: *Object,
/// Debug stab if defined.
stab: ?struct {
/// Stab kind
kind: enum {
function,
global,
static,
},
/// Size of the stab.
size: u64,
} = null,
pub const base_type: Symbol.Type = .regular;
pub const Linkage = enum {
translation_unit,
linkage_unit,
global,
};
pub fn isTemp(regular: *Regular) bool {
if (regular.linkage == .translation_unit) {
return mem.startsWith(u8, regular.base.name, "l") or mem.startsWith(u8, regular.base.name, "L");
}
return false;
}
};
pub const Proxy = struct {
base: Symbol,
/// Dylib ordinal.
dylib: u16,
pub const base_type: Symbol.Type = .proxy;
};
pub const Unresolved = struct {
base: Symbol,
/// File where this symbol was referenced.
file: *Object,
pub const base_type: Symbol.Type = .unresolved;
};
pub fn deinit(base: *Symbol, allocator: *Allocator) void {
allocator.free(base.name);
}
pub fn cast(base: *Symbol, comptime T: type) ?*T {
if (base.@"type" != T.base_type) {
return null;
}
return @fieldParentPtr(T, "base", base);
}
pub fn getTopmostAlias(base: *Symbol) *Symbol {
if (base.alias) |alias| {
return alias.getTopmostAlias();
}
return base;
}
pub fn isStab(sym: macho.nlist_64) bool {
@ -55,17 +137,6 @@ pub fn isWeakDef(sym: macho.nlist_64) bool {
return (sym.n_desc & macho.N_WEAK_DEF) != 0;
}
/// Symbol is local if it is defined and not an extern.
pub fn isLocal(sym: macho.nlist_64) bool {
return isSect(sym) and !isExt(sym);
}
/// Symbol is global if it is defined and an extern.
pub fn isGlobal(sym: macho.nlist_64) bool {
return isSect(sym) and isExt(sym);
}
/// Symbol is undefined if it is not defined and an extern.
pub fn isUndef(sym: macho.nlist_64) bool {
return isUndf(sym) and isExt(sym);
pub fn isWeakRef(sym: macho.nlist_64) bool {
return (sym.n_desc & macho.N_WEAK_REF) != 0;
}

File diff suppressed because it is too large

View File

@ -10,6 +10,7 @@ const aarch64 = @import("reloc/aarch64.zig");
const x86_64 = @import("reloc/x86_64.zig");
const Allocator = mem.Allocator;
const Symbol = @import("Symbol.zig");
pub const Relocation = struct {
@"type": Type,
@ -75,12 +76,12 @@ pub const Relocation = struct {
};
pub const Target = union(enum) {
symbol: u32,
symbol: *Symbol,
section: u16,
pub fn from_reloc(reloc: macho.relocation_info) Target {
pub fn from_reloc(reloc: macho.relocation_info, symbols: []*Symbol) Target {
return if (reloc.r_extern == 1) .{
.symbol = reloc.r_symbolnum,
.symbol = symbols[reloc.r_symbolnum],
} else .{
.section = @intCast(u16, reloc.r_symbolnum - 1),
};
@ -136,6 +137,7 @@ pub fn parse(
arch: std.Target.Cpu.Arch,
code: []u8,
relocs: []const macho.relocation_info,
symbols: []*Symbol,
) ![]*Relocation {
var it = RelocIterator{
.buffer = relocs,
@ -148,6 +150,7 @@ pub fn parse(
.it = &it,
.code = code,
.parsed = std.ArrayList(*Relocation).init(allocator),
.symbols = symbols,
};
defer parser.deinit();
try parser.parse();
@ -160,6 +163,7 @@ pub fn parse(
.it = &it,
.code = code,
.parsed = std.ArrayList(*Relocation).init(allocator),
.symbols = symbols,
};
defer parser.deinit();
try parser.parse();

View File

@ -10,6 +10,7 @@ const reloc = @import("../reloc.zig");
const Allocator = mem.Allocator;
const Relocation = reloc.Relocation;
const Symbol = @import("../Symbol.zig");
pub const Branch = struct {
base: Relocation,
@ -188,6 +189,7 @@ pub const Parser = struct {
it: *reloc.RelocIterator,
code: []u8,
parsed: std.ArrayList(*Relocation),
symbols: []*Symbol,
addend: ?u32 = null,
subtractor: ?Relocation.Target = null,
@ -273,7 +275,7 @@ pub const Parser = struct {
var branch = try parser.allocator.create(Branch);
errdefer parser.allocator.destroy(branch);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
branch.* = .{
.base = .{
@ -294,7 +296,7 @@ pub const Parser = struct {
assert(rel.r_length == 2);
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
@ -400,7 +402,7 @@ pub const Parser = struct {
aarch64.Instruction.load_store_register,
), inst) };
}
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var page_off = try parser.allocator.create(PageOff);
errdefer parser.allocator.destroy(page_off);
@ -437,7 +439,7 @@ pub const Parser = struct {
), inst);
assert(parsed_inst.size == 3);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var page_off = try parser.allocator.create(GotPageOff);
errdefer parser.allocator.destroy(page_off);
@ -496,7 +498,7 @@ pub const Parser = struct {
}
};
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var page_off = try parser.allocator.create(TlvpPageOff);
errdefer parser.allocator.destroy(page_off);
@ -531,7 +533,7 @@ pub const Parser = struct {
assert(rel.r_pcrel == 0);
assert(parser.subtractor == null);
parser.subtractor = Relocation.Target.from_reloc(rel);
parser.subtractor = Relocation.Target.from_reloc(rel, parser.symbols);
// Verify SUBTRACTOR is followed by UNSIGNED.
const next = @intToEnum(macho.reloc_type_arm64, parser.it.peek().r_type);
@ -554,7 +556,7 @@ pub const Parser = struct {
var unsigned = try parser.allocator.create(reloc.Unsigned);
errdefer parser.allocator.destroy(unsigned);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
const is_64bit: bool = switch (rel.r_length) {
3 => true,
2 => false,

View File

@ -9,6 +9,7 @@ const reloc = @import("../reloc.zig");
const Allocator = mem.Allocator;
const Relocation = reloc.Relocation;
const Symbol = @import("../Symbol.zig");
pub const Branch = struct {
base: Relocation,
@ -95,6 +96,7 @@ pub const Parser = struct {
it: *reloc.RelocIterator,
code: []u8,
parsed: std.ArrayList(*Relocation),
symbols: []*Symbol,
subtractor: ?Relocation.Target = null,
pub fn deinit(parser: *Parser) void {
@ -145,7 +147,7 @@ pub const Parser = struct {
var branch = try parser.allocator.create(Branch);
errdefer parser.allocator.destroy(branch);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
branch.* = .{
.base = .{
@ -165,7 +167,7 @@ pub const Parser = struct {
assert(rel.r_length == 2);
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
const is_extern = rel.r_extern == 1;
const offset = @intCast(u32, rel.r_address);
@ -211,7 +213,7 @@ pub const Parser = struct {
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var got_load = try parser.allocator.create(GotLoad);
errdefer parser.allocator.destroy(got_load);
@ -237,7 +239,7 @@ pub const Parser = struct {
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var got = try parser.allocator.create(Got);
errdefer parser.allocator.destroy(got);
@ -263,7 +265,7 @@ pub const Parser = struct {
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
var tlv = try parser.allocator.create(Tlv);
errdefer parser.allocator.destroy(tlv);
@ -288,7 +290,7 @@ pub const Parser = struct {
assert(rel.r_pcrel == 0);
assert(parser.subtractor == null);
parser.subtractor = Relocation.Target.from_reloc(rel);
parser.subtractor = Relocation.Target.from_reloc(rel, parser.symbols);
// Verify SUBTRACTOR is followed by UNSIGNED.
const next = @intToEnum(macho.reloc_type_x86_64, parser.it.peek().r_type);
@ -311,7 +313,7 @@ pub const Parser = struct {
var unsigned = try parser.allocator.create(reloc.Unsigned);
errdefer parser.allocator.destroy(unsigned);
const target = Relocation.Target.from_reloc(rel);
const target = Relocation.Target.from_reloc(rel, parser.symbols);
const is_64bit: bool = switch (rel.r_length) {
3 => true,
2 => false,

View File

@ -56,7 +56,7 @@ enum TargetSubsystem {
// ABI warning
// Synchronize with target.cpp::os_list
// Synchronize with std.Target.Os.Tag and target.cpp::os_list
enum Os {
OsFreestanding,
OsAnanas,
@ -94,6 +94,9 @@ enum Os {
OsWASI,
OsEmscripten,
OsUefi,
OsOpenCL,
OsGLSL450,
OsVulkan,
OsOther,
};

View File

@ -122,6 +122,9 @@ static const Os os_list[] = {
OsWASI,
OsEmscripten,
OsUefi,
OsOpenCL,
OsGLSL450,
OsVulkan,
OsOther,
};
@ -213,6 +216,9 @@ Os target_os_enum(size_t index) {
ZigLLVM_OSType get_llvm_os_type(Os os_type) {
switch (os_type) {
case OsFreestanding:
case OsOpenCL:
case OsGLSL450:
case OsVulkan:
case OsOther:
return ZigLLVM_UnknownOS;
case OsAnanas:
@ -330,6 +336,9 @@ const char *target_os_name(Os os_type) {
case OsHurd:
case OsWASI:
case OsEmscripten:
case OsOpenCL:
case OsGLSL450:
case OsVulkan:
return ZigLLVMGetOSTypeName(get_llvm_os_type(os_type));
}
zig_unreachable();
@ -733,6 +742,9 @@ uint32_t target_c_type_size_in_bits(const ZigTarget *target, CIntType id) {
case OsAMDPAL:
case OsHermitCore:
case OsHurd:
case OsOpenCL:
case OsGLSL450:
case OsVulkan:
zig_panic("TODO c type size in bits for this target");
}
zig_unreachable();
@ -999,6 +1011,10 @@ ZigLLVM_EnvironmentType target_default_abi(ZigLLVM_ArchType arch, Os os) {
case OsWASI:
case OsEmscripten:
return ZigLLVM_Musl;
case OsOpenCL:
case OsGLSL450:
case OsVulkan:
return ZigLLVM_UnknownEnvironment;
}
zig_unreachable();
}

View File

@ -239,6 +239,28 @@ const llvm_targets = [_]LlvmTarget{
"zcz_fp",
},
},
.{
.llvm_name = null,
.zig_name = "xgene1",
.features = &.{
"fp_armv8",
"neon",
"perfmon",
"v8a",
},
},
.{
.llvm_name = null,
.zig_name = "emag",
.features = &.{
"crc",
"crypto",
"fp_armv8",
"neon",
"perfmon",
"v8a",
},
},
},
},
.{