Compare commits

...

26 Commits

Author SHA1 Message Date
Nico Elbers
4d421e94a5
Merge fa5006819f into f845fa04a0 2024-11-21 10:08:30 +01:00
Alex Rønne Petersen
f845fa04a0 std.debug: Gracefully handle process_vm_readv() EPERM in MemoryAccessor.read().
Closes #21815.
2024-11-20 23:07:46 +01:00
Frank Denis
a5d4ad17b7
crypto.keccak.State: add checks to prevent insecure transitions (#22020)
* crypto.keccak.State: don't unconditionally permute after a squeeze()

Now, squeeze() behaves like absorb()

Namely,

squeeze(x[0..t]);
squeeze(x[t..n]); with t <= n

becomes equivalent to squeeze(x[0..n]).

* keccak: in debug mode, track transitions to prevent insecure ones.

Fixes #22019
2024-11-20 11:16:09 +01:00
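A minimal sketch of the new squeeze() behavior, closely mirroring the test added in this diff (the std.crypto.core.keccak.State import path is an assumption about where the type is exposed):

    const std = @import("std");
    // Assumption: the sponge State shown in the diff below is exposed as
    // std.crypto.core.keccak.State.
    const State = std.crypto.core.keccak.State;

    test "incremental squeeze matches one-shot squeeze" {
        var st = State(800, 256, 22).init([_]u8{0x80} ** 100, 0x01);
        st.permute();

        var one_shot: [15]u8 = undefined;
        var st0 = st;
        st0.squeeze(one_shot[0..]);

        // Two partial squeezes over the same range now yield the same bytes
        // as a single squeeze; no permutation is forced in between.
        var split: [15]u8 = undefined;
        var st1 = st;
        st1.squeeze(split[0..7]);
        st1.squeeze(split[7..]);

        try std.testing.expectEqualSlices(u8, &one_shot, &split);
    }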
Shawn Gao
dafe1a910d Append disabled LLVM CPU features after enabled ones 2024-11-20 10:09:03 +01:00
Frank Denis
acba2645f7
crypto.aes.soft: use std.atomic.cache_line instead of a hardcoded value (#22026) 2024-11-20 03:48:18 +00:00
xdBronch
5f3a70ed5f Fix peer type resolution with allowzero pointers 2024-11-20 02:09:50 +02:00
Alex Kladov
865ef24518 build: don't hang when capturing Stdout of verbose Build.Step.Run
When using Build.Step.Run.captureStdOut with a program that prints more
than 10 megabytes of output, the build process hangs.

This is because evalGeneric returns an error without reading the child's
stdout to the end, so we subsequently get stuck in `try child.wait()`.

To fix this, make sure to kill the child in case of an error!

Output before this change:

    λ ./zig/zig build  -Dmultiversion=0.15.6 -Dconfig-release=0.15.7 -Dconfig-release-client-min=0.15.6
    [3/8] steps
    └─ run gh
    ^C
    λ # an hour of debugging

Output after this change:

    λ ./zig/zig build  -Dmultiversion=0.15.6 -Dconfig-release=0.15.7 -Dconfig-release-client-min=0.15.6
    install
    └─ install generated to ../tigerbeetle
       └─ run build_mutliversion (tigerbeetle)
          └─ run unzip
             └─ run gh failure
    error: unable to spawn gh: StdoutStreamTooLong
    Build Summary: 3/8 steps succeeded; 1 failed (disable with --summary none)
    install transitive failure
    └─ install generated to ../tigerbeetle transitive failure
       └─ run build_mutliversion (tigerbeetle) transitive failure
          └─ run unzip transitive failure
             └─ run gh failure
    error: the following build command failed with exit code 1:
    /home/matklad/p/tb/work/.zig-cache/o/c0e3f5e66ff441cd16f9a1a7e1401494/build /home/matklad/p/tb/work/zig/zig /home/matklad/p/tb/work /home/matklad/p/tb/work/.zig-cache /home/matklad/.cache/zig --seed 0xc1d4efc8 -Zaecc61299ff08765 -Dmultiversion=0.15.6 -Dconfig-release=0.15.7 -Dconfig-release-client-min=0.15.6
2024-11-19 11:50:38 -08:00
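The essence of the fix, as a hedged standalone sketch (runAndCapture and the 10 MiB cap here are illustrative, not the actual Build.Step.Run code): if draining the child's stdout fails, kill the child so that wait() is never called on a process blocked writing into a full pipe.

    const std = @import("std");

    fn runAndCapture(allocator: std.mem.Allocator, argv: []const []const u8) ![]u8 {
        var child = std.process.Child.init(argv, allocator);
        child.stdout_behavior = .Pipe;
        try child.spawn();
        // The fix: on any error below, kill the child instead of leaving it
        // alive (and possibly blocked) while this process waits on it.
        errdefer {
            _ = child.kill() catch {};
        }
        // Fails (error.FileTooBig) once the output cap is exceeded.
        const out = try child.stdout.?.readToEndAlloc(allocator, 10 * 1024 * 1024);
        errdefer allocator.free(out);
        _ = try child.wait();
        return out;
    }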
Frank Denis
8a00bd4ce6
std.crypto: make the key pair API creation consistent (#21955)
Our key pair creation API was ugly and inconsistent between ecdsa
keys and other keys.

The same `generate()` function can now be used to generate key pairs,
and that function cannot fail.

For deterministic keys, a `generateDeterministic()` function is
available for all key types.

Along the way, fix comments and compilation of the benchmark.

Fixes #21002
2024-11-19 18:05:09 +01:00
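A minimal usage sketch of the new, consistent API, based on the test changes in this diff (the seed value is illustrative; 32 is the Ed25519 seed length):

    const std = @import("std");
    const Ed25519 = std.crypto.sign.Ed25519;

    test "consistent key pair creation" {
        // Random key pair: generate() cannot fail.
        const kp = Ed25519.KeyPair.generate();
        const sig = try kp.sign("test", null);
        try sig.verify("test", kp.public_key);

        // Deterministic key pair from a secret seed (mainly for tests and KATs);
        // this variant can still fail, e.g. with error.IdentityElement.
        const seed = [_]u8{0x42} ** 32;
        _ = try Ed25519.KeyPair.generateDeterministic(seed);
    }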
Alex Rønne Petersen
94be75a94f Compilation: Re-enable LTO for RISC-V. 2024-11-19 12:04:42 +01:00
Daniel Berg
b5f9e07034
std.c.darwin: make os_log_t a pointer to opaque 2024-11-19 09:32:42 +00:00
Ian Johnson
dceab4502a zig fetch: handle redirects for Git packages
Closes #21976
2024-11-19 00:35:00 -08:00
Andrew Kelley
fbcb00fbb3
Merge pull request #22004 from jacobly0/fix-self-llvm
fix llvm-enabled compiler builds with the self-hosted backend
2024-11-18 16:44:12 -08:00
Alex Rønne Petersen
e6d2e16413 Compilation: Disable LTO for all ILP32-on-LP64 ABIs.
Extension of 3a6a8b8aa5 to all similar ABIs. The
LLD issue affects them all.
2024-11-18 19:10:14 +01:00
Alex Rønne Petersen
a703b85c7c libunwind: Fix compilation for the x32 ABI.
See: https://github.com/llvm/llvm-project/pull/116608
2024-11-18 17:43:54 +01:00
Tw
a9c7714b78 linux/bpf: add alignment for helper functions to make compiler happy
Signed-off-by: Tw <tw19881113@gmail.com>
2024-11-18 16:19:44 +01:00
gooncreeper
73f2671c7b
std.format: properly handle vectors of pointers 2024-11-18 13:48:54 +02:00
Alex Rønne Petersen
3a6a8b8aa5 Compilation: Disable LTO for mips n32.
See: https://github.com/llvm/llvm-project/pull/116537
2024-11-17 15:15:59 +01:00
Jacob Young
41282e7fb2 build.zig: fix libc++ being a linker script 2024-11-17 00:55:36 -05:00
Jacob Young
5be8a5fe5f link: fix memory bugs 2024-11-16 21:29:17 -05:00
Jacob Young
a8ec306b49 Sema: fix peer resolution alignment between slice and empty struct
An empty struct that coerces to an empty array should not force
`align(1)` on the resulting slice type.
2024-11-16 21:22:57 -05:00
Jacob Young
7266d4497e
Merge pull request #21999 from jacobly0/incr-cases
link: fix failing incremental test cases
2024-11-16 20:32:02 -05:00
Jacob Young
11e54a3559 link: fix failing incremental test cases 2024-11-16 14:03:31 -05:00
Jacob Young
96552638ae dev: support incremental for x86_64-linux env 2024-11-16 11:49:49 -05:00
Nico Elbers
fa5006819f
Remove reason when none is specified 2024-08-24 00:53:02 +02:00
Nico Elbers
fc6ff3af11
Show skip reason in build summary 2024-08-24 00:53:02 +02:00
Nico Elbers
bf588efbe6
Add skip reason to Step 2024-08-24 00:52:40 +02:00
33 changed files with 724 additions and 432 deletions

View File

@ -861,6 +861,10 @@ fn addCxxKnownPath(
}
return error.RequiredLibraryNotFound;
}
// By default, explicit library paths are not checked for being linker scripts,
// but libc++ may very well be one, so force all inputs to be checked when passing
// an explicit path to libc++.
exe.allow_so_scripts = true;
exe.addObjectFile(.{ .cwd_relative = path_unpadded });
// TODO a way to integrate with system c++ include files here

View File

@ -841,6 +841,10 @@ fn printStepStatus(
.skipped, .skipped_oom => |skip| {
try ttyconf.setColor(stderr, .yellow);
try stderr.writeAll(" skipped");
if (s.result_skip_reason) |reason| {
try stderr.writeAll(": ");
try stderr.writeAll(reason);
}
if (skip == .skipped_oom) {
try stderr.writeAll(" (not enough memory)");
try ttyconf.setColor(stderr, .dim);

View File

@ -53,6 +53,9 @@
# else
# define _LIBUNWIND_CURSOR_SIZE 66
# endif
# elif defined(__ILP32__)
# define _LIBUNWIND_CONTEXT_SIZE 21
# define _LIBUNWIND_CURSOR_SIZE 28
# else
# define _LIBUNWIND_CONTEXT_SIZE 21
# define _LIBUNWIND_CURSOR_SIZE 33

View File

@ -39,6 +39,7 @@ state: State,
/// total system memory available.
max_rss: usize,
result_skip_reason: ?[]const u8 = null,
result_error_msgs: std.ArrayListUnmanaged([]const u8),
result_error_bundle: std.zig.ErrorBundle,
result_stderr: []const u8,

View File

@ -1017,7 +1017,10 @@ fn runCommand(
.link_libc = exe.is_linking_libc,
})) {
.native, .rosetta => {
if (allow_skip) return error.MakeSkipped;
if (allow_skip) {
run.step.result_skip_reason = "Invalid binary";
return error.MakeSkipped;
}
break :interpret;
},
.wine => |bin_name| {
@ -1098,7 +1101,10 @@ fn runCommand(
}
},
.bad_dl => |foreign_dl| {
if (allow_skip) return error.MakeSkipped;
if (allow_skip) {
run.step.result_skip_reason = "Invalid binary";
return error.MakeSkipped;
}
const host_dl = b.graph.host.result.dynamic_linker.get() orelse "(none)";
@ -1110,7 +1116,10 @@ fn runCommand(
, .{ host_dl, foreign_dl });
},
.bad_os_or_cpu => {
if (allow_skip) return error.MakeSkipped;
if (allow_skip) {
run.step.result_skip_reason = "Invalid os or cpu";
return error.MakeSkipped;
}
const host_name = try b.graph.host.result.zigTriple(b.allocator);
const foreign_name = try exe.rootModuleTarget().zigTriple(b.allocator);
@ -1129,7 +1138,10 @@ fn runCommand(
try Step.handleVerbose2(step.owner, cwd, run.env_map, interp_argv.items);
break :term spawnChildAndCollect(run, interp_argv.items, has_side_effects, prog_node, fuzz_context) catch |e| {
if (!run.failing_to_execute_foreign_is_an_error) return error.MakeSkipped;
if (!run.failing_to_execute_foreign_is_an_error) {
run.step.result_skip_reason = "Foreign binary failed";
return error.MakeSkipped;
}
return step.fail("unable to spawn interpreter {s}: {s}", .{
interp_argv.items[0], @errorName(e),
@ -1369,18 +1381,22 @@ fn spawnChildAndCollect(
defer if (inherit) std.debug.unlockStdErr();
try child.spawn();
errdefer {
_ = child.kill() catch {};
}
var timer = try std.time.Timer.start();
const result = if (run.stdio == .zig_test)
evalZigTest(run, &child, prog_node, fuzz_context)
try evalZigTest(run, &child, prog_node, fuzz_context)
else
evalGeneric(run, &child);
try evalGeneric(run, &child);
break :t .{ try child.wait(), result, timer.read() };
};
return .{
.stdio = try result,
.stdio = result,
.term = term,
.elapsed_ns = elapsed_ns,
.peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0,
@ -1732,8 +1748,10 @@ fn failForeign(
) error{ MakeFailed, MakeSkipped, OutOfMemory } {
switch (run.stdio) {
.check, .zig_test => {
if (run.skip_foreign_checks)
if (run.skip_foreign_checks) {
run.step.result_skip_reason = "Foreign binary failed";
return error.MakeSkipped;
}
const b = run.step.owner;
const host_name = try b.graph.host.result.zigTriple(b.allocator);

View File

@ -928,7 +928,7 @@ pub const OS_SIGNPOST_ID_NULL: os_signpost_id_t = 0;
pub const OS_SIGNPOST_ID_INVALID: os_signpost_id_t = !0;
pub const OS_SIGNPOST_ID_EXCLUSIVE: os_signpost_id_t = 0xeeeeb0b5b2b2eeee;
pub const os_log_t = opaque {};
pub const os_log_t = *opaque {};
pub const os_log_type_t = enum(u8) {
/// default messages always captures
OS_LOG_TYPE_DEFAULT = 0x00,

View File

@ -245,7 +245,9 @@ pub const Ed25519 = struct {
/// Secret scalar.
secret_key: SecretKey,
/// Derive a key pair from an optional secret seed.
/// Deterministically derive a key pair from a cryptographically secure secret seed.
///
/// Except in tests, applications should generally call `generate()` instead of this function.
///
/// As in RFC 8032, an Ed25519 public key is generated by hashing
/// the secret key using the SHA-512 function, and interpreting the
@ -253,20 +255,15 @@ pub const Ed25519 = struct {
///
/// For this reason, an EdDSA secret key is commonly called a seed,
/// from which the actual secret is derived.
pub fn create(seed: ?[seed_length]u8) IdentityElementError!KeyPair {
const ss = seed orelse ss: {
var random_seed: [seed_length]u8 = undefined;
crypto.random.bytes(&random_seed);
break :ss random_seed;
};
pub fn generateDeterministic(seed: [seed_length]u8) IdentityElementError!KeyPair {
var az: [Sha512.digest_length]u8 = undefined;
var h = Sha512.init(.{});
h.update(&ss);
h.update(&seed);
h.final(&az);
const pk_p = Curve.basePoint.clampedMul(az[0..32].*) catch return error.IdentityElement;
const pk_bytes = pk_p.toBytes();
var sk_bytes: [SecretKey.encoded_length]u8 = undefined;
sk_bytes[0..ss.len].* = ss;
sk_bytes[0..seed_length].* = seed;
sk_bytes[seed_length..].* = pk_bytes;
return KeyPair{
.public_key = PublicKey.fromBytes(pk_bytes) catch unreachable,
@ -274,7 +271,22 @@ pub const Ed25519 = struct {
};
}
/// Create a KeyPair from a secret key.
/// Generate a new, random key pair.
///
/// `crypto.random.bytes` must be supported by the target.
pub fn generate() KeyPair {
var random_seed: [seed_length]u8 = undefined;
while (true) {
crypto.random.bytes(&random_seed);
return generateDeterministic(random_seed) catch {
@branchHint(.unlikely);
continue;
};
}
}
/// Create a key pair from an existing secret key.
///
/// Note that with EdDSA, storing the seed, and recovering the key pair
/// from it is recommended over storing the entire secret key.
/// The seed of an existing key pair can be obtained with
@ -285,7 +297,7 @@ pub const Ed25519 = struct {
// With runtime safety, we can still afford checking that the public key is correct.
if (std.debug.runtime_safety) {
const pk_p = try Curve.fromBytes(secret_key.publicKeyBytes());
const recomputed_kp = try create(secret_key.seed());
const recomputed_kp = try generateDeterministic(secret_key.seed());
debug.assert(mem.eql(u8, &recomputed_kp.public_key.toBytes(), &pk_p.toBytes()));
}
return KeyPair{
@ -492,7 +504,7 @@ pub const Ed25519 = struct {
test "key pair creation" {
var seed: [32]u8 = undefined;
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
const key_pair = try Ed25519.KeyPair.generateDeterministic(seed);
var buf: [256]u8 = undefined;
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.secret_key.toBytes())}), "8052030376D47112BE7F73ED7A019293DD12AD910B654455798B4667D73DE1662D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
try std.testing.expectEqualStrings(try std.fmt.bufPrint(&buf, "{s}", .{std.fmt.fmtSliceHexUpper(&key_pair.public_key.toBytes())}), "2D6F7455D97B4A3A10D7293909D1A4F2058CB9A370E43FA8154BB280DB839083");
@ -501,7 +513,7 @@ test "key pair creation" {
test "signature" {
var seed: [32]u8 = undefined;
_ = try fmt.hexToBytes(seed[0..], "8052030376d47112be7f73ed7a019293dd12ad910b654455798b4667d73de166");
const key_pair = try Ed25519.KeyPair.create(seed);
const key_pair = try Ed25519.KeyPair.generateDeterministic(seed);
const sig = try key_pair.sign("test", null);
var buf: [128]u8 = undefined;
@ -513,7 +525,7 @@ test "signature" {
test "batch verification" {
var i: usize = 0;
while (i < 100) : (i += 1) {
const key_pair = try Ed25519.KeyPair.create(null);
const key_pair = Ed25519.KeyPair.generate();
var msg1: [32]u8 = undefined;
var msg2: [32]u8 = undefined;
crypto.random.bytes(&msg1);
@ -645,7 +657,7 @@ test "with blind keys" {
const BlindKeyPair = Ed25519.key_blinding.BlindKeyPair;
// Create a standard Ed25519 key pair
const kp = try Ed25519.KeyPair.create(null);
const kp = Ed25519.KeyPair.generate();
// Create a random blinding seed
var blind: [32]u8 = undefined;
@ -665,7 +677,7 @@ test "with blind keys" {
}
test "signatures with streaming" {
const kp = try Ed25519.KeyPair.create(null);
const kp = Ed25519.KeyPair.generate();
var signer = try kp.signer(null);
signer.update("mes");
@ -681,7 +693,7 @@ test "signatures with streaming" {
}
test "key pair from secret key" {
const kp = try Ed25519.KeyPair.create(null);
const kp = Ed25519.KeyPair.generate();
const kp2 = try Ed25519.KeyPair.fromSecretKey(kp.secret_key);
try std.testing.expectEqualSlices(u8, &kp.secret_key.toBytes(), &kp2.secret_key.toBytes());
try std.testing.expectEqualSlices(u8, &kp.public_key.toBytes(), &kp2.public_key.toBytes());

View File

@ -29,19 +29,29 @@ pub const X25519 = struct {
/// Secret part.
secret_key: [secret_length]u8,
/// Create a new key pair using an optional seed.
pub fn create(seed: ?[seed_length]u8) IdentityElementError!KeyPair {
const sk = seed orelse sk: {
var random_seed: [seed_length]u8 = undefined;
crypto.random.bytes(&random_seed);
break :sk random_seed;
/// Deterministically derive a key pair from a cryptographically secure secret seed.
///
/// Except in tests, applications should generally call `generate()` instead of this function.
pub fn generateDeterministic(seed: [seed_length]u8) IdentityElementError!KeyPair {
const kp = KeyPair{
.public_key = try X25519.recoverPublicKey(seed),
.secret_key = seed,
};
var kp: KeyPair = undefined;
kp.secret_key = sk;
kp.public_key = try X25519.recoverPublicKey(sk);
return kp;
}
/// Generate a new, random key pair.
pub fn generate() KeyPair {
var random_seed: [seed_length]u8 = undefined;
while (true) {
crypto.random.bytes(&random_seed);
return generateDeterministic(random_seed) catch {
@branchHint(.unlikely);
continue;
};
}
}
/// Create a key pair from an Ed25519 key pair
pub fn fromEd25519(ed25519_key_pair: crypto.sign.Ed25519.KeyPair) (IdentityElementError || EncodingError)!KeyPair {
const seed = ed25519_key_pair.secret_key.seed();
@ -171,7 +181,7 @@ test "rfc7748 1,000,000 iterations" {
}
test "edwards25519 -> curve25519 map" {
const ed_kp = try crypto.sign.Ed25519.KeyPair.create([_]u8{0x42} ** 32);
const ed_kp = try crypto.sign.Ed25519.KeyPair.generateDeterministic([_]u8{0x42} ** 32);
const mont_kp = try X25519.KeyPair.fromEd25519(ed_kp);
try htest.assertEqual("90e7595fc89e52fdfddce9c6a43d74dbf6047025ee0462d2d172e8b6a2841d6e", &mont_kp.secret_key);
try htest.assertEqual("cc4f2cdb695dd766f34118eb67b98652fed1d8bc49c330b119bbfa8a64989378", &mont_kp.public_key);

View File

@ -669,7 +669,7 @@ fn mul(a: u8, b: u8) u8 {
return @as(u8, @truncate(s));
}
const cache_line_bytes = 64;
const cache_line_bytes = std.atomic.cache_line;
inline fn sbox_lookup(sbox: *align(64) const [256]u8, idx0: u8, idx1: u8, idx2: u8, idx3: u8) [4]u8 {
if (side_channels_mitigations == .none) {
@ -683,8 +683,8 @@ inline fn sbox_lookup(sbox: *align(64) const [256]u8, idx0: u8, idx1: u8, idx2:
const stride = switch (side_channels_mitigations) {
.none => unreachable,
.basic => sbox.len / 4,
.medium => sbox.len / (sbox.len / cache_line_bytes) * 2,
.full => sbox.len / (sbox.len / cache_line_bytes),
.medium => @min(sbox.len, 2 * cache_line_bytes),
.full => @min(sbox.len, cache_line_bytes),
};
const of0 = idx0 % stride;
const of1 = idx1 % stride;
@ -718,12 +718,11 @@ inline fn table_lookup(table: *align(64) const [4][256]u32, idx0: u8, idx1: u8,
table[3][idx3],
};
} else {
const table_bytes = @sizeOf(@TypeOf(table[0]));
const stride = switch (side_channels_mitigations) {
.none => unreachable,
.basic => table[0].len / 4,
.medium => table[0].len / (table_bytes / cache_line_bytes) * 2,
.full => table[0].len / (table_bytes / cache_line_bytes),
.medium => @max(1, @min(table[0].len, 2 * cache_line_bytes / 4)),
.full => @max(1, @min(table[0].len, cache_line_bytes / 4)),
};
const of0 = idx0 % stride;
const of1 = idx1 % stride;

View File

@ -140,7 +140,7 @@ const signatures = [_]Crypto{
pub fn benchmarkSignature(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
const msg = [_]u8{0} ** 64;
const key_pair = try Signature.KeyPair.create(null);
const key_pair = Signature.KeyPair.generate();
var timer = try Timer.start();
const start = timer.lap();
@ -163,7 +163,7 @@ const signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed25519, .na
pub fn benchmarkSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
const msg = [_]u8{0} ** 64;
const key_pair = try Signature.KeyPair.create(null);
const key_pair = Signature.KeyPair.generate();
const sig = try key_pair.sign(&msg, null);
var timer = try Timer.start();
@ -187,7 +187,7 @@ const batch_signature_verifications = [_]Crypto{Crypto{ .ty = crypto.sign.Ed2551
pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime signatures_count: comptime_int) !u64 {
const msg = [_]u8{0} ** 64;
const key_pair = try Signature.KeyPair.create(null);
const key_pair = Signature.KeyPair.generate();
const sig = try key_pair.sign(&msg, null);
var batch: [64]Signature.BatchElement = undefined;
@ -219,7 +219,7 @@ const kems = [_]Crypto{
};
pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u64 {
const key_pair = try Kem.KeyPair.create(null);
const key_pair = Kem.KeyPair.generate();
var timer = try Timer.start();
const start = timer.lap();
@ -239,7 +239,7 @@ pub fn benchmarkKem(comptime Kem: anytype, comptime kems_count: comptime_int) !u
}
pub fn benchmarkKemDecaps(comptime Kem: anytype, comptime kems_count: comptime_int) !u64 {
const key_pair = try Kem.KeyPair.create(null);
const key_pair = Kem.KeyPair.generate();
const e = key_pair.public_key.encaps(null);
@ -266,7 +266,7 @@ pub fn benchmarkKemKeyGen(comptime Kem: anytype, comptime kems_count: comptime_i
{
var i: usize = 0;
while (i < kems_count) : (i += 1) {
const key_pair = try Kem.KeyPair.create(null);
const key_pair = Kem.KeyPair.generate();
mem.doNotOptimizeAway(&key_pair);
}
}
@ -409,7 +409,7 @@ fn benchmarkPwhash(
comptime count: comptime_int,
) !f64 {
const password = "testpass" ** 2;
const opts = .{
const opts = ty.HashOptions{
.allocator = allocator,
.params = @as(*const ty.Params, @ptrCast(@alignCast(params))).*,
.encoding = .phc,

View File

@ -296,21 +296,28 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
/// Secret scalar.
secret_key: SecretKey,
/// Create a new random key pair. `crypto.random.bytes` must be supported for the target.
pub fn generate() IdentityElementError!KeyPair {
var random_seed: [seed_length]u8 = undefined;
crypto.random.bytes(&random_seed);
return create(random_seed);
}
/// Create a new key pair. The seed must be secret and indistinguishable from random.
pub fn create(seed: [seed_length]u8) IdentityElementError!KeyPair {
/// Deterministically derive a key pair from a cryptographically secure secret seed.
///
/// Except in tests, applications should generally call `generate()` instead of this function.
pub fn generateDeterministic(seed: [seed_length]u8) IdentityElementError!KeyPair {
const h = [_]u8{0x00} ** Hash.digest_length;
const k0 = [_]u8{0x01} ** SecretKey.encoded_length;
const secret_key = deterministicScalar(h, k0, seed).toBytes(.big);
return fromSecretKey(SecretKey{ .bytes = secret_key });
}
/// Generate a new, random key pair.
pub fn generate() KeyPair {
var random_seed: [seed_length]u8 = undefined;
while (true) {
crypto.random.bytes(&random_seed);
return generateDeterministic(random_seed) catch {
@branchHint(.unlikely);
continue;
};
}
}
/// Return the public key corresponding to the secret key.
pub fn fromSecretKey(secret_key: SecretKey) IdentityElementError!KeyPair {
const public_key = try Curve.basePoint.mul(secret_key.bytes, .big);
@ -387,7 +394,7 @@ test "Basic operations over EcdsaP384Sha384" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const Scheme = EcdsaP384Sha384;
const kp = try Scheme.KeyPair.generate();
const kp = Scheme.KeyPair.generate();
const msg = "test";
var noise: [Scheme.noise_length]u8 = undefined;
@ -403,7 +410,7 @@ test "Basic operations over Secp256k1" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const Scheme = EcdsaSecp256k1Sha256oSha256;
const kp = try Scheme.KeyPair.generate();
const kp = Scheme.KeyPair.generate();
const msg = "test";
var noise: [Scheme.noise_length]u8 = undefined;
@ -419,7 +426,7 @@ test "Basic operations over EcdsaP384Sha256" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const Scheme = Ecdsa(crypto.ecc.P384, crypto.hash.sha2.Sha256);
const kp = try Scheme.KeyPair.generate();
const kp = Scheme.KeyPair.generate();
const msg = "test";
var noise: [Scheme.noise_length]u8 = undefined;
@ -893,7 +900,7 @@ test "Sec1 encoding/decoding" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const Scheme = EcdsaP384Sha384;
const kp = try Scheme.KeyPair.generate();
const kp = Scheme.KeyPair.generate();
const pk = kp.public_key;
const pk_compressed_sec1 = pk.toCompressedSec1();
const pk_recovered1 = try Scheme.PublicKey.fromSec1(&pk_compressed_sec1);

View File

@ -4,6 +4,7 @@ const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const native_endian = builtin.cpu.arch.endian();
const mode = @import("builtin").mode;
/// The Keccak-f permutation.
pub fn KeccakF(comptime f: u11) type {
@ -199,6 +200,46 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type
comptime assert(f >= 200 and f <= 1600 and f % 200 == 0); // invalid state size
comptime assert(capacity < f and capacity % 8 == 0); // invalid capacity size
// In debug mode, track transitions to prevent insecure ones.
const Op = enum { uninitialized, initialized, updated, absorb, squeeze };
const TransitionTracker = if (mode == .Debug) struct {
op: Op = .uninitialized,
fn to(tracker: *@This(), next_op: Op) void {
switch (next_op) {
.updated => {
switch (tracker.op) {
.uninitialized => @panic("cannot permute before initializing"),
else => {},
}
},
.absorb => {
switch (tracker.op) {
.squeeze => @panic("cannot absorb right after squeezing"),
else => {},
}
},
.squeeze => {
switch (tracker.op) {
.uninitialized => @panic("cannot squeeze before initializing"),
.initialized => @panic("cannot squeeze right after initializing"),
.absorb => @panic("cannot squeeze right after absorbing"),
else => {},
}
},
.uninitialized => @panic("cannot transition to uninitialized"),
.initialized => {},
}
tracker.op = next_op;
}
} else struct {
// No-op in non-debug modes.
inline fn to(tracker: *@This(), next_op: Op) void {
_ = tracker; // no-op
_ = next_op; // no-op
}
};
return struct {
const Self = @This();
@ -215,67 +256,108 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime rounds: u5) type
st: KeccakF(f) = .{},
transition: TransitionTracker = .{},
/// Absorb a slice of bytes into the sponge.
pub fn absorb(self: *Self, bytes_: []const u8) void {
var bytes = bytes_;
pub fn absorb(self: *Self, bytes: []const u8) void {
self.transition.to(.absorb);
var i: usize = 0;
if (self.offset > 0) {
const left = @min(rate - self.offset, bytes.len);
@memcpy(self.buf[self.offset..][0..left], bytes[0..left]);
self.offset += left;
if (left == bytes.len) return;
if (self.offset == rate) {
self.offset = 0;
self.st.addBytes(self.buf[0..]);
self.st.permuteR(rounds);
self.offset = 0;
}
if (left == bytes.len) return;
bytes = bytes[left..];
i = left;
}
while (bytes.len >= rate) {
self.st.addBytes(bytes[0..rate]);
while (i + rate < bytes.len) : (i += rate) {
self.st.addBytes(bytes[i..][0..rate]);
self.st.permuteR(rounds);
bytes = bytes[rate..];
}
if (bytes.len > 0) {
@memcpy(self.buf[0..bytes.len], bytes);
self.offset = bytes.len;
const left = bytes.len - i;
if (left > 0) {
@memcpy(self.buf[0..left], bytes[i..][0..left]);
}
self.offset = left;
}
/// Initialize the state from a slice of bytes.
pub fn init(bytes: [f / 8]u8) Self {
return .{ .st = KeccakF(f).init(bytes) };
pub fn init(bytes: [f / 8]u8, delim: u8) Self {
var st = Self{ .st = KeccakF(f).init(bytes), .delim = delim };
st.transition.to(.initialized);
return st;
}
/// Permute the state
pub fn permute(self: *Self) void {
if (mode == .Debug) {
if (self.transition.op == .absorb and self.offset > 0) {
@panic("cannot permute with pending input - call fillBlock() or pad() instead");
}
}
self.transition.to(.updated);
self.st.permuteR(rounds);
self.offset = 0;
}
/// Align the input to the rate boundary.
/// Align the input to the rate boundary and permute.
pub fn fillBlock(self: *Self) void {
self.transition.to(.absorb);
self.st.addBytes(self.buf[0..self.offset]);
self.st.permuteR(rounds);
self.offset = 0;
self.transition.to(.updated);
}
/// Mark the end of the input.
pub fn pad(self: *Self) void {
self.transition.to(.absorb);
self.st.addBytes(self.buf[0..self.offset]);
if (self.offset == rate) {
self.st.permuteR(rounds);
self.offset = 0;
}
self.st.addByte(self.delim, self.offset);
self.st.addByte(0x80, rate - 1);
self.st.permuteR(rounds);
self.offset = 0;
self.transition.to(.updated);
}
/// Squeeze a slice of bytes from the sponge.
/// The function can be called multiple times.
pub fn squeeze(self: *Self, out: []u8) void {
self.transition.to(.squeeze);
var i: usize = 0;
while (i < out.len) : (i += rate) {
const left = @min(rate, out.len - i);
self.st.extractBytes(out[i..][0..left]);
if (self.offset == rate) {
self.st.permuteR(rounds);
} else if (self.offset > 0) {
@branchHint(.unlikely);
var buf: [rate]u8 = undefined;
self.st.extractBytes(buf[0..]);
const left = @min(rate - self.offset, out.len);
@memcpy(out[0..left], buf[self.offset..][0..left]);
self.offset += left;
if (left == out.len) return;
if (self.offset == rate) {
self.offset = 0;
self.st.permuteR(rounds);
}
i = left;
}
while (i + rate < out.len) : (i += rate) {
self.st.extractBytes(out[i..][0..rate]);
self.st.permuteR(rounds);
}
const left = out.len - i;
if (left > 0) {
self.st.extractBytes(out[i..][0..left]);
}
self.offset = left;
}
};
}
@ -298,3 +380,26 @@ test "Keccak-f800" {
};
try std.testing.expectEqualSlices(u32, &st.st, &expected);
}
test "squeeze" {
var st = State(800, 256, 22).init([_]u8{0x80} ** 100, 0x01);
var out0: [15]u8 = undefined;
var out1: [out0.len]u8 = undefined;
st.permute();
var st0 = st;
st0.squeeze(out0[0..]);
var st1 = st;
st1.squeeze(out1[0 .. out1.len / 2]);
st1.squeeze(out1[out1.len / 2 ..]);
try std.testing.expectEqualSlices(u8, &out0, &out1);
var out2: [100]u8 = undefined;
var out3: [out2.len]u8 = undefined;
var st2 = st;
st2.squeeze(out2[0..]);
var st3 = st;
st3.squeeze(out3[0 .. out2.len / 2]);
st3.squeeze(out3[out2.len / 2 ..]);
try std.testing.expectEqualSlices(u8, &out2, &out3);
}

View File

@ -370,15 +370,10 @@ fn Kyber(comptime p: Params) type {
secret_key: SecretKey,
public_key: PublicKey,
/// Create a new key pair.
/// If seed is null, a random seed will be generated.
/// If a seed is provided, the key pair will be deterministic.
pub fn create(seed_: ?[seed_length]u8) !KeyPair {
const seed = seed_ orelse sk: {
var random_seed: [seed_length]u8 = undefined;
crypto.random.bytes(&random_seed);
break :sk random_seed;
};
/// Deterministically derive a key pair from a cryptographically secure secret seed.
///
/// Except in tests, applications should generally call `generate()` instead of this function.
pub fn generateDeterministic(seed: [seed_length]u8) !KeyPair {
var ret: KeyPair = undefined;
ret.secret_key.z = seed[inner_seed_length..seed_length].*;
@ -399,6 +394,18 @@ fn Kyber(comptime p: Params) type {
return ret;
}
/// Generate a new, random key pair.
pub fn generate() KeyPair {
var random_seed: [seed_length]u8 = undefined;
while (true) {
crypto.random.bytes(&random_seed);
return generateDeterministic(random_seed) catch {
@branchHint(.unlikely);
continue;
};
}
}
};
// Size of plaintexts of the in
@ -1698,7 +1705,7 @@ test "Test happy flow" {
inline for (modes) |mode| {
for (0..10) |i| {
seed[0] = @as(u8, @intCast(i));
const kp = try mode.KeyPair.create(seed);
const kp = try mode.KeyPair.generateDeterministic(seed);
const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes());
try testing.expectEqual(sk, kp.secret_key);
const pk = try mode.PublicKey.fromBytes(&kp.public_key.toBytes());
@ -1745,7 +1752,7 @@ test "NIST KAT test" {
g2.fill(kseed[0..32]);
g2.fill(kseed[32..64]);
g2.fill(&eseed);
const kp = try mode.KeyPair.create(kseed);
const kp = try mode.KeyPair.generateDeterministic(kseed);
const e = kp.public_key.encaps(eseed);
const ss2 = try kp.secret_key.decaps(&e.ciphertext);
try testing.expectEqual(ss2, e.shared_secret);

View File

@ -535,7 +535,7 @@ pub const SealedBox = struct {
/// `c` must be `seal_length` bytes larger than `m`, so that the required metadata can be added.
pub fn seal(c: []u8, m: []const u8, public_key: [public_length]u8) (WeakPublicKeyError || IdentityElementError)!void {
debug.assert(c.len == m.len + seal_length);
var ekp = try KeyPair.create(null);
var ekp = KeyPair.generate();
const nonce = createNonce(ekp.public_key, public_key);
c[0..public_length].* = ekp.public_key;
try Box.seal(c[Box.public_length..], m, nonce, public_key, ekp.secret_key);
@ -607,8 +607,8 @@ test "xsalsa20poly1305 box" {
crypto.random.bytes(&msg);
crypto.random.bytes(&nonce);
const kp1 = try Box.KeyPair.create(null);
const kp2 = try Box.KeyPair.create(null);
const kp1 = Box.KeyPair.generate();
const kp2 = Box.KeyPair.generate();
try Box.seal(boxed[0..], msg[0..], nonce, kp1.public_key, kp2.secret_key);
try Box.open(msg2[0..], boxed[0..], nonce, kp2.public_key, kp1.secret_key);
}
@ -619,7 +619,7 @@ test "xsalsa20poly1305 sealedbox" {
var boxed: [msg.len + SealedBox.seal_length]u8 = undefined;
crypto.random.bytes(&msg);
const kp = try Box.KeyPair.create(null);
const kp = Box.KeyPair.generate();
try SealedBox.seal(boxed[0..], msg[0..], kp.public_key);
try SealedBox.open(msg2[0..], boxed[0..], kp);
}

View File

@ -1649,10 +1649,10 @@ const KeyShare = struct {
fn init(seed: [112]u8) error{IdentityElement}!KeyShare {
return .{
.ml_kem768_kp = try .create(null),
.secp256r1_kp = try .create(seed[0..32].*),
.secp384r1_kp = try .create(seed[32..80].*),
.x25519_kp = try .create(seed[80..112].*),
.ml_kem768_kp = .generate(),
.secp256r1_kp = try .generateDeterministic(seed[0..32].*),
.secp384r1_kp = try .generateDeterministic(seed[32..80].*),
.x25519_kp = try .generateDeterministic(seed[80..112].*),
.sk_buf = undefined,
.sk_len = 0,
};

View File

@ -48,7 +48,8 @@ fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
switch (linux.E.init(bytes_read)) {
.SUCCESS => return bytes_read == buf.len,
.FAULT => return false,
.INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
.INVAL, .SRCH => unreachable, // own pid is always valid
.PERM => {}, // Known to happen in containers.
.NOMEM => {},
.NOSYS => {}, // QEMU is known not to implement this syscall.
else => unreachable, // unexpected

View File

@ -456,10 +456,10 @@ const ANY = "any";
pub fn defaultSpec(comptime T: type) [:0]const u8 {
switch (@typeInfo(T)) {
.array => |_| return ANY,
.array, .vector => return ANY,
.pointer => |ptr_info| switch (ptr_info.size) {
.One => switch (@typeInfo(ptr_info.child)) {
.array => |_| return ANY,
.array => return ANY,
else => {},
},
.Many, .C => return "*",
@ -680,7 +680,7 @@ pub fn formatType(
try writer.writeAll("{ ");
var i: usize = 0;
while (i < info.len) : (i += 1) {
try formatValue(value[i], actual_fmt, options, writer);
try formatType(value[i], actual_fmt, options, writer, max_depth - 1);
if (i < info.len - 1) {
try writer.writeAll(", ");
}
@ -2608,6 +2608,22 @@ test "vector" {
try expectFmt("{ -2, -1, +0, +1 }", "{d:5}", .{vi64});
try expectFmt("{ 1000, 2000, 3000, 4000 }", "{}", .{vu64});
try expectFmt("{ 3e8, 7d0, bb8, fa0 }", "{x}", .{vu64});
const x: [4]u64 = undefined;
const vp: @Vector(4, *const u64) = [_]*const u64{ &x[0], &x[1], &x[2], &x[3] };
const vop: @Vector(4, ?*const u64) = [_]?*const u64{ &x[0], null, null, &x[3] };
var expect_buffer: [@sizeOf(usize) * 2 * 4 + 64]u8 = undefined;
try expectFmt(try bufPrint(
&expect_buffer,
"{{ {}, {}, {}, {} }}",
.{ &x[0], &x[1], &x[2], &x[3] },
), "{}", .{vp});
try expectFmt(try bufPrint(
&expect_buffer,
"{{ {?}, null, null, {?} }}",
.{ &x[0], &x[3] },
), "{any}", .{vop});
}
test "enum-literal" {

View File

@ -11,215 +11,215 @@ const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
pub const map_lookup_elem: *align(1) const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque = @ptrFromInt(1);
pub const map_update_elem: *align(1) const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(2);
pub const map_delete_elem: *align(1) const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long = @ptrFromInt(3);
pub const probe_read: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(4);
pub const ktime_get_ns: *align(1) const fn () u64 = @ptrFromInt(5);
pub const trace_printk: *align(1) const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long = @ptrFromInt(6);
pub const get_prandom_u32: *align(1) const fn () u32 = @ptrFromInt(7);
pub const get_smp_processor_id: *align(1) const fn () u32 = @ptrFromInt(8);
pub const skb_store_bytes: *align(1) const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long = @ptrFromInt(9);
pub const l3_csum_replace: *align(1) const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long = @ptrFromInt(10);
pub const l4_csum_replace: *align(1) const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long = @ptrFromInt(11);
pub const tail_call: *align(1) const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(12);
pub const clone_redirect: *align(1) const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long = @ptrFromInt(13);
pub const get_current_pid_tgid: *align(1) const fn () u64 = @ptrFromInt(14);
pub const get_current_uid_gid: *align(1) const fn () u64 = @ptrFromInt(15);
pub const get_current_comm: *align(1) const fn (buf: ?*anyopaque, size_of_buf: u32) c_long = @ptrFromInt(16);
pub const get_cgroup_classid: *align(1) const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(17);
// Note vlan_proto is big endian
pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
pub const skb_vlan_push: *align(1) const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long = @ptrFromInt(18);
pub const skb_vlan_pop: *align(1) const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(19);
pub const skb_get_tunnel_key: *align(1) const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(20);
pub const skb_set_tunnel_key: *align(1) const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long = @ptrFromInt(21);
pub const perf_event_read: *align(1) const fn (map: *const kern.MapDef, flags: u64) u64 = @ptrFromInt(22);
pub const redirect: *align(1) const fn (ifindex: u32, flags: u64) c_long = @ptrFromInt(23);
pub const get_route_realm: *align(1) const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(24);
pub const perf_event_output: *align(1) const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(25);
pub const skb_load_bytes: *align(1) const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long = @ptrFromInt(26);
pub const get_stackid: *align(1) const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long = @ptrFromInt(27);
// from and to point to __be32
pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
pub const csum_diff: *align(1) const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64 = @ptrFromInt(28);
pub const skb_get_tunnel_opt: *align(1) const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(29);
pub const skb_set_tunnel_opt: *align(1) const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long = @ptrFromInt(30);
// proto is __be16
pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
pub const load_hdr_opt = @as(*const fn (?*kern.BpfSockOps, ?*anyopaque, u32, u64) c_long, @ptrFromInt(142));
pub const store_hdr_opt = @as(*const fn (?*kern.BpfSockOps, ?*const anyopaque, u32, u64) c_long, @ptrFromInt(143));
pub const reserve_hdr_opt = @as(*const fn (?*kern.BpfSockOps, u32, u64) c_long, @ptrFromInt(144));
pub const inode_storage_get = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) ?*anyopaque, @ptrFromInt(145));
pub const inode_storage_delete = @as(*const fn (?*anyopaque, ?*anyopaque) c_int, @ptrFromInt(146));
pub const d_path = @as(*const fn (?*kern.Path, [*c]u8, u32) c_long, @ptrFromInt(147));
pub const copy_from_user = @as(*const fn (?*anyopaque, u32, ?*const anyopaque) c_long, @ptrFromInt(148));
pub const snprintf_btf = @as(*const fn ([*c]u8, u32, ?*kern.BTFPtr, u32, u64) c_long, @ptrFromInt(149));
pub const seq_printf_btf = @as(*const fn (?*kern.SeqFile, ?*kern.BTFPtr, u32, u64) c_long, @ptrFromInt(150));
pub const skb_cgroup_classid = @as(*const fn (?*kern.SkBuff) u64, @ptrFromInt(151));
pub const redirect_neigh = @as(*const fn (u32, ?*kern.BpfRedirNeigh, c_int, u64) c_long, @ptrFromInt(152));
pub const per_cpu_ptr = @as(*const fn (?*const anyopaque, u32) ?*anyopaque, @ptrFromInt(153));
pub const this_cpu_ptr = @as(*const fn (?*const anyopaque) ?*anyopaque, @ptrFromInt(154));
pub const redirect_peer = @as(*const fn (u32, u64) c_long, @ptrFromInt(155));
pub const task_storage_get = @as(*const fn (?*anyopaque, ?*kern.Task, ?*anyopaque, u64) ?*anyopaque, @ptrFromInt(156));
pub const task_storage_delete = @as(*const fn (?*anyopaque, ?*kern.Task) c_long, @ptrFromInt(157));
pub const get_current_task_btf = @as(*const fn () ?*kern.Task, @ptrFromInt(158));
pub const bprm_opts_set = @as(*const fn (?*kern.BinPrm, u64) c_long, @ptrFromInt(159));
pub const ktime_get_coarse_ns = @as(*const fn () u64, @ptrFromInt(160));
pub const ima_inode_hash = @as(*const fn (?*kern.Inode, ?*anyopaque, u32) c_long, @ptrFromInt(161));
pub const sock_from_file = @as(*const fn (?*kern.File) ?*kern.Socket, @ptrFromInt(162));
pub const check_mtu = @as(*const fn (?*anyopaque, u32, [*c]u32, i32, u64) c_long, @ptrFromInt(163));
pub const for_each_map_elem = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(164));
pub const snprintf = @as(*const fn ([*c]u8, u32, [*c]const u8, [*c]u64, u32) c_long, @ptrFromInt(165));
pub const sys_bpf = @as(*const fn (u32, ?*anyopaque, u32) c_long, @ptrFromInt(166));
pub const btf_find_by_name_kind = @as(*const fn ([*c]u8, c_int, u32, c_int) c_long, @ptrFromInt(167));
pub const sys_close = @as(*const fn (u32) c_long, @ptrFromInt(168));
pub const timer_init = @as(*const fn (?*kern.BpfTimer, ?*anyopaque, u64) c_long, @ptrFromInt(169));
pub const timer_set_callback = @as(*const fn (?*kern.BpfTimer, ?*anyopaque) c_long, @ptrFromInt(170));
pub const timer_start = @as(*const fn (?*kern.BpfTimer, u64, u64) c_long, @ptrFromInt(171));
pub const timer_cancel = @as(*const fn (?*kern.BpfTimer) c_long, @ptrFromInt(172));
pub const get_func_ip = @as(*const fn (?*anyopaque) u64, @ptrFromInt(173));
pub const get_attach_cookie = @as(*const fn (?*anyopaque) u64, @ptrFromInt(174));
pub const task_pt_regs = @as(*const fn (?*kern.Task) c_long, @ptrFromInt(175));
pub const get_branch_snapshot = @as(*const fn (?*anyopaque, u32, u64) c_long, @ptrFromInt(176));
pub const trace_vprintk = @as(*const fn ([*c]const u8, u32, ?*const anyopaque, u32) c_long, @ptrFromInt(177));
pub const skc_to_unix_sock = @as(*const fn (?*anyopaque) ?*kern.UnixSock, @ptrFromInt(178));
pub const kallsyms_lookup_name = @as(*const fn ([*c]const u8, c_int, c_int, [*c]u64) c_long, @ptrFromInt(179));
pub const find_vma = @as(*const fn (?*kern.Task, u64, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(180));
pub const loop = @as(*const fn (u32, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(181));
pub const strncmp = @as(*const fn ([*c]const u8, u32, [*c]const u8) c_long, @ptrFromInt(182));
pub const get_func_arg = @as(*const fn (?*anyopaque, u32, [*c]u64) c_long, @ptrFromInt(183));
pub const get_func_ret = @as(*const fn (?*anyopaque, [*c]u64) c_long, @ptrFromInt(184));
pub const get_func_arg_cnt = @as(*const fn (?*anyopaque) c_long, @ptrFromInt(185));
pub const get_retval = @as(*const fn () c_int, @ptrFromInt(186));
pub const set_retval = @as(*const fn (c_int) c_int, @ptrFromInt(187));
pub const xdp_get_buff_len = @as(*const fn (?*kern.XdpMd) u64, @ptrFromInt(188));
pub const xdp_load_bytes = @as(*const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long, @ptrFromInt(189));
pub const xdp_store_bytes = @as(*const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long, @ptrFromInt(190));
pub const copy_from_user_task = @as(*const fn (?*anyopaque, u32, ?*const anyopaque, ?*kern.Task, u64) c_long, @ptrFromInt(191));
pub const skb_set_tstamp = @as(*const fn (?*kern.SkBuff, u64, u32) c_long, @ptrFromInt(192));
pub const ima_file_hash = @as(*const fn (?*kern.File, ?*anyopaque, u32) c_long, @ptrFromInt(193));
pub const kptr_xchg = @as(*const fn (?*anyopaque, ?*anyopaque) ?*anyopaque, @ptrFromInt(194));
pub const map_lookup_percpu_elem = @as(*const fn (?*anyopaque, ?*const anyopaque, u32) ?*anyopaque, @ptrFromInt(195));
pub const skc_to_mptcp_sock = @as(*const fn (?*anyopaque) ?*kern.MpTcpSock, @ptrFromInt(196));
pub const dynptr_from_mem = @as(*const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long, @ptrFromInt(197));
pub const ringbuf_reserve_dynptr = @as(*const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long, @ptrFromInt(198));
pub const ringbuf_submit_dynptr = @as(*const fn (?*kern.BpfDynPtr, u64) void, @ptrFromInt(199));
pub const ringbuf_discard_dynptr = @as(*const fn (?*kern.BpfDynPtr, u64) void, @ptrFromInt(200));
pub const dynptr_read = @as(*const fn (?*anyopaque, u32, ?*kern.BpfDynPtr, u32, u64) c_long, @ptrFromInt(201));
pub const dynptr_write = @as(*const fn (?*kern.BpfDynPtr, u32, ?*anyopaque, u32, u64) c_long, @ptrFromInt(202));
pub const dynptr_data = @as(*const fn (?*kern.BpfDynPtr, u32, u32) ?*anyopaque, @ptrFromInt(203));
pub const tcp_raw_gen_syncookie_ipv4 = @as(*const fn (?*kern.IpHdr, ?*TcpHdr, u32) i64, @ptrFromInt(204));
pub const tcp_raw_gen_syncookie_ipv6 = @as(*const fn (?*kern.Ipv6Hdr, ?*TcpHdr, u32) i64, @ptrFromInt(205));
pub const tcp_raw_check_syncookie_ipv4 = @as(*const fn (?*kern.IpHdr, ?*TcpHdr) c_long, @ptrFromInt(206));
pub const tcp_raw_check_syncookie_ipv6 = @as(*const fn (?*kern.Ipv6Hdr, ?*TcpHdr) c_long, @ptrFromInt(207));
pub const ktime_get_tai_ns = @as(*const fn () u64, @ptrFromInt(208));
pub const user_ringbuf_drain = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(209));
pub const skb_change_proto: *align(1) const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long = @ptrFromInt(31);
pub const skb_change_type: *align(1) const fn (skb: *kern.SkBuff, skb_type: u32) c_long = @ptrFromInt(32);
pub const skb_under_cgroup: *align(1) const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long = @ptrFromInt(33);
pub const get_hash_recalc: *align(1) const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(34);
pub const get_current_task: *align(1) const fn () u64 = @ptrFromInt(35);
pub const probe_write_user: *align(1) const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long = @ptrFromInt(36);
pub const current_task_under_cgroup: *align(1) const fn (map: *const kern.MapDef, index: u32) c_long = @ptrFromInt(37);
pub const skb_change_tail: *align(1) const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(38);
pub const skb_pull_data: *align(1) const fn (skb: *kern.SkBuff, len: u32) c_long = @ptrFromInt(39);
pub const csum_update: *align(1) const fn (skb: *kern.SkBuff, csum: u32) i64 = @ptrFromInt(40);
pub const set_hash_invalid: *align(1) const fn (skb: *kern.SkBuff) void = @ptrFromInt(41);
pub const get_numa_node_id: *align(1) const fn () c_long = @ptrFromInt(42);
pub const skb_change_head: *align(1) const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long = @ptrFromInt(43);
pub const xdp_adjust_head: *align(1) const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(44);
pub const probe_read_str: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(45);
pub const get_socket_cookie: *align(1) const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(46);
pub const get_socket_uid: *align(1) const fn (skb: *kern.SkBuff) u32 = @ptrFromInt(47);
pub const set_hash: *align(1) const fn (skb: *kern.SkBuff, hash: u32) c_long = @ptrFromInt(48);
pub const setsockopt: *align(1) const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(49);
pub const skb_adjust_room: *align(1) const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long = @ptrFromInt(50);
pub const redirect_map: *align(1) const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(51);
pub const sk_redirect_map: *align(1) const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(52);
pub const sock_map_update: *align(1) const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(53);
pub const xdp_adjust_meta: *align(1) const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(54);
pub const perf_event_read_value: *align(1) const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(55);
pub const perf_prog_read_value: *align(1) const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long = @ptrFromInt(56);
pub const getsockopt: *align(1) const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long = @ptrFromInt(57);
pub const override_return: *align(1) const fn (regs: *PtRegs, rc: u64) c_long = @ptrFromInt(58);
pub const sock_ops_cb_flags_set: *align(1) const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long = @ptrFromInt(59);
pub const msg_redirect_map: *align(1) const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long = @ptrFromInt(60);
pub const msg_apply_bytes: *align(1) const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(61);
pub const msg_cork_bytes: *align(1) const fn (msg: *kern.SkMsgMd, bytes: u32) c_long = @ptrFromInt(62);
pub const msg_pull_data: *align(1) const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long = @ptrFromInt(63);
pub const bind: *align(1) const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long = @ptrFromInt(64);
pub const xdp_adjust_tail: *align(1) const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long = @ptrFromInt(65);
pub const skb_get_xfrm_state: *align(1) const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long = @ptrFromInt(66);
pub const get_stack: *align(1) const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(67);
pub const skb_load_bytes_relative: *align(1) const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long = @ptrFromInt(68);
pub const fib_lookup: *align(1) const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long = @ptrFromInt(69);
pub const sock_hash_update: *align(1) const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(70);
pub const msg_redirect_hash: *align(1) const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(71);
pub const sk_redirect_hash: *align(1) const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(72);
pub const lwt_push_encap: *align(1) const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long = @ptrFromInt(73);
pub const lwt_seg6_store_bytes: *align(1) const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long = @ptrFromInt(74);
pub const lwt_seg6_adjust_srh: *align(1) const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long = @ptrFromInt(75);
pub const lwt_seg6_action: *align(1) const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long = @ptrFromInt(76);
pub const rc_repeat: *align(1) const fn (ctx: ?*anyopaque) c_long = @ptrFromInt(77);
pub const rc_keydown: *align(1) const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long = @ptrFromInt(78);
pub const skb_cgroup_id: *align(1) const fn (skb: *kern.SkBuff) u64 = @ptrFromInt(79);
pub const get_current_cgroup_id: *align(1) const fn () u64 = @ptrFromInt(80);
pub const get_local_storage: *align(1) const fn (map: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(81);
pub const sk_select_reuseport: *align(1) const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long = @ptrFromInt(82);
pub const skb_ancestor_cgroup_id: *align(1) const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64 = @ptrFromInt(83);
pub const sk_lookup_tcp: *align(1) const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(84);
pub const sk_lookup_udp: *align(1) const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(85);
pub const sk_release: *align(1) const fn (sock: *kern.Sock) c_long = @ptrFromInt(86);
pub const map_push_elem: *align(1) const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long = @ptrFromInt(87);
pub const map_pop_elem: *align(1) const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(88);
pub const map_peek_elem: *align(1) const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long = @ptrFromInt(89);
pub const msg_push_data: *align(1) const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(90);
pub const msg_pop_data: *align(1) const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long = @ptrFromInt(91);
pub const rc_pointer_rel: *align(1) const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long = @ptrFromInt(92);
pub const spin_lock: *align(1) const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(93);
pub const spin_unlock: *align(1) const fn (lock: *kern.SpinLock) c_long = @ptrFromInt(94);
pub const sk_fullsock: *align(1) const fn (sk: *kern.Sock) ?*SkFullSock = @ptrFromInt(95);
pub const tcp_sock: *align(1) const fn (sk: *kern.Sock) ?*kern.TcpSock = @ptrFromInt(96);
pub const skb_ecn_set_ce: *align(1) const fn (skb: *kern.SkBuff) c_long = @ptrFromInt(97);
pub const get_listener_sock: *align(1) const fn (sk: *kern.Sock) ?*kern.Sock = @ptrFromInt(98);
pub const skc_lookup_tcp: *align(1) const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock = @ptrFromInt(99);
pub const tcp_check_syncookie: *align(1) const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long = @ptrFromInt(100);
pub const sysctl_get_name: *align(1) const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long = @ptrFromInt(101);
pub const sysctl_get_current_value: *align(1) const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(102);
pub const sysctl_get_new_value: *align(1) const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long = @ptrFromInt(103);
pub const sysctl_set_new_value: *align(1) const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long = @ptrFromInt(104);
pub const strtol: *align(1) const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long = @ptrFromInt(105);
pub const strtoul: *align(1) const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long = @ptrFromInt(106);
pub const sk_storage_get: *align(1) const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque = @ptrFromInt(107);
pub const sk_storage_delete: *align(1) const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long = @ptrFromInt(108);
pub const send_signal: *align(1) const fn (sig: u32) c_long = @ptrFromInt(109);
pub const tcp_gen_syncookie: *align(1) const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64 = @ptrFromInt(110);
pub const skb_output: *align(1) const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(111);
pub const probe_read_user: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(112);
pub const probe_read_kernel: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(113);
pub const probe_read_user_str: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(114);
pub const probe_read_kernel_str: *align(1) const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long = @ptrFromInt(115);
pub const tcp_send_ack: *align(1) const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long = @ptrFromInt(116);
pub const send_signal_thread: *align(1) const fn (sig: u32) c_long = @ptrFromInt(117);
pub const jiffies64: *align(1) const fn () u64 = @ptrFromInt(118);
pub const read_branch_records: *align(1) const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(119);
pub const get_ns_current_pid_tgid: *align(1) const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long = @ptrFromInt(120);
pub const xdp_output: *align(1) const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long = @ptrFromInt(121);
pub const get_netns_cookie: *align(1) const fn (ctx: ?*anyopaque) u64 = @ptrFromInt(122);
pub const get_current_ancestor_cgroup_id: *align(1) const fn (ancestor_level: c_int) u64 = @ptrFromInt(123);
pub const sk_assign: *align(1) const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long = @ptrFromInt(124);
pub const ktime_get_boot_ns: *align(1) const fn () u64 = @ptrFromInt(125);
pub const seq_printf: *align(1) const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long = @ptrFromInt(126);
pub const seq_write: *align(1) const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long = @ptrFromInt(127);
pub const sk_cgroup_id: *align(1) const fn (sk: *kern.BpfSock) u64 = @ptrFromInt(128);
pub const sk_ancestor_cgroup_id: *align(1) const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64 = @ptrFromInt(129);
pub const ringbuf_output: *align(1) const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long = @ptrFromInt(130);
pub const ringbuf_reserve: *align(1) const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque = @ptrFromInt(131);
pub const ringbuf_submit: *align(1) const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(132);
pub const ringbuf_discard: *align(1) const fn (data: ?*anyopaque, flags: u64) void = @ptrFromInt(133);
pub const ringbuf_query: *align(1) const fn (ringbuf: ?*anyopaque, flags: u64) u64 = @ptrFromInt(134);
pub const csum_level: *align(1) const fn (skb: *kern.SkBuff, level: u64) c_long = @ptrFromInt(135);
pub const skc_to_tcp6_sock: *align(1) const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock = @ptrFromInt(136);
pub const skc_to_tcp_sock: *align(1) const fn (sk: ?*anyopaque) ?*kern.TcpSock = @ptrFromInt(137);
pub const skc_to_tcp_timewait_sock: *align(1) const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock = @ptrFromInt(138);
pub const skc_to_tcp_request_sock: *align(1) const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock = @ptrFromInt(139);
pub const skc_to_udp6_sock: *align(1) const fn (sk: ?*anyopaque) ?*kern.Udp6Sock = @ptrFromInt(140);
pub const get_task_stack: *align(1) const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long = @ptrFromInt(141);
pub const load_hdr_opt: *align(1) const fn (?*kern.BpfSockOps, ?*anyopaque, u32, u64) c_long = @ptrFromInt(142);
pub const store_hdr_opt: *align(1) const fn (?*kern.BpfSockOps, ?*const anyopaque, u32, u64) c_long = @ptrFromInt(143);
pub const reserve_hdr_opt: *align(1) const fn (?*kern.BpfSockOps, u32, u64) c_long = @ptrFromInt(144);
pub const inode_storage_get: *align(1) const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) ?*anyopaque = @ptrFromInt(145);
pub const inode_storage_delete: *align(1) const fn (?*anyopaque, ?*anyopaque) c_int = @ptrFromInt(146);
pub const d_path: *align(1) const fn (?*kern.Path, [*c]u8, u32) c_long = @ptrFromInt(147);
pub const copy_from_user: *align(1) const fn (?*anyopaque, u32, ?*const anyopaque) c_long = @ptrFromInt(148);
pub const snprintf_btf: *align(1) const fn ([*c]u8, u32, ?*kern.BTFPtr, u32, u64) c_long = @ptrFromInt(149);
pub const seq_printf_btf: *align(1) const fn (?*kern.SeqFile, ?*kern.BTFPtr, u32, u64) c_long = @ptrFromInt(150);
pub const skb_cgroup_classid: *align(1) const fn (?*kern.SkBuff) u64 = @ptrFromInt(151);
pub const redirect_neigh: *align(1) const fn (u32, ?*kern.BpfRedirNeigh, c_int, u64) c_long = @ptrFromInt(152);
pub const per_cpu_ptr: *align(1) const fn (?*const anyopaque, u32) ?*anyopaque = @ptrFromInt(153);
pub const this_cpu_ptr: *align(1) const fn (?*const anyopaque) ?*anyopaque = @ptrFromInt(154);
pub const redirect_peer: *align(1) const fn (u32, u64) c_long = @ptrFromInt(155);
pub const task_storage_get: *align(1) const fn (?*anyopaque, ?*kern.Task, ?*anyopaque, u64) ?*anyopaque = @ptrFromInt(156);
pub const task_storage_delete: *align(1) const fn (?*anyopaque, ?*kern.Task) c_long = @ptrFromInt(157);
pub const get_current_task_btf: *align(1) const fn () ?*kern.Task = @ptrFromInt(158);
pub const bprm_opts_set: *align(1) const fn (?*kern.BinPrm, u64) c_long = @ptrFromInt(159);
pub const ktime_get_coarse_ns: *align(1) const fn () u64 = @ptrFromInt(160);
pub const ima_inode_hash: *align(1) const fn (?*kern.Inode, ?*anyopaque, u32) c_long = @ptrFromInt(161);
pub const sock_from_file: *align(1) const fn (?*kern.File) ?*kern.Socket = @ptrFromInt(162);
pub const check_mtu: *align(1) const fn (?*anyopaque, u32, [*c]u32, i32, u64) c_long = @ptrFromInt(163);
pub const for_each_map_elem: *align(1) const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long = @ptrFromInt(164);
pub const snprintf: *align(1) const fn ([*c]u8, u32, [*c]const u8, [*c]u64, u32) c_long = @ptrFromInt(165);
pub const sys_bpf: *align(1) const fn (u32, ?*anyopaque, u32) c_long = @ptrFromInt(166);
pub const btf_find_by_name_kind: *align(1) const fn ([*c]u8, c_int, u32, c_int) c_long = @ptrFromInt(167);
pub const sys_close: *align(1) const fn (u32) c_long = @ptrFromInt(168);
pub const timer_init: *align(1) const fn (?*kern.BpfTimer, ?*anyopaque, u64) c_long = @ptrFromInt(169);
pub const timer_set_callback: *align(1) const fn (?*kern.BpfTimer, ?*anyopaque) c_long = @ptrFromInt(170);
pub const timer_start: *align(1) const fn (?*kern.BpfTimer, u64, u64) c_long = @ptrFromInt(171);
pub const timer_cancel: *align(1) const fn (?*kern.BpfTimer) c_long = @ptrFromInt(172);
pub const get_func_ip: *align(1) const fn (?*anyopaque) u64 = @ptrFromInt(173);
pub const get_attach_cookie: *align(1) const fn (?*anyopaque) u64 = @ptrFromInt(174);
pub const task_pt_regs: *align(1) const fn (?*kern.Task) c_long = @ptrFromInt(175);
pub const get_branch_snapshot: *align(1) const fn (?*anyopaque, u32, u64) c_long = @ptrFromInt(176);
pub const trace_vprintk: *align(1) const fn ([*c]const u8, u32, ?*const anyopaque, u32) c_long = @ptrFromInt(177);
pub const skc_to_unix_sock: *align(1) const fn (?*anyopaque) ?*kern.UnixSock = @ptrFromInt(178);
pub const kallsyms_lookup_name: *align(1) const fn ([*c]const u8, c_int, c_int, [*c]u64) c_long = @ptrFromInt(179);
pub const find_vma: *align(1) const fn (?*kern.Task, u64, ?*anyopaque, ?*anyopaque, u64) c_long = @ptrFromInt(180);
pub const loop: *align(1) const fn (u32, ?*anyopaque, ?*anyopaque, u64) c_long = @ptrFromInt(181);
pub const strncmp: *align(1) const fn ([*c]const u8, u32, [*c]const u8) c_long = @ptrFromInt(182);
pub const get_func_arg: *align(1) const fn (?*anyopaque, u32, [*c]u64) c_long = @ptrFromInt(183);
pub const get_func_ret: *align(1) const fn (?*anyopaque, [*c]u64) c_long = @ptrFromInt(184);
pub const get_func_arg_cnt: *align(1) const fn (?*anyopaque) c_long = @ptrFromInt(185);
pub const get_retval: *align(1) const fn () c_int = @ptrFromInt(186);
pub const set_retval: *align(1) const fn (c_int) c_int = @ptrFromInt(187);
pub const xdp_get_buff_len: *align(1) const fn (?*kern.XdpMd) u64 = @ptrFromInt(188);
pub const xdp_load_bytes: *align(1) const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long = @ptrFromInt(189);
pub const xdp_store_bytes: *align(1) const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long = @ptrFromInt(190);
pub const copy_from_user_task: *align(1) const fn (?*anyopaque, u32, ?*const anyopaque, ?*kern.Task, u64) c_long = @ptrFromInt(191);
pub const skb_set_tstamp: *align(1) const fn (?*kern.SkBuff, u64, u32) c_long = @ptrFromInt(192);
pub const ima_file_hash: *align(1) const fn (?*kern.File, ?*anyopaque, u32) c_long = @ptrFromInt(193);
pub const kptr_xchg: *align(1) const fn (?*anyopaque, ?*anyopaque) ?*anyopaque = @ptrFromInt(194);
pub const map_lookup_percpu_elem: *align(1) const fn (?*anyopaque, ?*const anyopaque, u32) ?*anyopaque = @ptrFromInt(195);
pub const skc_to_mptcp_sock: *align(1) const fn (?*anyopaque) ?*kern.MpTcpSock = @ptrFromInt(196);
pub const dynptr_from_mem: *align(1) const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long = @ptrFromInt(197);
pub const ringbuf_reserve_dynptr: *align(1) const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long = @ptrFromInt(198);
pub const ringbuf_submit_dynptr: *align(1) const fn (?*kern.BpfDynPtr, u64) void = @ptrFromInt(199);
pub const ringbuf_discard_dynptr: *align(1) const fn (?*kern.BpfDynPtr, u64) void = @ptrFromInt(200);
pub const dynptr_read: *align(1) const fn (?*anyopaque, u32, ?*kern.BpfDynPtr, u32, u64) c_long = @ptrFromInt(201);
pub const dynptr_write: *align(1) const fn (?*kern.BpfDynPtr, u32, ?*anyopaque, u32, u64) c_long = @ptrFromInt(202);
pub const dynptr_data: *align(1) const fn (?*kern.BpfDynPtr, u32, u32) ?*anyopaque = @ptrFromInt(203);
pub const tcp_raw_gen_syncookie_ipv4: *align(1) const fn (?*kern.IpHdr, ?*TcpHdr, u32) i64 = @ptrFromInt(204);
pub const tcp_raw_gen_syncookie_ipv6: *align(1) const fn (?*kern.Ipv6Hdr, ?*TcpHdr, u32) i64 = @ptrFromInt(205);
pub const tcp_raw_check_syncookie_ipv4: *align(1) const fn (?*kern.IpHdr, ?*TcpHdr) c_long = @ptrFromInt(206);
pub const tcp_raw_check_syncookie_ipv6: *align(1) const fn (?*kern.Ipv6Hdr, ?*TcpHdr) c_long = @ptrFromInt(207);
pub const ktime_get_tai_ns: *align(1) const fn () u64 = @ptrFromInt(208);
pub const user_ringbuf_drain: *align(1) const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long = @ptrFromInt(209);

@ -294,12 +294,16 @@ pub fn resolve(options: Options) ResolveError!Config {
if (options.lto) |x| break :b x;
if (!options.any_c_source_files) break :b false;
if (target.cpu.arch.isRISCV()) {
// Clang and LLVM currently don't support RISC-V target-abi for LTO.
// Compiling with LTO may fail or produce undesired results.
// See https://reviews.llvm.org/D71387
// See https://reviews.llvm.org/D102582
break :b false;
// https://github.com/llvm/llvm-project/pull/116537
switch (target.abi) {
.gnuabin32,
.gnuilp32,
.gnux32,
.ilp32,
.muslabin32,
.muslx32,
=> break :b false,
else => {},
}
break :b switch (options.output_mode) {

@ -812,6 +812,7 @@ const Resource = union(enum) {
dir: fs.Dir,
const Git = struct {
session: git.Session,
fetch_stream: git.Session.FetchStream,
want_oid: [git.oid_length]u8,
};
@ -820,7 +821,10 @@ const Resource = union(enum) {
switch (resource.*) {
.file => |*file| file.close(),
.http_request => |*req| req.deinit(),
.git => |*git_resource| git_resource.fetch_stream.deinit(),
.git => |*git_resource| {
git_resource.fetch_stream.deinit();
git_resource.session.deinit();
},
.dir => |*dir| dir.close(),
}
resource.* = undefined;
@ -961,23 +965,13 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
{
var transport_uri = uri;
transport_uri.scheme = uri.scheme["git+".len..];
var redirect_uri: []u8 = undefined;
var session: git.Session = .{ .transport = http_client, .uri = transport_uri };
session.discoverCapabilities(gpa, &redirect_uri, server_header_buffer) catch |err| switch (err) {
error.Redirected => {
defer gpa.free(redirect_uri);
return f.fail(f.location_tok, try eb.printString(
"repository moved to {s}",
.{redirect_uri},
));
},
else => |e| {
return f.fail(f.location_tok, try eb.printString(
"unable to discover remote git server capabilities: {s}",
.{@errorName(e)},
));
},
var session = git.Session.init(gpa, http_client, transport_uri, server_header_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to discover remote git server capabilities: {s}",
.{@errorName(err)},
));
};
errdefer session.deinit();
const want_oid = want_oid: {
const want_ref =
@ -987,7 +981,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
const want_ref_head = try std.fmt.allocPrint(arena, "refs/heads/{s}", .{want_ref});
const want_ref_tag = try std.fmt.allocPrint(arena, "refs/tags/{s}", .{want_ref});
var ref_iterator = session.listRefs(gpa, .{
var ref_iterator = session.listRefs(.{
.ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
.include_peeled = true,
.server_header_buffer = server_header_buffer,
@ -1035,7 +1029,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
_ = std.fmt.bufPrint(&want_oid_buf, "{}", .{
std.fmt.fmtSliceHexLower(&want_oid),
}) catch unreachable;
var fetch_stream = session.fetch(gpa, &.{&want_oid_buf}, server_header_buffer) catch |err| {
var fetch_stream = session.fetch(&.{&want_oid_buf}, server_header_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to create fetch stream: {s}",
.{@errorName(err)},
@ -1044,6 +1038,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
errdefer fetch_stream.deinit();
return .{ .git = .{
.session = session,
.fetch_stream = fetch_stream,
.want_oid = want_oid,
} };

@ -492,26 +492,31 @@ const Packet = union(enum) {
/// [protocol-v2](https://git-scm.com/docs/protocol-v2).
pub const Session = struct {
transport: *std.http.Client,
uri: std.Uri,
supports_agent: bool = false,
supports_shallow: bool = false,
location: Location,
supports_agent: bool,
supports_shallow: bool,
allocator: Allocator,
const agent = "zig/" ++ @import("builtin").zig_version_string;
const agent_capability = std.fmt.comptimePrint("agent={s}\n", .{agent});
/// Discovers server capabilities. This should be called before using any
/// other client functionality, or the client will be forced to default to
/// the bare minimum server requirements, which may be considerably less
/// efficient (e.g. no shallow fetches).
///
/// See the note on `getCapabilities` regarding `redirect_uri`.
pub fn discoverCapabilities(
session: *Session,
/// Initializes a client session and discovers the capabilities of the
/// server for optimal transport.
pub fn init(
allocator: Allocator,
redirect_uri: *[]u8,
transport: *std.http.Client,
uri: std.Uri,
http_headers_buffer: []u8,
) !void {
var capability_iterator = try session.getCapabilities(allocator, redirect_uri, http_headers_buffer);
) !Session {
var session: Session = .{
.transport = transport,
.location = try .init(allocator, uri),
.supports_agent = false,
.supports_shallow = false,
.allocator = allocator,
};
errdefer session.deinit();
var capability_iterator = try session.getCapabilities(http_headers_buffer);
defer capability_iterator.deinit();
while (try capability_iterator.next()) |capability| {
if (mem.eql(u8, capability.key, "agent")) {
@ -525,27 +530,63 @@ pub const Session = struct {
}
}
}
return session;
}
pub fn deinit(session: *Session) void {
session.location.deinit(session.allocator);
session.* = undefined;
}
/// An owned `std.Uri` representing the location of the server (base URI).
const Location = struct {
uri: std.Uri,
fn init(allocator: Allocator, uri: std.Uri) !Location {
const scheme = try allocator.dupe(u8, uri.scheme);
errdefer allocator.free(scheme);
const user = if (uri.user) |user| try std.fmt.allocPrint(allocator, "{user}", .{user}) else null;
errdefer if (user) |s| allocator.free(s);
const password = if (uri.password) |password| try std.fmt.allocPrint(allocator, "{password}", .{password}) else null;
errdefer if (password) |s| allocator.free(s);
const host = if (uri.host) |host| try std.fmt.allocPrint(allocator, "{host}", .{host}) else null;
errdefer if (host) |s| allocator.free(s);
const path = try std.fmt.allocPrint(allocator, "{path}", .{uri.path});
errdefer allocator.free(path);
// The query and fragment are not used as part of the base server URI.
return .{
.uri = .{
.scheme = scheme,
.user = if (user) |s| .{ .percent_encoded = s } else null,
.password = if (password) |s| .{ .percent_encoded = s } else null,
.host = if (host) |s| .{ .percent_encoded = s } else null,
.port = uri.port,
.path = .{ .percent_encoded = path },
},
};
}
fn deinit(loc: *Location, allocator: Allocator) void {
allocator.free(loc.uri.scheme);
if (loc.uri.user) |user| allocator.free(user.percent_encoded);
if (loc.uri.password) |password| allocator.free(password.percent_encoded);
if (loc.uri.host) |host| allocator.free(host.percent_encoded);
allocator.free(loc.uri.path.percent_encoded);
}
};
/// Returns an iterator over capabilities supported by the server.
///
/// If the server redirects the request, `error.Redirected` is returned and
/// `redirect_uri` is populated with the URI resulting from the redirects.
/// When this occurs, the value of `redirect_uri` must be freed with
/// `allocator` when the caller is done with it.
fn getCapabilities(
session: Session,
allocator: Allocator,
redirect_uri: *[]u8,
http_headers_buffer: []u8,
) !CapabilityIterator {
var info_refs_uri = session.uri;
/// The `session.location` is updated if the server returns a redirect, so
/// that subsequent session functions do not need to handle redirects.
fn getCapabilities(session: *Session, http_headers_buffer: []u8) !CapabilityIterator {
var info_refs_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(allocator, "{path}", .{session.uri.path});
defer allocator.free(session_uri_path);
info_refs_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(allocator, &.{ "/", session_uri_path, "info/refs" }) };
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{path}", .{session.location.uri.path});
defer session.allocator.free(session_uri_path);
info_refs_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "info/refs" }) };
}
defer allocator.free(info_refs_uri.path.percent_encoded);
defer session.allocator.free(info_refs_uri.path.percent_encoded);
info_refs_uri.query = .{ .percent_encoded = "service=git-upload-pack" };
info_refs_uri.fragment = null;
@ -565,14 +606,14 @@ pub const Session = struct {
if (request.response.status != .ok) return error.ProtocolError;
const any_redirects_occurred = request.redirect_behavior.remaining() < max_redirects;
if (any_redirects_occurred) {
const request_uri_path = try std.fmt.allocPrint(allocator, "{path}", .{request.uri.path});
defer allocator.free(request_uri_path);
const request_uri_path = try std.fmt.allocPrint(session.allocator, "{path}", .{request.uri.path});
defer session.allocator.free(request_uri_path);
if (!mem.endsWith(u8, request_uri_path, "/info/refs")) return error.UnparseableRedirect;
var new_uri = request.uri;
new_uri.path = .{ .percent_encoded = request_uri_path[0 .. request_uri_path.len - "/info/refs".len] };
new_uri.query = null;
redirect_uri.* = try std.fmt.allocPrint(allocator, "{+/}", .{new_uri});
return error.Redirected;
const new_location: Location = try .init(session.allocator, new_uri);
session.location.deinit(session.allocator);
session.location = new_location;
}
const reader = request.reader();
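
For illustration with hypothetical URLs: a session created for https://example.com/old.git first requests https://example.com/old.git/info/refs?service=git-upload-pack; if the server redirects that request to https://example.com/new.git/info/refs, the trailing "/info/refs" is stripped, the query is dropped, and session.location becomes https://example.com/new.git, so subsequent listRefs and fetch calls are issued against the new base without re-handling the redirect.
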
@ -649,28 +690,28 @@ pub const Session = struct {
};
/// Returns an iterator over refs known to the server.
pub fn listRefs(session: Session, allocator: Allocator, options: ListRefsOptions) !RefIterator {
var upload_pack_uri = session.uri;
pub fn listRefs(session: Session, options: ListRefsOptions) !RefIterator {
var upload_pack_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(allocator, "{path}", .{session.uri.path});
defer allocator.free(session_uri_path);
upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{path}", .{session.location.uri.path});
defer session.allocator.free(session_uri_path);
upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
}
defer allocator.free(upload_pack_uri.path.percent_encoded);
defer session.allocator.free(upload_pack_uri.path.percent_encoded);
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
defer body.deinit(session.allocator);
const body_writer = body.writer(session.allocator);
try Packet.write(.{ .data = "command=ls-refs\n" }, body_writer);
if (session.supports_agent) {
try Packet.write(.{ .data = agent_capability }, body_writer);
}
try Packet.write(.delimiter, body_writer);
for (options.ref_prefixes) |ref_prefix| {
const ref_prefix_packet = try std.fmt.allocPrint(allocator, "ref-prefix {s}\n", .{ref_prefix});
defer allocator.free(ref_prefix_packet);
const ref_prefix_packet = try std.fmt.allocPrint(session.allocator, "ref-prefix {s}\n", .{ref_prefix});
defer session.allocator.free(ref_prefix_packet);
try Packet.write(.{ .data = ref_prefix_packet }, body_writer);
}
if (options.include_symrefs) {
@ -753,23 +794,22 @@ pub const Session = struct {
/// performed if the server supports it.
pub fn fetch(
session: Session,
allocator: Allocator,
wants: []const []const u8,
http_headers_buffer: []u8,
) !FetchStream {
var upload_pack_uri = session.uri;
var upload_pack_uri = session.location.uri;
{
const session_uri_path = try std.fmt.allocPrint(allocator, "{path}", .{session.uri.path});
defer allocator.free(session_uri_path);
upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
const session_uri_path = try std.fmt.allocPrint(session.allocator, "{path}", .{session.location.uri.path});
defer session.allocator.free(session_uri_path);
upload_pack_uri.path = .{ .percent_encoded = try std.fs.path.resolvePosix(session.allocator, &.{ "/", session_uri_path, "git-upload-pack" }) };
}
defer allocator.free(upload_pack_uri.path.percent_encoded);
defer session.allocator.free(upload_pack_uri.path.percent_encoded);
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
defer body.deinit(session.allocator);
const body_writer = body.writer(session.allocator);
try Packet.write(.{ .data = "command=fetch\n" }, body_writer);
if (session.supports_agent) {
try Packet.write(.{ .data = agent_capability }, body_writer);
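
Taken together with the Fetch.zig hunk above, the reworked flow is: the session is created once, owns its (possibly redirected) base location, and is torn down only when the Git resource is released. A condensed sketch, not taken verbatim from the diff (the `git` import path, `fetchHead`, `header_buf`, and `want_oid_hex` names are placeholders):

const std = @import("std");
const git = @import("git.zig");

fn fetchHead(gpa: std.mem.Allocator, http_client: *std.http.Client, uri: std.Uri, header_buf: []u8, want_oid_hex: []const u8) !void {
    // Discover capabilities and remember any redirect as the new base URI.
    var session = try git.Session.init(gpa, http_client, uri, header_buf);
    defer session.deinit();

    // No allocator argument anymore: the session carries its own.
    var refs = try session.listRefs(.{
        .ref_prefixes = &.{"refs/heads/main"},
        .include_peeled = true,
        .server_header_buffer = header_buf,
    });
    _ = &refs; // walk the iterator to pick the commit to fetch

    var fetch_stream = try session.fetch(&.{want_oid_hex}, header_buf);
    defer fetch_stream.deinit();
    // ... unpack the packfile from `fetch_stream` ...
}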

@ -312,18 +312,29 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
if (!options.global.use_llvm) break :b null;
var buf = std.ArrayList(u8).init(arena);
for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @as(std.Target.Cpu.Feature.Set.Index, @intCast(index_usize));
const is_enabled = target.cpu.features.isEnabled(index);
var disabled_features = std.ArrayList(u8).init(arena);
defer disabled_features.deinit();
// Append disabled features after enabled ones, so that their effects aren't overwritten.
for (target.cpu.arch.allFeaturesList()) |feature| {
if (feature.llvm_name) |llvm_name| {
const plus_or_minus = "-+"[@intFromBool(is_enabled)];
try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity(plus_or_minus);
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendSliceAssumeCapacity(",");
const is_enabled = target.cpu.features.isEnabled(feature.index);
if (is_enabled) {
try buf.ensureUnusedCapacity(2 + llvm_name.len);
buf.appendAssumeCapacity('+');
buf.appendSliceAssumeCapacity(llvm_name);
buf.appendAssumeCapacity(',');
} else {
try disabled_features.ensureUnusedCapacity(2 + llvm_name.len);
disabled_features.appendAssumeCapacity('-');
disabled_features.appendSliceAssumeCapacity(llvm_name);
disabled_features.appendAssumeCapacity(',');
}
}
}
try buf.appendSlice(disabled_features.items);
if (buf.items.len == 0) break :b "";
assert(std.mem.endsWith(u8, buf.items, ","));
buf.items[buf.items.len - 1] = 0;
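
The ordering matters because a later "+" entry in the LLVM feature string (or the features it implies) can re-enable something that an earlier "-" entry turned off; emitting every "+" first and every "-" afterwards avoids that. A rough standalone sketch of the same two-pass ordering, using hypothetical feature names and a plain allocator instead of the arena used above:

const std = @import("std");

// Toy model of the feature-string construction: '+' entries first, '-'
// entries appended afterwards so they are not overridden.
pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var enabled_buf = std.ArrayList(u8).init(gpa);
    defer enabled_buf.deinit();
    var disabled_buf = std.ArrayList(u8).init(gpa);
    defer disabled_buf.deinit();

    const features = [_]struct { name: []const u8, enabled: bool }{
        .{ .name = "m", .enabled = true },
        .{ .name = "c", .enabled = false },
        .{ .name = "a", .enabled = true },
        .{ .name = "v", .enabled = false },
    };
    for (features) |f| {
        const dest = if (f.enabled) &enabled_buf else &disabled_buf;
        const sign: u8 = if (f.enabled) '+' else '-';
        try dest.writer().print("{c}{s},", .{ sign, f.name });
    }
    try enabled_buf.appendSlice(disabled_buf.items);

    // Prints "+m,+a,-c,-v," — the real code then replaces the final ','
    // with a NUL terminator before handing the string to LLVM.
    std.debug.print("{s}\n", .{enabled_buf.items});
}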

@ -35010,6 +35010,7 @@ fn resolvePeerTypesInner(
// if there were no actual slices. Else, we want the slice index to report a conflict.
var opt_slice_idx: ?usize = null;
var any_abi_aligned = false;
var opt_ptr_info: ?InternPool.Key.PtrType = null;
var first_idx: usize = undefined;
var other_idx: usize = undefined; // We sometimes need a second peer index to report a generic error
@ -35054,17 +35055,14 @@ fn resolvePeerTypesInner(
} };
// Note that the align can be always non-zero; Type.ptr will canonicalize it
ptr_info.flags.alignment = Alignment.min(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
try Type.fromInterned(ptr_info.child).abiAlignmentSema(pt),
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
try Type.fromInterned(peer_info.child).abiAlignmentSema(pt),
);
if (peer_info.flags.alignment == .none) {
any_abi_aligned = true;
} else if (ptr_info.flags.alignment == .none) {
any_abi_aligned = true;
ptr_info.flags.alignment = peer_info.flags.alignment;
} else {
ptr_info.flags.alignment = ptr_info.flags.alignment.minStrict(peer_info.flags.alignment);
}
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
return generic_err;
@ -35078,6 +35076,7 @@ fn resolvePeerTypesInner(
ptr_info.flags.is_const = ptr_info.flags.is_const or peer_info.flags.is_const;
ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;
ptr_info.flags.is_allowzero = ptr_info.flags.is_allowzero or peer_info.flags.is_allowzero;
const peer_sentinel: InternPool.Index = switch (peer_info.flags.size) {
.One => switch (ip.indexToKey(peer_info.child)) {
@ -35312,6 +35311,12 @@ fn resolvePeerTypesInner(
},
}
if (any_abi_aligned and opt_ptr_info.?.flags.alignment != .none) {
opt_ptr_info.?.flags.alignment = opt_ptr_info.?.flags.alignment.minStrict(
try Type.fromInterned(pointee).abiAlignmentSema(pt),
);
}
return .{ .success = try pt.ptrTypeSema(opt_ptr_info.?) };
},

@ -23,7 +23,7 @@ pub const Env = enum {
sema,
/// - sema
/// - `zig build-* -fno-llvm -fno-lld -target x86_64-linux`
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
@"x86_64-linux",
/// - sema
@ -130,6 +130,8 @@ pub const Env = enum {
else => Env.ast_gen.supports(feature),
},
.@"x86_64-linux" => switch (feature) {
.stdio_listen,
.incremental,
.x86_64_backend,
.elf_linker,
=> true,

@ -72,6 +72,7 @@ pub const Parsed = struct {
pub fn deinit(p: *Parsed, gpa: Allocator) void {
gpa.free(p.strtab);
gpa.free(p.sections);
gpa.free(p.symtab);
gpa.free(p.versyms);
gpa.free(p.symbols);

@ -1496,7 +1496,7 @@ pub fn updateFunc(
});
defer gpa.free(name);
const osec = if (self.text_index) |sect_sym_index|
self.atom(self.symbol(sect_sym_index).ref.index).?.output_section_index
self.symbol(sect_sym_index).output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text"),
@ -1896,12 +1896,13 @@ pub fn deleteExport(
} orelse return;
const zcu = elf_file.base.comp.zcu.?;
const exp_name = name.toSlice(&zcu.intern_pool);
const esym_index = metadata.@"export"(self, exp_name) orelse return;
const sym_index = metadata.@"export"(self, exp_name) orelse return;
log.debug("deleting export '{s}'", .{exp_name});
const esym = &self.symtab.items(.elf_sym)[esym_index.*];
const esym_index = self.symbol(sym_index.*).esym_index;
const esym = &self.symtab.items(.elf_sym)[esym_index];
_ = self.globals_lookup.remove(esym.st_name);
esym.* = Elf.null_sym;
self.symtab.items(.shndx)[esym_index.*] = elf.SHN_UNDEF;
self.symtab.items(.shndx)[esym_index] = elf.SHN_UNDEF;
}
pub fn getGlobalSymbol(self: *ZigObject, elf_file: *Elf, name: []const u8, lib_name: ?[]const u8) !u32 {

@ -984,6 +984,7 @@ fn buildOutputType(
.libc_paths_file = try EnvVar.ZIG_LIBC.get(arena),
.native_system_include_paths = &.{},
};
defer create_module.link_inputs.deinit(gpa);
// before arg parsing, check for the NO_COLOR and CLICOLOR_FORCE environment variables
// if set, default the color setting to .off or .on, respectively
@ -3682,7 +3683,7 @@ const CreateModule = struct {
/// This one is used while collecting CLI options. The set of libs is used
/// directly after computing the target and used to compute link_libc,
/// link_libcpp, and then the libraries are filtered into
/// `unresolved_linker_inputs` and `windows_libs`.
/// `unresolved_link_inputs` and `windows_libs`.
cli_link_inputs: std.ArrayListUnmanaged(link.UnresolvedInput),
windows_libs: std.StringArrayHashMapUnmanaged(void),
/// The local variable `unresolved_link_inputs` is fed into library
@ -3816,7 +3817,8 @@ fn createModule(
// to decide whether to trigger native path detection logic.
// Preserves linker input order.
var unresolved_link_inputs: std.ArrayListUnmanaged(link.UnresolvedInput) = .empty;
try unresolved_link_inputs.ensureUnusedCapacity(arena, create_module.cli_link_inputs.items.len);
defer unresolved_link_inputs.deinit(gpa);
try unresolved_link_inputs.ensureUnusedCapacity(gpa, create_module.cli_link_inputs.items.len);
var any_name_queries_remaining = false;
for (create_module.cli_link_inputs.items) |cli_link_input| switch (cli_link_input) {
.name_query => |nq| {

@ -2200,39 +2200,77 @@ test "peer type resolution: pointer attributes are combined correctly" {
var buf_a align(4) = "foo".*;
var buf_b align(4) = "bar".*;
var buf_c align(4) = "baz".*;
var buf_d align(4) = "qux".*;
const a: [*:0]align(4) const u8 = &buf_a;
const b: *align(2) volatile [3:0]u8 = &buf_b;
const c: [*:0]align(4) u8 = &buf_c;
const d: [*:0]allowzero align(4) u8 = &buf_d;
comptime assert(@TypeOf(a, b, c) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(a, c, b) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(b, a, c) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(b, c, a) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(c, a, b) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(c, b, a) == [*:0]align(2) const volatile u8);
comptime assert(@TypeOf(a, b, c, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(a, b, d, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(a, c, b, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(a, c, d, b) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(a, d, b, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(a, d, c, b) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, a, c, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, a, d, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, c, a, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, c, d, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, d, c, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(b, d, a, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, a, b, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, a, d, b) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, b, a, d) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, b, d, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, d, b, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(c, d, a, b) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, a, b, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, a, c, b) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, b, a, c) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, b, c, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, c, b, a) == [*:0]allowzero align(2) const volatile u8);
comptime assert(@TypeOf(d, c, a, b) == [*:0]allowzero align(2) const volatile u8);
var x: u8 = 0;
_ = &x;
const r1 = switch (x) {
0 => a,
1 => b,
else => c,
2 => c,
else => d,
};
const r2 = switch (x) {
0 => b,
1 => a,
else => c,
2 => c,
else => d,
};
const r3 = switch (x) {
0 => c,
1 => a,
else => b,
2 => b,
else => d,
};
const r4 = switch (x) {
0 => d,
1 => a,
2 => b,
else => c,
};
try expectEqualSlices(u8, std.mem.span(@volatileCast(r1)), "foo");
try expectEqualSlices(u8, std.mem.span(@volatileCast(r2)), "bar");
try expectEqualSlices(u8, std.mem.span(@volatileCast(r3)), "baz");
const NonAllowZero = comptime blk: {
var ti = @typeInfo(@TypeOf(r1, r2, r3, r4));
ti.pointer.is_allowzero = false;
break :blk @Type(ti);
};
try expectEqualSlices(u8, std.mem.span(@volatileCast(@as(NonAllowZero, @ptrCast(r1)))), "foo");
try expectEqualSlices(u8, std.mem.span(@volatileCast(@as(NonAllowZero, @ptrCast(r2)))), "bar");
try expectEqualSlices(u8, std.mem.span(@volatileCast(@as(NonAllowZero, @ptrCast(r3)))), "baz");
try expectEqualSlices(u8, std.mem.span(@volatileCast(@as(NonAllowZero, @ptrCast(r4)))), "qux");
}
test "peer type resolution: arrays of compatible types" {

@ -995,3 +995,11 @@ test "sentinel-terminated 0-length slices" {
try expect(comptime_known_array_value[0] == 2);
try expect(runtime_array_value[0] == 2);
}
test "peer slices keep abi alignment with empty struct" {
var cond: bool = undefined;
cond = false;
const slice = if (cond) &[1]u32{42} else &.{};
comptime assert(@TypeOf(slice) == []const u32);
try expect(slice.len == 0);
}

@ -1,5 +1,4 @@
// Disabled on self-hosted due to linker crash
// #target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version

@ -1,5 +1,4 @@
// Disabled on self-hosted due to linker crash
// #target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version with error

@ -1,4 +1,4 @@
//#target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=non-inline version

@ -1,4 +1,4 @@
//#target=x86_64-linux-selfhosted
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version