Merge branch 'master' into detached-child

antosikv 2024-08-11 14:38:37 +02:00 committed by GitHub
commit d45487e9da
884 changed files with 45894 additions and 16844 deletions


@ -381,6 +381,7 @@ set(ZIG_STAGE2_SOURCES
lib/std/Target/avr.zig
lib/std/Target/bpf.zig
lib/std/Target/hexagon.zig
lib/std/Target/loongarch.zig
lib/std/Target/mips.zig
lib/std/Target/msp430.zig
lib/std/Target/nvptx.zig
@ -945,6 +946,11 @@ set(ZIG_BUILD_ARGS
-Dno-langref
)
option(ZIG_EXTRA_BUILD_ARGS "Extra zig build args")
if(ZIG_EXTRA_BUILD_ARGS)
list(APPEND ZIG_BUILD_ARGS ${ZIG_EXTRA_BUILD_ARGS})
endif()
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
list(APPEND ZIG_BUILD_ARGS -Doptimize=Debug)
elseif("${CMAKE_BUILD_TYPE}" STREQUAL "RelWithDebInfo")


@ -13,11 +13,9 @@ Documentation** corresponding to the version of Zig that you are using by
following the appropriate link on the
[download page](https://ziglang.org/download).
Otherwise, you're looking at a release of Zig, and you can find documentation
here:
* doc/langref.html
* doc/std/index.html
Otherwise, you're looking at a release of Zig, so you can find the language
reference at `doc/langref.html`, and the standard library documentation by
running `zig std`, which will open a browser tab.
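For example, from an extracted release directory (the command used to open the HTML file is platform dependent and only illustrative):

    zig std                    # serves the standard library docs and opens a browser tab
    xdg-open doc/langref.html  # or `open`/`start`, depending on the OS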
## Installation


@ -64,6 +64,22 @@ stage3-release/bin/zig build test docs \
--zig-lib-dir "$PWD/../lib" \
-Denable-tidy
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \
--prefix stage4-release \
-Denable-llvm \
-Dno-lib \
-Doptimize=ReleaseFast \
-Dstrip \
-Dtarget=$TARGET \
-Duse-zig-libcxx \
-Dversion-string="$(stage3-release/bin/zig version)"
# diff returns an error code if the files differ.
echo "If the following command fails, it means nondeterminism has been"
echo "introduced, making stage3 and stage4 no longer byte-for-byte identical."
diff stage3-release/bin/zig stage4-release/bin/zig
# Ensure that updating the wasm binary from this commit will result in a viable build.
stage3-release/bin/zig build update-zig1


@ -69,3 +69,22 @@ Write-Output "Main test suite..."
-Dskip-non-native `
-Denable-symlinks-windows
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.
Write-Output "Build and compare stage4..."
& "stage3-release\bin\zig.exe" build `
--prefix stage4-release `
-Denable-llvm `
-Dno-lib `
-Doptimize=ReleaseFast `
-Dstrip `
-Dtarget="$TARGET" `
-Duse-zig-libcxx `
-Dversion-string="$(stage3-release\bin\zig version)"
CheckLastExitCode
# Compare-Object returns an error code if the files differ.
Write-Output "If the following command fails, it means nondeterminism has been"
Write-Output "introduced, making stage3 and stage4 no longer byte-for-byte identical."
Compare-Object (Get-Content stage3-release\bin\zig.exe) (Get-Content stage4-release\bin\zig.exe)
CheckLastExitCode


@ -70,6 +70,25 @@ Write-Output "Main test suite..."
-Denable-symlinks-windows
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.
Write-Output "Build and compare stage4..."
& "stage3-release\bin\zig.exe" build `
--prefix stage4-release `
-Denable-llvm `
-Dno-lib `
-Doptimize=ReleaseFast `
-Dstrip `
-Dtarget="$TARGET" `
-Duse-zig-libcxx `
-Dversion-string="$(stage3-release\bin\zig version)"
CheckLastExitCode
# Compare-Object returns an error code if the files differ.
Write-Output "If the following command fails, it means nondeterminism has been"
Write-Output "introduced, making stage3 and stage4 no longer byte-for-byte identical."
Compare-Object (Get-Content stage3-release\bin\zig.exe) (Get-Content stage4-release\bin\zig.exe)
CheckLastExitCode
Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
& "stage3-release\bin\zig.exe" test `
..\test\behavior.zig `


@ -5053,7 +5053,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
It may have any alignment, and it may have any element type.</p>
<p>{#syntax#}elem{#endsyntax#} is coerced to the element type of {#syntax#}dest{#endsyntax#}.</p>
<p>For securely zeroing out sensitive contents from memory, you should use
{#syntax#}std.crypto.utils.secureZero{#endsyntax#}</p>
{#syntax#}std.crypto.secureZero{#endsyntax#}</p>
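A minimal sketch of the renamed function in use (the buffer name and size are illustrative; unlike a plain @memset, the zeroing is intended not to be optimized away):

const std = @import("std");

test "wipe sensitive data" {
    var key: [32]u8 = undefined;
    @memset(&key, 0xAA); // pretend this held secret material
    std.crypto.secureZero(u8, &key);
    try std.testing.expectEqual(@as(u8, 0), key[0]);
}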
{#header_close#}
{#header_open|@min#}

lib/c.zig

@ -29,10 +29,6 @@ comptime {
@export(wasm_start, .{ .name = "_start", .linkage = .strong });
}
if (native_os == .linux) {
@export(clone, .{ .name = "clone" });
}
if (builtin.link_libc) {
@export(strcmp, .{ .name = "strcmp", .linkage = .strong });
@export(strncmp, .{ .name = "strncmp", .linkage = .strong });
@ -188,401 +184,3 @@ test "strncmp" {
try std.testing.expect(strncmp("b", "a", 1) > 0);
try std.testing.expect(strncmp("\xff", "\x02", 1) > 0);
}
// TODO we should be able to put this directly in std/linux/x86_64.zig but
// it causes a segfault in release mode. this is a workaround of calling it
// across .o file boundaries. fix comptime @ptrCast of nakedcc functions.
fn clone() callconv(.Naked) void {
switch (native_arch) {
.x86 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// +8, +12, +16, +20, +24, +28, +32
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// eax, ebx, ecx, edx, esi, edi
asm volatile (
\\ pushl %%ebp
\\ movl %%esp,%%ebp
\\ pushl %%ebx
\\ pushl %%esi
\\ pushl %%edi
\\ // Setup the arguments
\\ movl 16(%%ebp),%%ebx
\\ movl 12(%%ebp),%%ecx
\\ andl $-16,%%ecx
\\ subl $20,%%ecx
\\ movl 20(%%ebp),%%eax
\\ movl %%eax,4(%%ecx)
\\ movl 8(%%ebp),%%eax
\\ movl %%eax,0(%%ecx)
\\ movl 24(%%ebp),%%edx
\\ movl 28(%%ebp),%%esi
\\ movl 32(%%ebp),%%edi
\\ movl $120,%%eax
\\ int $128
\\ testl %%eax,%%eax
\\ jnz 1f
\\ popl %%eax
\\ xorl %%ebp,%%ebp
\\ calll *%%eax
\\ movl %%eax,%%ebx
\\ movl $1,%%eax
\\ int $128
\\1:
\\ popl %%edi
\\ popl %%esi
\\ popl %%ebx
\\ popl %%ebp
\\ retl
);
},
.x86_64 => {
asm volatile (
\\ movl $56,%%eax // SYS_clone
\\ movq %%rdi,%%r11
\\ movq %%rdx,%%rdi
\\ movq %%r8,%%rdx
\\ movq %%r9,%%r8
\\ movq 8(%%rsp),%%r10
\\ movq %%r11,%%r9
\\ andq $-16,%%rsi
\\ subq $8,%%rsi
\\ movq %%rcx,(%%rsi)
\\ syscall
\\ testq %%rax,%%rax
\\ jnz 1f
\\ xorl %%ebp,%%ebp
\\ popq %%rdi
\\ callq *%%r9
\\ movl %%eax,%%edi
\\ movl $60,%%eax // SYS_exit
\\ syscall
\\1: ret
\\
);
},
.aarch64, .aarch64_be => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// x0, x1, w2, x3, x4, x5, x6
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// x8, x0, x1, x2, x3, x4
asm volatile (
\\ // align stack and save func,arg
\\ and x1,x1,#-16
\\ stp x0,x3,[x1,#-16]!
\\
\\ // syscall
\\ uxtw x0,w2
\\ mov x2,x4
\\ mov x3,x5
\\ mov x4,x6
\\ mov x8,#220 // SYS_clone
\\ svc #0
\\
\\ cbz x0,1f
\\ // parent
\\ ret
\\ // child
\\1: ldp x1,x0,[sp],#16
\\ blr x1
\\ mov x8,#93 // SYS_exit
\\ svc #0
);
},
.arm, .armeb, .thumb, .thumbeb => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// r0, r1, r2, r3, +0, +4, +8
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// r7 r0, r1, r2, r3, r4
asm volatile (
\\ stmfd sp!,{r4,r5,r6,r7}
\\ mov r7,#120
\\ mov r6,r3
\\ mov r5,r0
\\ mov r0,r2
\\ and r1,r1,#-16
\\ ldr r2,[sp,#16]
\\ ldr r3,[sp,#20]
\\ ldr r4,[sp,#24]
\\ svc 0
\\ tst r0,r0
\\ beq 1f
\\ ldmfd sp!,{r4,r5,r6,r7}
\\ bx lr
\\
\\1: mov r0,r6
\\ bl 3f
\\2: mov r7,#1
\\ svc 0
\\ b 2b
\\3: bx r5
);
},
.riscv32 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// a0, a1, a2, a3, a4, a5, a6
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// a7 a0, a1, a2, a3, a4
asm volatile (
\\ # Save func and arg to stack
\\ addi a1, a1, -8
\\ sw a0, 0(a1)
\\ sw a3, 4(a1)
\\
\\ # Call SYS_clone
\\ mv a0, a2
\\ mv a2, a4
\\ mv a3, a5
\\ mv a4, a6
\\ li a7, 220 # SYS_clone
\\ ecall
\\
\\ beqz a0, 1f
\\ # Parent
\\ ret
\\
\\ # Child
\\1: lw a1, 0(sp)
\\ lw a0, 4(sp)
\\ jalr a1
\\
\\ # Exit
\\ li a7, 93 # SYS_exit
\\ ecall
);
},
.riscv64 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// a0, a1, a2, a3, a4, a5, a6
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// a7 a0, a1, a2, a3, a4
asm volatile (
\\ # Save func and arg to stack
\\ addi a1, a1, -16
\\ sd a0, 0(a1)
\\ sd a3, 8(a1)
\\
\\ # Call SYS_clone
\\ mv a0, a2
\\ mv a2, a4
\\ mv a3, a5
\\ mv a4, a6
\\ li a7, 220 # SYS_clone
\\ ecall
\\
\\ beqz a0, 1f
\\ # Parent
\\ ret
\\
\\ # Child
\\1: ld a1, 0(sp)
\\ ld a0, 8(sp)
\\ jalr a1
\\
\\ # Exit
\\ li a7, 93 # SYS_exit
\\ ecall
);
},
.mips, .mipsel, .mips64, .mips64el => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// 3, 4, 5, 6, 7, 8, 9
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// 2 4, 5, 6, 7, 8
asm volatile (
\\ # Save function pointer and argument pointer on new thread stack
\\ and $5, $5, -8
\\ subu $5, $5, 16
\\ sw $4, 0($5)
\\ sw $7, 4($5)
\\ # Shuffle (fn,sp,fl,arg,ptid,tls,ctid) to (fl,sp,ptid,tls,ctid)
\\ move $4, $6
\\ lw $6, 16($sp)
\\ lw $7, 20($sp)
\\ lw $9, 24($sp)
\\ subu $sp, $sp, 16
\\ sw $9, 16($sp)
\\ li $2, 4120
\\ syscall
\\ beq $7, $0, 1f
\\ nop
\\ addu $sp, $sp, 16
\\ jr $ra
\\ subu $2, $0, $2
\\1:
\\ beq $2, $0, 1f
\\ nop
\\ addu $sp, $sp, 16
\\ jr $ra
\\ nop
\\1:
\\ lw $25, 0($sp)
\\ lw $4, 4($sp)
\\ jalr $25
\\ nop
\\ move $4, $2
\\ li $2, 4001
\\ syscall
);
},
.powerpc, .powerpcle => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// 3, 4, 5, 6, 7, 8, 9
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// 0 3, 4, 5, 6, 7
asm volatile (
\\ # store non-volatile regs r30, r31 on stack in order to put our
\\ # start func and its arg there
\\ stwu 30, -16(1)
\\ stw 31, 4(1)
\\
\\ # save r3 (func) into r30, and r6(arg) into r31
\\ mr 30, 3
\\ mr 31, 6
\\
\\ # create initial stack frame for new thread
\\ clrrwi 4, 4, 4
\\ li 0, 0
\\ stwu 0, -16(4)
\\
\\ #move c into first arg
\\ mr 3, 5
\\ #mr 4, 4
\\ mr 5, 7
\\ mr 6, 8
\\ mr 7, 9
\\
\\ # move syscall number into r0
\\ li 0, 120
\\
\\ sc
\\
\\ # check for syscall error
\\ bns+ 1f # jump to label 1 if no summary overflow.
\\ #else
\\ neg 3, 3 #negate the result (errno)
\\ 1:
\\ # compare sc result with 0
\\ cmpwi cr7, 3, 0
\\
\\ # if not 0, jump to end
\\ bne cr7, 2f
\\
\\ #else: we're the child
\\ #call funcptr: move arg (d) into r3
\\ mr 3, 31
\\ #move r30 (funcptr) into CTR reg
\\ mtctr 30
\\ # call CTR reg
\\ bctrl
\\ # mov SYS_exit into r0 (the exit param is already in r3)
\\ li 0, 1
\\ sc
\\
\\ 2:
\\
\\ # restore stack
\\ lwz 30, 0(1)
\\ lwz 31, 4(1)
\\ addi 1, 1, 16
\\
\\ blr
);
},
.powerpc64, .powerpc64le => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// 3, 4, 5, 6, 7, 8, 9
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// 0 3, 4, 5, 6, 7
asm volatile (
\\ # create initial stack frame for new thread
\\ clrrdi 4, 4, 4
\\ li 0, 0
\\ stdu 0,-32(4)
\\
\\ # save fn and arg to child stack
\\ std 3, 8(4)
\\ std 6, 16(4)
\\
\\ # shuffle args into correct registers and call SYS_clone
\\ mr 3, 5
\\ #mr 4, 4
\\ mr 5, 7
\\ mr 6, 8
\\ mr 7, 9
\\ li 0, 120 # SYS_clone = 120
\\ sc
\\
\\ # if error, negate return (errno)
\\ bns+ 1f
\\ neg 3, 3
\\
\\1:
\\ # if we're the parent, return
\\ cmpwi cr7, 3, 0
\\ bnelr cr7
\\
\\ # we're the child. call fn(arg)
\\ ld 3, 16(1)
\\ ld 12, 8(1)
\\ mtctr 12
\\ bctrl
\\
\\ # call SYS_exit. exit code is already in r3 from fn return value
\\ li 0, 1 # SYS_exit = 1
\\ sc
);
},
.sparc64 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// i0, i1, i2, i3, i4, i5, sp
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)
// g1 o0, o1, o2, o3, o4
asm volatile (
\\ save %%sp, -192, %%sp
\\ # Save the func pointer and the arg pointer
\\ mov %%i0, %%g2
\\ mov %%i3, %%g3
\\ # Shuffle the arguments
\\ mov 217, %%g1
\\ mov %%i2, %%o0
\\ # Add some extra space for the initial frame
\\ sub %%i1, 176 + 2047, %%o1
\\ mov %%i4, %%o2
\\ mov %%i5, %%o3
\\ ldx [%%fp + 0x8af], %%o4
\\ t 0x6d
\\ bcs,pn %%xcc, 2f
\\ nop
\\ # The child pid is returned in o0 while o1 tells if this
\\ # process is # the child (=1) or the parent (=0).
\\ brnz %%o1, 1f
\\ nop
\\ # Parent process, return the child pid
\\ mov %%o0, %%i0
\\ ret
\\ restore
\\1:
\\ # Child process, call func(arg)
\\ mov %%g0, %%fp
\\ call %%g2
\\ mov %%g3, %%o0
\\ # Exit
\\ mov 1, %%g1
\\ t 0x6d
\\2:
\\ # The syscall failed
\\ sub %%g0, %%o0, %%i0
\\ ret
\\ restore
);
},
else => @compileError("Implement clone() for this arch."),
}
}


@ -363,7 +363,7 @@ fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
\\#define __sparc_v9__ 1
\\
),
.sparc, .sparcel => try w.writeAll(
.sparc => try w.writeAll(
\\#define __sparc__ 1
\\#define __sparc 1
\\
@ -534,7 +534,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
if (system_defines_mode == .include_system_defines) {
try buf.appendSlice(
\\#define __VERSION__ "Aro
\\#define __VERSION__ "Aro
++ @import("../backend.zig").version_str ++ "\"\n" ++
\\#define __Aro__
\\


@ -376,7 +376,7 @@ fn collectLibDirsAndTriples(
biarch_libdirs.appendSliceAssumeCapacity(&RISCV32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&RISCV32Triples);
},
.sparc, .sparcel => {
.sparc => {
lib_dirs.appendSliceAssumeCapacity(&SPARCv8LibDirs);
triple_aliases.appendSliceAssumeCapacity(&SPARCv8Triples);
biarch_libdirs.appendSliceAssumeCapacity(&SPARCv9LibDirs);


@ -265,14 +265,13 @@ fn clearBuffers(pp: *Preprocessor) void {
pub fn expansionSlice(pp: *Preprocessor, tok: Tree.TokenIndex) []Source.Location {
const S = struct {
fn order_token_index(context: void, lhs: Tree.TokenIndex, rhs: Tree.TokenIndex) std.math.Order {
_ = context;
return std.math.order(lhs, rhs);
fn orderTokenIndex(context: Tree.TokenIndex, item: Tree.TokenIndex) std.math.Order {
return std.math.order(item, context);
}
};
const indices = pp.expansion_entries.items(.idx);
const idx = std.sort.binarySearch(Tree.TokenIndex, tok, indices, {}, S.order_token_index) orelse return &.{};
const idx = std.sort.binarySearch(Tree.TokenIndex, indices, tok, S.orderTokenIndex) orelse return &.{};
const locs = pp.expansion_entries.items(.locs)[idx];
var i: usize = 0;
while (locs[i].id != .unused) : (i += 1) {}


@ -89,7 +89,7 @@ pub fn discover(tc: *Toolchain) !void {
.{ .unknown = {} } // TODO
else if (target.cpu.arch.isMIPS())
.{ .unknown = {} } // TODO
else if (target.cpu.arch.isPPC())
else if (target.cpu.arch.isPowerPC())
.{ .unknown = {} } // TODO
else if (target.cpu.arch == .ve)
.{ .unknown = {} } // TODO


@ -53,13 +53,12 @@ pub fn intPtrType(target: std.Target) Type {
.xcore,
.hexagon,
.m68k,
.spir,
.spirv32,
.arc,
.avr,
=> return .{ .specifier = .int },
.sparc, .sparcel => switch (target.os.tag) {
.sparc => switch (target.os.tag) {
.netbsd, .openbsd => {},
else => return .{ .specifier = .int },
},
@ -133,7 +132,7 @@ pub fn defaultFunctionAlignment(target: std.Target) u8 {
return switch (target.cpu.arch) {
.arm, .armeb => 4,
.aarch64, .aarch64_be => 4,
.sparc, .sparcel, .sparc64 => 4,
.sparc, .sparc64 => 4,
.riscv64 => 2,
else => 1,
};
@ -265,7 +264,7 @@ pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
pub fn hasFloat128(target: std.Target) bool {
if (target.cpu.arch.isWasm()) return true;
if (target.isDarwin()) return false;
if (target.cpu.arch.isPPC() or target.cpu.arch.isPPC64()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
return switch (target.os.tag) {
.dragonfly,
.haiku,
@ -427,7 +426,7 @@ pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian
.powerpc64le => "elf64lppc",
.riscv32 => "elf32lriscv",
.riscv64 => "elf64lriscv",
.sparc, .sparcel => "elf32_sparc",
.sparc => "elf32_sparc",
.sparc64 => "elf64_sparc",
.loongarch32 => "elf32loongarch",
.loongarch64 => "elf64loongarch",
@ -467,13 +466,11 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
.powerpcle,
.riscv32,
.sparc,
.sparcel,
.thumb,
.thumbeb,
.x86,
.xcore,
.nvptx,
.spir,
.kalimba,
.lanai,
.wasm32,
@ -487,7 +484,6 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
.aarch64_be => copy.cpu.arch = .armeb,
.nvptx64 => copy.cpu.arch = .nvptx,
.wasm64 => copy.cpu.arch = .wasm32,
.spir64 => copy.cpu.arch = .spir,
.spirv64 => copy.cpu.arch = .spirv32,
.loongarch64 => copy.cpu.arch = .loongarch32,
.mips64 => copy.cpu.arch = .mips,
@ -513,7 +509,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.lanai,
.m68k,
.msp430,
.sparcel,
.spu_2,
.xcore,
.xtensa,
@ -526,7 +521,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.bpfel,
.nvptx64,
.wasm64,
.spir64,
.spirv64,
.loongarch64,
.mips64,
@ -550,7 +544,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
.powerpcle => copy.cpu.arch = .powerpc64le,
.riscv32 => copy.cpu.arch = .riscv64,
.sparc => copy.cpu.arch = .sparc64,
.spir => copy.cpu.arch = .spir64,
.spirv32 => copy.cpu.arch = .spirv64,
.thumb => copy.cpu.arch = .aarch64,
.thumbeb => copy.cpu.arch = .aarch64_be,
@ -597,7 +590,6 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.riscv64 => "riscv64",
.sparc => "sparc",
.sparc64 => "sparc64",
.sparcel => "sparcel",
.s390x => "s390x",
.thumb => "thumb",
.thumbeb => "thumbeb",
@ -607,8 +599,6 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
.xtensa => "xtensa",
.nvptx => "nvptx",
.nvptx64 => "nvptx64",
.spir => "spir",
.spir64 => "spir64",
.spirv32 => "spirv32",
.spirv64 => "spirv64",
.kalimba => "kalimba",
@ -624,8 +614,6 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
const llvm_os = switch (target.os.tag) {
.freestanding => "unknown",
.ananas => "ananas",
.cloudabi => "cloudabi",
.dragonfly => "dragonfly",
.freebsd => "freebsd",
.fuchsia => "fuchsia",


@ -357,7 +357,6 @@ fn getOSLibDir(target: std.Target) []const u8 {
.powerpc,
.powerpcle,
.sparc,
.sparcel,
=> return "lib32",
else => {},
}


@ -78,6 +78,17 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: ZigNode) !void {
}
}
fn fail(
c: *Context,
err: anytype,
source_loc: TokenIndex,
comptime format: []const u8,
args: anytype,
) (@TypeOf(err) || error{OutOfMemory}) {
try warn(c, &c.global_scope.base, source_loc, format, args);
return err;
}
fn failDecl(c: *Context, loc: TokenIndex, name: []const u8, comptime format: []const u8, args: anytype) Error!void {
// location
// pub const name = @compileError(msg);
@ -185,7 +196,7 @@ fn prepopulateGlobalNameTable(c: *Context) !void {
for (c.tree.root_decls) |node| {
const data = node_data[@intFromEnum(node)];
switch (node_tags[@intFromEnum(node)]) {
.typedef => @panic("TODO"),
.typedef => {},
.struct_decl_two,
.union_decl_two,
@ -243,6 +254,7 @@ fn transTopLevelDecls(c: *Context) !void {
fn transDecl(c: *Context, scope: *Scope, decl: NodeIndex) !void {
const node_tags = c.tree.nodes.items(.tag);
const node_data = c.tree.nodes.items(.data);
const node_ty = c.tree.nodes.items(.ty);
const data = node_data[@intFromEnum(decl)];
switch (node_tags[@intFromEnum(decl)]) {
.typedef => {
@ -252,17 +264,12 @@ fn transDecl(c: *Context, scope: *Scope, decl: NodeIndex) !void {
.struct_decl_two,
.union_decl_two,
=> {
var fields = [2]NodeIndex{ data.bin.lhs, data.bin.rhs };
var field_count: u2 = 0;
if (fields[0] != .none) field_count += 1;
if (fields[1] != .none) field_count += 1;
try transRecordDecl(c, scope, decl, fields[0..field_count]);
try transRecordDecl(c, scope, node_ty[@intFromEnum(decl)]);
},
.struct_decl,
.union_decl,
=> {
const fields = c.tree.data[data.range.start..data.range.end];
try transRecordDecl(c, scope, decl, fields);
try transRecordDecl(c, scope, node_ty[@intFromEnum(decl)]);
},
.enum_decl_two => {
@ -270,11 +277,13 @@ fn transDecl(c: *Context, scope: *Scope, decl: NodeIndex) !void {
var field_count: u8 = 0;
if (fields[0] != .none) field_count += 1;
if (fields[1] != .none) field_count += 1;
try transEnumDecl(c, scope, decl, fields[0..field_count]);
const enum_decl = node_ty[@intFromEnum(decl)].canonicalize(.standard).data.@"enum";
try transEnumDecl(c, scope, enum_decl, fields[0..field_count]);
},
.enum_decl => {
const fields = c.tree.data[data.range.start..data.range.end];
try transEnumDecl(c, scope, decl, fields);
const enum_decl = node_ty[@intFromEnum(decl)].canonicalize(.standard).data.@"enum";
try transEnumDecl(c, scope, enum_decl, fields);
},
.enum_field_decl,
@ -294,7 +303,7 @@ fn transDecl(c: *Context, scope: *Scope, decl: NodeIndex) !void {
.inline_fn_def,
.inline_static_fn_def,
=> {
try transFnDecl(c, decl);
try transFnDecl(c, decl, true);
},
.@"var",
@ -304,15 +313,51 @@ fn transDecl(c: *Context, scope: *Scope, decl: NodeIndex) !void {
.threadlocal_extern_var,
.threadlocal_static_var,
=> {
try transVarDecl(c, decl, null);
try transVarDecl(c, decl);
},
.static_assert => try warn(c, &c.global_scope.base, 0, "ignoring _Static_assert declaration", .{}),
else => unreachable,
}
}
fn transTypeDef(_: *Context, _: *Scope, _: NodeIndex) Error!void {
@panic("TODO");
fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: NodeIndex) Error!void {
const ty = c.tree.nodes.items(.ty)[@intFromEnum(typedef_decl)];
const data = c.tree.nodes.items(.data)[@intFromEnum(typedef_decl)];
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var name: []const u8 = c.tree.tokSlice(data.decl.name);
try c.typedefs.put(c.gpa, name, {});
if (!toplevel) name = try bs.makeMangledName(c, name);
const typedef_loc = data.decl.name;
const init_node = transType(c, scope, ty, .standard, typedef_loc) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, typedef_loc, name, "unable to resolve typedef child type", .{});
},
error.OutOfMemory => |e| return e,
};
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]ZigTag{ .var_simple, .pub_var_simple })[@intFromBool(toplevel)] },
.data = .{
.name = name,
.init = init_node,
},
};
const node = ZigNode.initPayload(&payload.base);
if (toplevel) {
try addTopLevelDecl(c, name, node);
} else {
try scope.appendNode(node);
if (node.tag() != .pub_var_simple) {
try bs.discardVariable(c, name);
}
}
}
fn mangleWeakGlobalName(c: *Context, want_name: []const u8) ![]const u8 {
@ -330,16 +375,14 @@ fn mangleWeakGlobalName(c: *Context, want_name: []const u8) ![]const u8 {
return cur_name;
}
fn transRecordDecl(c: *Context, scope: *Scope, record_node: NodeIndex, field_nodes: []const NodeIndex) Error!void {
const node_types = c.tree.nodes.items(.ty);
const raw_record_ty = node_types[@intFromEnum(record_node)];
const record_decl = raw_record_ty.getRecord().?;
fn transRecordDecl(c: *Context, scope: *Scope, record_ty: Type) Error!void {
const record_decl = record_ty.getRecord().?;
if (c.decl_table.get(@intFromPtr(record_decl))) |_|
return; // Avoid processing this decl twice
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
const container_kind: ZigTag = if (raw_record_ty.is(.@"union")) .@"union" else .@"struct";
const container_kind: ZigTag = if (record_ty.is(.@"union")) .@"union" else .@"struct";
const container_kind_name: []const u8 = @tagName(container_kind);
var is_unnamed = false;
@ -350,7 +393,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_node: NodeIndex, field_nod
bare_name = typedef_name;
name = typedef_name;
} else {
if (raw_record_ty.isAnonymousRecord(c.comp)) {
if (record_ty.isAnonymousRecord(c.comp)) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
@ -364,6 +407,11 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_node: NodeIndex, field_nod
const is_pub = toplevel and !is_unnamed;
const init_node = blk: {
if (record_decl.isIncomplete()) {
try c.opaque_demotes.put(c.gpa, @intFromPtr(record_decl), {});
break :blk ZigTag.opaque_literal.init();
}
var fields = try std.ArrayList(ast.Payload.Record.Field).initCapacity(c.gpa, record_decl.fields.len);
defer fields.deinit();
@ -377,17 +425,10 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_node: NodeIndex, field_nod
// layout, then we can just use a simple `extern` type. If it does have attributes,
// then we need to inspect the layout and assign an `align` value for each field.
const has_alignment_attributes = record_decl.field_attributes != null or
raw_record_ty.hasAttribute(.@"packed") or
raw_record_ty.hasAttribute(.aligned);
record_ty.hasAttribute(.@"packed") or
record_ty.hasAttribute(.aligned);
const head_field_alignment: ?c_uint = if (has_alignment_attributes) headFieldAlignment(record_decl) else null;
// Iterate over field nodes so that we translate any type decls included in this record decl.
// TODO: Move this logic into `fn transType()` instead of handling decl translation here.
for (field_nodes) |field_node| {
const field_raw_ty = node_types[@intFromEnum(field_node)];
if (field_raw_ty.isEnumOrRecord()) try transDecl(c, scope, field_node);
}
for (record_decl.fields, 0..) |field, field_index| {
const field_loc = field.name_tok;
@ -473,7 +514,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_node: NodeIndex, field_nod
}
}
fn transFnDecl(c: *Context, fn_decl: NodeIndex) Error!void {
fn transFnDecl(c: *Context, fn_decl: NodeIndex, is_pub: bool) Error!void {
const raw_ty = c.tree.nodes.items(.ty)[@intFromEnum(fn_decl)];
const fn_ty = raw_ty.canonicalize(.standard);
const node_data = c.tree.nodes.items(.data)[@intFromEnum(fn_decl)];
@ -498,6 +539,7 @@ fn transFnDecl(c: *Context, fn_decl: NodeIndex) Error!void {
else => unreachable,
},
.is_pub = is_pub,
};
const proto_node = transFnType(c, &c.global_scope.base, raw_ty, fn_ty, fn_decl_loc, proto_ctx) catch |err| switch (err) {
@ -566,22 +608,22 @@ fn transFnDecl(c: *Context, fn_decl: NodeIndex) Error!void {
return addTopLevelDecl(c, fn_name, proto_node);
}
fn transVarDecl(_: *Context, _: NodeIndex, _: ?usize) Error!void {
@panic("TODO");
fn transVarDecl(c: *Context, node: NodeIndex) Error!void {
const data = c.tree.nodes.items(.data)[@intFromEnum(node)];
const name = c.tree.tokSlice(data.decl.name);
return failDecl(c, data.decl.name, name, "unable to translate variable declaration", .{});
}
fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: NodeIndex, field_nodes: []const NodeIndex) Error!void {
const node_types = c.tree.nodes.items(.ty);
const ty = node_types[@intFromEnum(enum_decl)];
if (c.decl_table.get(@intFromPtr(ty.data.@"enum"))) |_|
fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const Type.Enum, field_nodes: []const NodeIndex) Error!void {
if (c.decl_table.get(@intFromPtr(enum_decl))) |_|
return; // Avoid processing this decl twice
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var is_unnamed = false;
var bare_name: []const u8 = c.mapper.lookup(ty.data.@"enum".name);
var bare_name: []const u8 = c.mapper.lookup(enum_decl.name);
var name = bare_name;
if (c.unnamed_typedefs.get(@intFromPtr(ty.data.@"enum"))) |typedef_name| {
if (c.unnamed_typedefs.get(@intFromPtr(enum_decl))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
@ -592,10 +634,10 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: NodeIndex, field_nodes:
name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @intFromPtr(ty.data.@"enum"), name);
try c.decl_table.putNoClobber(c.gpa, @intFromPtr(enum_decl), name);
const enum_type_node = if (!ty.data.@"enum".isIncomplete()) blk: {
for (ty.data.@"enum".fields, field_nodes) |field, field_node| {
const enum_type_node = if (!enum_decl.isIncomplete()) blk: {
for (enum_decl.fields, field_nodes) |field, field_node| {
var enum_val_name: []const u8 = c.mapper.lookup(field.name);
if (!toplevel) {
enum_val_name = try bs.makeMangledName(c, enum_val_name);
@ -621,14 +663,14 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: NodeIndex, field_nodes:
}
}
break :blk transType(c, scope, ty.data.@"enum".tag_ty, .standard, 0) catch |err| switch (err) {
break :blk transType(c, scope, enum_decl.tag_ty, .standard, 0) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, 0, name, "unable to translate enum integer type", .{});
},
else => |e| return e,
};
} else blk: {
try c.opaque_demotes.put(c.gpa, @intFromPtr(ty.data.@"enum"), {});
try c.opaque_demotes.put(c.gpa, @intFromPtr(enum_decl), {});
break :blk ZigTag.opaque_literal.init();
};
@ -654,8 +696,21 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: NodeIndex, field_nodes:
}
}
fn getTypeStr(c: *Context, ty: Type) ![]const u8 {
var buf: std.ArrayListUnmanaged(u8) = .{};
defer buf.deinit(c.gpa);
const w = buf.writer(c.gpa);
try ty.print(c.mapper, c.comp.langopts, w);
return c.arena.dupe(u8, buf.items);
}
fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualHandling, source_loc: TokenIndex) TypeError!ZigNode {
const ty = raw_ty.canonicalize(qual_handling);
if (ty.qual.atomic) {
const type_name = try getTypeStr(c, ty);
return fail(c, error.UnsupportedType, source_loc, "unsupported type: '{s}'", .{type_name});
}
switch (ty.specifier) {
.void => return ZigTag.type.create(c.arena, "anyopaque"),
.bool => return ZigTag.type.create(c.arena, "bool"),
@ -678,13 +733,53 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH
.long_double => return ZigTag.type.create(c.arena, "c_longdouble"),
.float80 => return ZigTag.type.create(c.arena, "f80"),
.float128 => return ZigTag.type.create(c.arena, "f128"),
.@"enum" => @panic("TODO"),
.pointer,
.unspecified_variable_len_array,
.@"enum" => {
const enum_decl = ty.data.@"enum";
var trans_scope = scope;
if (enum_decl.name != .empty) {
const decl_name = c.mapper.lookup(enum_decl.name);
if (c.weak_global_names.contains(decl_name)) trans_scope = &c.global_scope.base;
}
try transEnumDecl(c, trans_scope, enum_decl, &.{});
return ZigTag.identifier.create(c.arena, c.decl_table.get(@intFromPtr(enum_decl)).?);
},
.pointer => {
const child_type = ty.elemType();
const is_fn_proto = child_type.isFunc();
const is_const = is_fn_proto or child_type.isConst();
const is_volatile = child_type.qual.@"volatile";
const elem_type = try transType(c, scope, child_type, qual_handling, source_loc);
const ptr_info = .{
.is_const = is_const,
.is_volatile = is_volatile,
.elem_type = elem_type,
};
if (is_fn_proto or
typeIsOpaque(c, child_type) or
typeWasDemotedToOpaque(c, child_type))
{
const ptr = try ZigTag.single_pointer.create(c.arena, ptr_info);
return ZigTag.optional_type.create(c.arena, ptr);
}
return ZigTag.c_pointer.create(c.arena, ptr_info);
},
.unspecified_variable_len_array, .incomplete_array => {
const child_type = ty.elemType();
const is_const = child_type.qual.@"const";
const is_volatile = child_type.qual.@"volatile";
const elem_type = try transType(c, scope, child_type, qual_handling, source_loc);
return ZigTag.c_pointer.create(c.arena, .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type });
},
.array,
.static_array,
.incomplete_array,
=> @panic("TODO"),
=> {
const size = ty.arrayLen().?;
const elem_type = try transType(c, scope, ty.elemType(), qual_handling, source_loc);
return ZigTag.array_type.create(c.arena, .{ .len = size, .elem_type = elem_type });
},
.func,
.var_args_func,
.old_style_func,
@ -698,6 +793,7 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH
const name_id = c.mapper.lookup(record_decl.name);
if (c.weak_global_names.contains(name_id)) trans_scope = &c.global_scope.base;
}
try transRecordDecl(c, trans_scope, ty);
const name = c.decl_table.get(@intFromPtr(ty.data.record)).?;
return ZigTag.identifier.create(c.arena, name);
},
@ -927,7 +1023,9 @@ fn transFnType(
}
fn transStmt(c: *Context, node: NodeIndex) TransError!ZigNode {
return transExpr(c, node, .unused);
_ = c;
_ = node;
return error.UnsupportedTranslation;
}
fn transCompoundStmtInline(c: *Context, compound: NodeIndex, block: *Scope.Block) TransError!void {
@ -952,6 +1050,45 @@ fn transCompoundStmtInline(c: *Context, compound: NodeIndex, block: *Scope.Block
}
}
fn recordHasBitfield(record: *const Type.Record) bool {
if (record.isIncomplete()) return false;
for (record.fields) |field| {
if (!field.isRegularField()) return true;
}
return false;
}
fn typeIsOpaque(c: *Context, ty: Type) bool {
return switch (ty.specifier) {
.void => true,
.@"struct", .@"union" => recordHasBitfield(ty.getRecord().?),
.typeof_type => typeIsOpaque(c, ty.data.sub_type.*),
.typeof_expr => typeIsOpaque(c, ty.data.expr.ty),
.attributed => typeIsOpaque(c, ty.data.attributed.base),
else => false,
};
}
fn typeWasDemotedToOpaque(c: *Context, ty: Type) bool {
switch (ty.specifier) {
.@"struct", .@"union" => {
const record = ty.getRecord().?;
if (c.opaque_demotes.contains(@intFromPtr(record))) return true;
for (record.fields) |field| {
if (typeWasDemotedToOpaque(c, field.ty)) return true;
}
return false;
},
.@"enum" => return c.opaque_demotes.contains(@intFromPtr(ty.data.@"enum")),
.typeof_type => return typeWasDemotedToOpaque(c, ty.data.sub_type.*),
.typeof_expr => return typeWasDemotedToOpaque(c, ty.data.expr.ty),
.attributed => return typeWasDemotedToOpaque(c, ty.data.attributed.base),
else => return false,
}
}
fn transCompoundStmt(c: *Context, scope: *Scope, compound: NodeIndex) TransError!ZigNode {
var block_scope = try Scope.Block.init(c, scope, false);
defer block_scope.deinit();


@ -17,6 +17,12 @@ const runner = @This();
pub const root = @import("@build");
pub const dependencies = @import("@dependencies");
pub const std_options: std.Options = .{
.side_channels_mitigations = .none,
.http_disable_tls = true,
.crypto_fork_safety = false,
};
pub fn main() !void {
// Here we use an ArenaAllocator backed by a page allocator because a build is a short-lived,
// one shot program. We don't need to waste time freeing memory and finding places to squish
@ -106,6 +112,7 @@ pub fn main() !void {
var watch = false;
var fuzz = false;
var debounce_interval_ms: u16 = 50;
var listen_port: u16 = 0;
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
@ -203,6 +210,14 @@ pub fn main() !void {
next_arg, @errorName(err),
});
};
} else if (mem.eql(u8, arg, "--port")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected u16 after '{s}'", .{arg});
listen_port = std.fmt.parseUnsigned(u16, next_arg, 10) catch |err| {
fatal("unable to parse port '{s}' as unsigned 16-bit integer: {s}\n", .{
next_arg, @errorName(err),
});
};
} else if (mem.eql(u8, arg, "--debug-log")) {
const next_arg = nextArgOrFatal(args, &arg_idx);
try debug_log_scopes.append(next_arg);
@ -403,7 +418,27 @@ pub fn main() !void {
else => return err,
};
if (fuzz) {
Fuzz.start(&run.thread_pool, run.step_stack.keys(), run.ttyconf, main_progress_node);
switch (builtin.os.tag) {
// Current implementation depends on two things that need to be ported to Windows:
// * Memory-mapping to share data between the fuzzer and build runner.
// * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
// many addresses to source locations).
.windows => fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
else => {},
}
const listen_address = std.net.Address.parseIp("127.0.0.1", listen_port) catch unreachable;
try Fuzz.start(
gpa,
arena,
global_cache_directory,
zig_lib_directory,
zig_exe,
&run.thread_pool,
run.step_stack.keys(),
run.ttyconf,
listen_address,
main_progress_node,
);
}
if (!watch) return cleanExit();
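A hypothetical invocation of the new flag (the port number is an arbitrary example; --fuzz is the build runner flag that sets the `fuzz` variable referenced above):

zig build test --fuzz --port 38495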


@ -185,6 +185,7 @@ const FmtError = error{
BrokenPipe,
Unexpected,
WouldBlock,
Canceled,
FileClosed,
DestinationAddressRequired,
DiskQuota,


@ -275,10 +275,6 @@ fn buildWasmBinary(
) ![]const u8 {
const gpa = context.gpa;
const main_src_path = try std.fs.path.join(arena, &.{
context.zig_lib_directory, "docs", "wasm", "main.zig",
});
var argv: std.ArrayListUnmanaged([]const u8) = .{};
try argv.appendSlice(arena, &.{
@ -298,7 +294,10 @@ fn buildWasmBinary(
"--name",
"autodoc",
"-rdynamic",
main_src_path,
"--dep",
"Walk",
try std.fmt.allocPrint(arena, "-Mroot={s}/docs/wasm/main.zig", .{context.zig_lib_directory}),
try std.fmt.allocPrint(arena, "-MWalk={s}/docs/wasm/Walk.zig", .{context.zig_lib_directory}),
"--listen=-",
});
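The module wiring above replaces the old positional main_src_path argument: `--dep Walk` declares that the root module imports Walk, and the -Mroot=/-MWalk= flags bind each module name to its root source file. A trimmed sketch of the resulting command line (paths assume a `lib` directory; other flags such as the wasm target are omitted):

zig build-exe --name autodoc -rdynamic \
    --dep Walk \
    -Mroot=lib/docs/wasm/main.zig \
    -MWalk=lib/docs/wasm/Walk.zig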


@ -1,8 +1,10 @@
//! Default test runner for unit tests.
const builtin = @import("builtin");
const std = @import("std");
const io = std.io;
const testing = std.testing;
const assert = std.debug.assert;
pub const std_options = .{
.logFn = log,
@ -28,6 +30,7 @@ pub fn main() void {
@panic("unable to parse command line args");
var listen = false;
var opt_cache_dir: ?[]const u8 = null;
for (args[1..]) |arg| {
if (std.mem.eql(u8, arg, "--listen=-")) {
@ -35,12 +38,18 @@ pub fn main() void {
} else if (std.mem.startsWith(u8, arg, "--seed=")) {
testing.random_seed = std.fmt.parseUnsigned(u32, arg["--seed=".len..], 0) catch
@panic("unable to parse --seed command line argument");
} else if (std.mem.startsWith(u8, arg, "--cache-dir")) {
opt_cache_dir = arg["--cache-dir=".len..];
} else {
@panic("unrecognized command line argument");
}
}
fba.reset();
if (builtin.fuzz) {
const cache_dir = opt_cache_dir orelse @panic("missing --cache-dir=[path] argument");
fuzzer_init(FuzzerSlice.fromSlice(cache_dir));
}
if (listen) {
return mainServer() catch @panic("internal test runner failure");
@ -59,6 +68,11 @@ fn mainServer() !void {
});
defer server.deinit();
if (builtin.fuzz) {
const coverage_id = fuzzer_coverage_id();
try server.serveU64Message(.coverage_id, coverage_id);
}
while (true) {
const hdr = try server.receiveMessage();
switch (hdr.tag) {
@ -129,7 +143,9 @@ fn mainServer() !void {
});
},
.start_fuzzing => {
if (!builtin.fuzz) unreachable;
const index = try server.receiveBody_u32();
var first = true;
const test_fn = builtin.test_functions[index];
while (true) {
testing.allocator_instance = .{};
@ -148,6 +164,10 @@ fn mainServer() !void {
};
if (!is_fuzz_test) @panic("missed call to std.testing.fuzzInput");
if (log_err_count != 0) @panic("error logs detected");
if (first) {
first = false;
try server.serveU64Message(.fuzz_start_addr, entry_addr);
}
}
},
@ -166,7 +186,7 @@ fn mainTerminal() void {
var skip_count: usize = 0;
var fail_count: usize = 0;
var fuzz_count: usize = 0;
const root_node = std.Progress.start(.{
const root_node = if (builtin.fuzz) std.Progress.Node.none else std.Progress.start(.{
.root_name = "Test",
.estimated_total_items = test_fn_list.len,
});
@ -315,20 +335,32 @@ const FuzzerSlice = extern struct {
ptr: [*]const u8,
len: usize,
/// Inline to avoid fuzzer instrumentation.
inline fn toSlice(s: FuzzerSlice) []const u8 {
return s.ptr[0..s.len];
}
/// Inline to avoid fuzzer instrumentation.
inline fn fromSlice(s: []const u8) FuzzerSlice {
return .{ .ptr = s.ptr, .len = s.len };
}
};
var is_fuzz_test: bool = undefined;
var entry_addr: usize = 0;
extern fn fuzzer_next() FuzzerSlice;
extern fn fuzzer_init(cache_dir: FuzzerSlice) void;
extern fn fuzzer_coverage_id() u64;
pub fn fuzzInput(options: testing.FuzzInputOptions) []const u8 {
@disableInstrumentation();
if (crippled) return "";
is_fuzz_test = true;
if (builtin.fuzz) return fuzzer_next().toSlice();
if (builtin.fuzz) {
if (entry_addr == 0) entry_addr = @returnAddress();
return fuzzer_next().toSlice();
}
if (options.corpus.len == 0) return "";
var prng = std.Random.DefaultPrng.init(testing.random_seed);
const random = prng.random();


@ -30,7 +30,7 @@ const largest_atomic_size = switch (arch) {
// On SPARC systems that lacks CAS and/or swap instructions, the only
// available atomic operation is a test-and-set (`ldstub`), so we force
// every atomic memory access to go through the lock.
.sparc, .sparcel => if (cpu.features.featureSetHas(.hasleoncasa)) @sizeOf(usize) else 0,
.sparc => if (cpu.features.featureSetHas(.hasleoncasa)) @sizeOf(usize) else 0,
// XXX: On x86/x86_64 we could check the presence of cmpxchg8b/cmpxchg16b
// and set this parameter accordingly.


@ -28,6 +28,10 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
.aarch64, .aarch64_be => true,
else => false,
};
const loongarch64 = switch (arch) {
.loongarch64 => true,
else => false,
};
const mips = switch (arch) {
.mips, .mipsel, .mips64, .mips64el => true,
else => false,
@ -41,7 +45,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
else => false,
};
const sparc = switch (arch) {
.sparc, .sparc64, .sparcel => true,
.sparc, .sparc64 => true,
else => false,
};
const apple = switch (os) {
@ -159,6 +163,12 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
// On Darwin, sys_icache_invalidate() provides this functionality
sys_icache_invalidate(start, end - start);
exportIt();
} else if (os == .linux and loongarch64) {
// See: https://github.com/llvm/llvm-project/blob/cf54cae26b65fc3201eff7200ffb9b0c9e8f9a13/compiler-rt/lib/builtins/clear_cache.c#L94-L95
asm volatile (
\\ ibar 0
);
exportIt();
}
}


@ -22,7 +22,7 @@ pub const want_aeabi = switch (builtin.abi) {
},
else => false,
};
pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
pub const want_ppc_abi = builtin.cpu.arch.isPowerPC();
pub const want_float_exceptions = !builtin.cpu.arch.isWasm();


@ -1,3 +1,12 @@
const Decl = @This();
const std = @import("std");
const Ast = std.zig.Ast;
const Walk = @import("Walk.zig");
const gpa = std.heap.wasm_allocator;
const assert = std.debug.assert;
const log = std.log;
const Oom = error{OutOfMemory};
ast_node: Ast.Node.Index,
file: Walk.File.Index,
/// The decl whose namespace this is in.
@ -215,12 +224,3 @@ pub fn find(search_string: []const u8) Decl.Index {
}
return current_decl_index;
}
const Decl = @This();
const std = @import("std");
const Ast = std.zig.Ast;
const Walk = @import("Walk.zig");
const gpa = std.heap.wasm_allocator;
const assert = std.debug.assert;
const log = std.log;
const Oom = error{OutOfMemory};


@ -1,4 +1,15 @@
//! Find and annotate identifiers with links to their declarations.
const Walk = @This();
const std = @import("std");
const Ast = std.zig.Ast;
const assert = std.debug.assert;
const log = std.log;
const gpa = std.heap.wasm_allocator;
const Oom = error{OutOfMemory};
pub const Decl = @import("Decl.zig");
pub var files: std.StringArrayHashMapUnmanaged(File) = .{};
pub var decls: std.ArrayListUnmanaged(Decl) = .{};
pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .{};
@ -1120,15 +1131,6 @@ pub fn isPrimitiveNonType(name: []const u8) bool {
// try w.root();
//}
const Walk = @This();
const std = @import("std");
const Ast = std.zig.Ast;
const assert = std.debug.assert;
const Decl = @import("Decl.zig");
const log = std.log;
const gpa = std.heap.wasm_allocator;
const Oom = error{OutOfMemory};
fn shrinkToFit(m: anytype) void {
m.shrinkAndFree(gpa, m.entries.len);
}


@ -0,0 +1,412 @@
const std = @import("std");
const Ast = std.zig.Ast;
const assert = std.debug.assert;
const Walk = @import("Walk");
const Decl = Walk.Decl;
const gpa = std.heap.wasm_allocator;
const Oom = error{OutOfMemory};
/// Delete this to find out where URL escaping needs to be added.
pub const missing_feature_url_escape = true;
pub const RenderSourceOptions = struct {
skip_doc_comments: bool = false,
skip_comments: bool = false,
collapse_whitespace: bool = false,
fn_link: Decl.Index = .none,
/// Assumed to be sorted ascending.
source_location_annotations: []const Annotation = &.{},
/// Concatenated with dom_id.
annotation_prefix: []const u8 = "l",
};
pub const Annotation = struct {
file_byte_offset: u32,
/// Concatenated with annotation_prefix.
dom_id: u32,
};
pub fn fileSourceHtml(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
root_node: Ast.Node.Index,
options: RenderSourceOptions,
) !void {
const ast = file_index.get_ast();
const file = file_index.get();
const g = struct {
var field_access_buffer: std.ArrayListUnmanaged(u8) = .{};
};
const token_tags = ast.tokens.items(.tag);
const token_starts = ast.tokens.items(.start);
const main_tokens = ast.nodes.items(.main_token);
const start_token = ast.firstToken(root_node);
const end_token = ast.lastToken(root_node) + 1;
var cursor: usize = token_starts[start_token];
var indent: usize = 0;
if (std.mem.lastIndexOf(u8, ast.source[0..cursor], "\n")) |newline_index| {
for (ast.source[newline_index + 1 .. cursor]) |c| {
if (c == ' ') {
indent += 1;
} else {
break;
}
}
}
var next_annotate_index: usize = 0;
for (
token_tags[start_token..end_token],
token_starts[start_token..end_token],
start_token..,
) |tag, start, token_index| {
const between = ast.source[cursor..start];
if (std.mem.trim(u8, between, " \t\r\n").len > 0) {
if (!options.skip_comments) {
try out.appendSlice(gpa, "<span class=\"tok-comment\">");
try appendUnindented(out, between, indent);
try out.appendSlice(gpa, "</span>");
}
} else if (between.len > 0) {
if (options.collapse_whitespace) {
if (out.items.len > 0 and out.items[out.items.len - 1] != ' ')
try out.append(gpa, ' ');
} else {
try appendUnindented(out, between, indent);
}
}
if (tag == .eof) break;
const slice = ast.tokenSlice(token_index);
cursor = start + slice.len;
// Insert annotations.
while (true) {
if (next_annotate_index >= options.source_location_annotations.len) break;
const next_annotation = options.source_location_annotations[next_annotate_index];
if (cursor <= next_annotation.file_byte_offset) break;
try out.writer(gpa).print("<span id=\"{s}{d}\"></span>", .{
options.annotation_prefix, next_annotation.dom_id,
});
next_annotate_index += 1;
}
switch (tag) {
.eof => unreachable,
.keyword_addrspace,
.keyword_align,
.keyword_and,
.keyword_asm,
.keyword_async,
.keyword_await,
.keyword_break,
.keyword_catch,
.keyword_comptime,
.keyword_const,
.keyword_continue,
.keyword_defer,
.keyword_else,
.keyword_enum,
.keyword_errdefer,
.keyword_error,
.keyword_export,
.keyword_extern,
.keyword_for,
.keyword_if,
.keyword_inline,
.keyword_noalias,
.keyword_noinline,
.keyword_nosuspend,
.keyword_opaque,
.keyword_or,
.keyword_orelse,
.keyword_packed,
.keyword_anyframe,
.keyword_pub,
.keyword_resume,
.keyword_return,
.keyword_linksection,
.keyword_callconv,
.keyword_struct,
.keyword_suspend,
.keyword_switch,
.keyword_test,
.keyword_threadlocal,
.keyword_try,
.keyword_union,
.keyword_unreachable,
.keyword_usingnamespace,
.keyword_var,
.keyword_volatile,
.keyword_allowzero,
.keyword_while,
.keyword_anytype,
.keyword_fn,
=> {
try out.appendSlice(gpa, "<span class=\"tok-kw\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.string_literal,
.char_literal,
.multiline_string_literal_line,
=> {
try out.appendSlice(gpa, "<span class=\"tok-str\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.builtin => {
try out.appendSlice(gpa, "<span class=\"tok-builtin\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.doc_comment,
.container_doc_comment,
=> {
if (!options.skip_doc_comments) {
try out.appendSlice(gpa, "<span class=\"tok-comment\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
}
},
.identifier => i: {
if (options.fn_link != .none) {
const fn_link = options.fn_link.get();
const fn_token = main_tokens[fn_link.ast_node];
if (token_index == fn_token + 1) {
try out.appendSlice(gpa, "<a class=\"tok-fn\" href=\"#");
_ = missing_feature_url_escape;
try fn_link.fqn(out);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
break :i;
}
}
if (token_index > 0 and token_tags[token_index - 1] == .keyword_fn) {
try out.appendSlice(gpa, "<span class=\"tok-fn\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (Walk.isPrimitiveNonType(slice)) {
try out.appendSlice(gpa, "<span class=\"tok-null\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (std.zig.primitives.isPrimitive(slice)) {
try out.appendSlice(gpa, "<span class=\"tok-type\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (file.token_parents.get(token_index)) |field_access_node| {
g.field_access_buffer.clearRetainingCapacity();
try walkFieldAccesses(file_index, &g.field_access_buffer, field_access_node);
if (g.field_access_buffer.items.len > 0) {
try out.appendSlice(gpa, "<a href=\"#");
_ = missing_feature_url_escape;
try out.appendSlice(gpa, g.field_access_buffer.items);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
} else {
try appendEscaped(out, slice);
}
break :i;
}
{
g.field_access_buffer.clearRetainingCapacity();
try resolveIdentLink(file_index, &g.field_access_buffer, token_index);
if (g.field_access_buffer.items.len > 0) {
try out.appendSlice(gpa, "<a href=\"#");
_ = missing_feature_url_escape;
try out.appendSlice(gpa, g.field_access_buffer.items);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
break :i;
}
}
try appendEscaped(out, slice);
},
.number_literal => {
try out.appendSlice(gpa, "<span class=\"tok-number\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.bang,
.pipe,
.pipe_pipe,
.pipe_equal,
.equal,
.equal_equal,
.equal_angle_bracket_right,
.bang_equal,
.l_paren,
.r_paren,
.semicolon,
.percent,
.percent_equal,
.l_brace,
.r_brace,
.l_bracket,
.r_bracket,
.period,
.period_asterisk,
.ellipsis2,
.ellipsis3,
.caret,
.caret_equal,
.plus,
.plus_plus,
.plus_equal,
.plus_percent,
.plus_percent_equal,
.plus_pipe,
.plus_pipe_equal,
.minus,
.minus_equal,
.minus_percent,
.minus_percent_equal,
.minus_pipe,
.minus_pipe_equal,
.asterisk,
.asterisk_equal,
.asterisk_asterisk,
.asterisk_percent,
.asterisk_percent_equal,
.asterisk_pipe,
.asterisk_pipe_equal,
.arrow,
.colon,
.slash,
.slash_equal,
.comma,
.ampersand,
.ampersand_equal,
.question_mark,
.angle_bracket_left,
.angle_bracket_left_equal,
.angle_bracket_angle_bracket_left,
.angle_bracket_angle_bracket_left_equal,
.angle_bracket_angle_bracket_left_pipe,
.angle_bracket_angle_bracket_left_pipe_equal,
.angle_bracket_right,
.angle_bracket_right_equal,
.angle_bracket_angle_bracket_right,
.angle_bracket_angle_bracket_right_equal,
.tilde,
=> try appendEscaped(out, slice),
.invalid, .invalid_periodasterisks => return error.InvalidToken,
}
}
}
fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usize) !void {
var it = std.mem.splitScalar(u8, s, '\n');
var is_first_line = true;
while (it.next()) |line| {
if (is_first_line) {
try appendEscaped(out, line);
is_first_line = false;
} else {
try out.appendSlice(gpa, "\n");
try appendEscaped(out, unindent(line, indent));
}
}
}
pub fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
for (s) |c| {
try out.ensureUnusedCapacity(gpa, 6);
switch (c) {
'&' => out.appendSliceAssumeCapacity("&amp;"),
'<' => out.appendSliceAssumeCapacity("&lt;"),
'>' => out.appendSliceAssumeCapacity("&gt;"),
'"' => out.appendSliceAssumeCapacity("&quot;"),
else => out.appendAssumeCapacity(c),
}
}
}
fn walkFieldAccesses(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
node: Ast.Node.Index,
) Oom!void {
const ast = file_index.get_ast();
const node_tags = ast.nodes.items(.tag);
assert(node_tags[node] == .field_access);
const node_datas = ast.nodes.items(.data);
const main_tokens = ast.nodes.items(.main_token);
const object_node = node_datas[node].lhs;
const dot_token = main_tokens[node];
const field_ident = dot_token + 1;
switch (node_tags[object_node]) {
.identifier => {
const lhs_ident = main_tokens[object_node];
try resolveIdentLink(file_index, out, lhs_ident);
},
.field_access => {
try walkFieldAccesses(file_index, out, object_node);
},
else => {},
}
if (out.items.len > 0) {
try out.append(gpa, '.');
try out.appendSlice(gpa, ast.tokenSlice(field_ident));
}
}
fn resolveIdentLink(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
ident_token: Ast.TokenIndex,
) Oom!void {
const decl_index = file_index.get().lookup_token(ident_token);
if (decl_index == .none) return;
try resolveDeclLink(decl_index, out);
}
fn unindent(s: []const u8, indent: usize) []const u8 {
var indent_idx: usize = 0;
for (s) |c| {
if (c == ' ' and indent_idx < indent) {
indent_idx += 1;
} else {
break;
}
}
return s[indent_idx..];
}
pub fn resolveDeclLink(decl_index: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
const decl = decl_index.get();
switch (decl.categorize()) {
.alias => |alias_decl| try alias_decl.get().fqn(out),
else => try decl.fqn(out),
}
}


@ -1,15 +1,17 @@
/// Delete this to find out where URL escaping needs to be added.
const missing_feature_url_escape = true;
const gpa = std.heap.wasm_allocator;
const std = @import("std");
const log = std.log;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
const Walk = @import("Walk.zig");
const Walk = @import("Walk");
const markdown = @import("markdown.zig");
const Decl = @import("Decl.zig");
const Decl = Walk.Decl;
const fileSourceHtml = @import("html_render.zig").fileSourceHtml;
const appendEscaped = @import("html_render.zig").appendEscaped;
const resolveDeclLink = @import("html_render.zig").resolveDeclLink;
const missing_feature_url_escape = @import("html_render.zig").missing_feature_url_escape;
const gpa = std.heap.wasm_allocator;
const js = struct {
extern "js" fn log(ptr: [*]const u8, len: usize) void;
@ -53,7 +55,7 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
const tar_bytes = tar_ptr[0..tar_len];
//log.debug("received {d} bytes of tar file", .{tar_bytes.len});
unpack_inner(tar_bytes) catch |err| {
unpackInner(tar_bytes) catch |err| {
fatal("unable to unpack tar: {s}", .{@errorName(err)});
};
}
@ -439,7 +441,7 @@ fn decl_field_html_fallible(
const decl = decl_index.get();
const ast = decl.file.get_ast();
try out.appendSlice(gpa, "<pre><code>");
try file_source_html(decl.file, out, field_node, .{});
try fileSourceHtml(decl.file, out, field_node, .{});
try out.appendSlice(gpa, "</code></pre>");
const field = ast.fullContainerField(field_node).?;
@ -478,7 +480,7 @@ fn decl_param_html_fallible(
try out.appendSlice(gpa, "<pre><code>");
try appendEscaped(out, name);
try out.appendSlice(gpa, ": ");
try file_source_html(decl.file, out, param_node, .{});
try fileSourceHtml(decl.file, out, param_node, .{});
try out.appendSlice(gpa, "</code></pre>");
if (ast.tokens.items(.tag)[first_doc_comment] == .doc_comment) {
@ -506,7 +508,7 @@ export fn decl_fn_proto_html(decl_index: Decl.Index, linkify_fn_name: bool) Stri
};
string_result.clearRetainingCapacity();
file_source_html(decl.file, &string_result, proto_node, .{
fileSourceHtml(decl.file, &string_result, proto_node, .{
.skip_doc_comments = true,
.skip_comments = true,
.collapse_whitespace = true,
@ -521,7 +523,7 @@ export fn decl_source_html(decl_index: Decl.Index) String {
const decl = decl_index.get();
string_result.clearRetainingCapacity();
file_source_html(decl.file, &string_result, decl.ast_node, .{}) catch |err| {
fileSourceHtml(decl.file, &string_result, decl.ast_node, .{}) catch |err| {
fatal("unable to render source: {s}", .{@errorName(err)});
};
return String.init(string_result.items);
@ -533,7 +535,7 @@ export fn decl_doctest_html(decl_index: Decl.Index) String {
return String.init("");
string_result.clearRetainingCapacity();
file_source_html(decl.file, &string_result, doctest_ast_node, .{}) catch |err| {
fileSourceHtml(decl.file, &string_result, doctest_ast_node, .{}) catch |err| {
fatal("unable to render source: {s}", .{@errorName(err)});
};
return String.init(string_result.items);
@ -691,7 +693,7 @@ fn render_docs(
const content = doc.string(data.text.content);
if (resolve_decl_path(r.context, content)) |resolved_decl_index| {
g.link_buffer.clearRetainingCapacity();
try resolve_decl_link(resolved_decl_index, &g.link_buffer);
try resolveDeclLink(resolved_decl_index, &g.link_buffer);
try writer.writeAll("<a href=\"#");
_ = missing_feature_url_escape;
@ -734,7 +736,7 @@ export fn decl_type_html(decl_index: Decl.Index) String {
if (ast.fullVarDecl(decl.ast_node)) |var_decl| {
if (var_decl.ast.type_node != 0) {
string_result.appendSlice(gpa, "<code>") catch @panic("OOM");
file_source_html(decl.file, &string_result, var_decl.ast.type_node, .{
fileSourceHtml(decl.file, &string_result, var_decl.ast.type_node, .{
.skip_comments = true,
.collapse_whitespace = true,
}) catch |e| {
@ -750,7 +752,7 @@ export fn decl_type_html(decl_index: Decl.Index) String {
const Oom = error{OutOfMemory};
fn unpack_inner(tar_bytes: []u8) !void {
fn unpackInner(tar_bytes: []u8) !void {
var fbs = std.io.fixedBufferStream(tar_bytes);
var file_name_buffer: [1024]u8 = undefined;
var link_name_buffer: [1024]u8 = undefined;
@ -902,382 +904,6 @@ export fn namespace_members(parent: Decl.Index, include_private: bool) Slice(Dec
return Slice(Decl.Index).init(g.members.items);
}
const RenderSourceOptions = struct {
skip_doc_comments: bool = false,
skip_comments: bool = false,
collapse_whitespace: bool = false,
fn_link: Decl.Index = .none,
};
fn file_source_html(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
root_node: Ast.Node.Index,
options: RenderSourceOptions,
) !void {
const ast = file_index.get_ast();
const file = file_index.get();
const g = struct {
var field_access_buffer: std.ArrayListUnmanaged(u8) = .{};
};
const token_tags = ast.tokens.items(.tag);
const token_starts = ast.tokens.items(.start);
const main_tokens = ast.nodes.items(.main_token);
const start_token = ast.firstToken(root_node);
const end_token = ast.lastToken(root_node) + 1;
var cursor: usize = token_starts[start_token];
var indent: usize = 0;
if (std.mem.lastIndexOf(u8, ast.source[0..cursor], "\n")) |newline_index| {
for (ast.source[newline_index + 1 .. cursor]) |c| {
if (c == ' ') {
indent += 1;
} else {
break;
}
}
}
for (
token_tags[start_token..end_token],
token_starts[start_token..end_token],
start_token..,
) |tag, start, token_index| {
const between = ast.source[cursor..start];
if (std.mem.trim(u8, between, " \t\r\n").len > 0) {
if (!options.skip_comments) {
try out.appendSlice(gpa, "<span class=\"tok-comment\">");
try appendUnindented(out, between, indent);
try out.appendSlice(gpa, "</span>");
}
} else if (between.len > 0) {
if (options.collapse_whitespace) {
if (out.items.len > 0 and out.items[out.items.len - 1] != ' ')
try out.append(gpa, ' ');
} else {
try appendUnindented(out, between, indent);
}
}
if (tag == .eof) break;
const slice = ast.tokenSlice(token_index);
cursor = start + slice.len;
switch (tag) {
.eof => unreachable,
.keyword_addrspace,
.keyword_align,
.keyword_and,
.keyword_asm,
.keyword_async,
.keyword_await,
.keyword_break,
.keyword_catch,
.keyword_comptime,
.keyword_const,
.keyword_continue,
.keyword_defer,
.keyword_else,
.keyword_enum,
.keyword_errdefer,
.keyword_error,
.keyword_export,
.keyword_extern,
.keyword_for,
.keyword_if,
.keyword_inline,
.keyword_noalias,
.keyword_noinline,
.keyword_nosuspend,
.keyword_opaque,
.keyword_or,
.keyword_orelse,
.keyword_packed,
.keyword_anyframe,
.keyword_pub,
.keyword_resume,
.keyword_return,
.keyword_linksection,
.keyword_callconv,
.keyword_struct,
.keyword_suspend,
.keyword_switch,
.keyword_test,
.keyword_threadlocal,
.keyword_try,
.keyword_union,
.keyword_unreachable,
.keyword_usingnamespace,
.keyword_var,
.keyword_volatile,
.keyword_allowzero,
.keyword_while,
.keyword_anytype,
.keyword_fn,
=> {
try out.appendSlice(gpa, "<span class=\"tok-kw\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.string_literal,
.char_literal,
.multiline_string_literal_line,
=> {
try out.appendSlice(gpa, "<span class=\"tok-str\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.builtin => {
try out.appendSlice(gpa, "<span class=\"tok-builtin\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.doc_comment,
.container_doc_comment,
=> {
if (!options.skip_doc_comments) {
try out.appendSlice(gpa, "<span class=\"tok-comment\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
}
},
.identifier => i: {
if (options.fn_link != .none) {
const fn_link = options.fn_link.get();
const fn_token = main_tokens[fn_link.ast_node];
if (token_index == fn_token + 1) {
try out.appendSlice(gpa, "<a class=\"tok-fn\" href=\"#");
_ = missing_feature_url_escape;
try fn_link.fqn(out);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
break :i;
}
}
if (token_index > 0 and token_tags[token_index - 1] == .keyword_fn) {
try out.appendSlice(gpa, "<span class=\"tok-fn\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (Walk.isPrimitiveNonType(slice)) {
try out.appendSlice(gpa, "<span class=\"tok-null\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (std.zig.primitives.isPrimitive(slice)) {
try out.appendSlice(gpa, "<span class=\"tok-type\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
break :i;
}
if (file.token_parents.get(token_index)) |field_access_node| {
g.field_access_buffer.clearRetainingCapacity();
try walk_field_accesses(file_index, &g.field_access_buffer, field_access_node);
if (g.field_access_buffer.items.len > 0) {
try out.appendSlice(gpa, "<a href=\"#");
_ = missing_feature_url_escape;
try out.appendSlice(gpa, g.field_access_buffer.items);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
} else {
try appendEscaped(out, slice);
}
break :i;
}
{
g.field_access_buffer.clearRetainingCapacity();
try resolve_ident_link(file_index, &g.field_access_buffer, token_index);
if (g.field_access_buffer.items.len > 0) {
try out.appendSlice(gpa, "<a href=\"#");
_ = missing_feature_url_escape;
try out.appendSlice(gpa, g.field_access_buffer.items);
try out.appendSlice(gpa, "\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</a>");
break :i;
}
}
try appendEscaped(out, slice);
},
.number_literal => {
try out.appendSlice(gpa, "<span class=\"tok-number\">");
try appendEscaped(out, slice);
try out.appendSlice(gpa, "</span>");
},
.bang,
.pipe,
.pipe_pipe,
.pipe_equal,
.equal,
.equal_equal,
.equal_angle_bracket_right,
.bang_equal,
.l_paren,
.r_paren,
.semicolon,
.percent,
.percent_equal,
.l_brace,
.r_brace,
.l_bracket,
.r_bracket,
.period,
.period_asterisk,
.ellipsis2,
.ellipsis3,
.caret,
.caret_equal,
.plus,
.plus_plus,
.plus_equal,
.plus_percent,
.plus_percent_equal,
.plus_pipe,
.plus_pipe_equal,
.minus,
.minus_equal,
.minus_percent,
.minus_percent_equal,
.minus_pipe,
.minus_pipe_equal,
.asterisk,
.asterisk_equal,
.asterisk_asterisk,
.asterisk_percent,
.asterisk_percent_equal,
.asterisk_pipe,
.asterisk_pipe_equal,
.arrow,
.colon,
.slash,
.slash_equal,
.comma,
.ampersand,
.ampersand_equal,
.question_mark,
.angle_bracket_left,
.angle_bracket_left_equal,
.angle_bracket_angle_bracket_left,
.angle_bracket_angle_bracket_left_equal,
.angle_bracket_angle_bracket_left_pipe,
.angle_bracket_angle_bracket_left_pipe_equal,
.angle_bracket_right,
.angle_bracket_right_equal,
.angle_bracket_angle_bracket_right,
.angle_bracket_angle_bracket_right_equal,
.tilde,
=> try appendEscaped(out, slice),
.invalid, .invalid_periodasterisks => return error.InvalidToken,
}
}
}
fn unindent(s: []const u8, indent: usize) []const u8 {
var indent_idx: usize = 0;
for (s) |c| {
if (c == ' ' and indent_idx < indent) {
indent_idx += 1;
} else {
break;
}
}
return s[indent_idx..];
}
fn appendUnindented(out: *std.ArrayListUnmanaged(u8), s: []const u8, indent: usize) !void {
var it = std.mem.splitScalar(u8, s, '\n');
var is_first_line = true;
while (it.next()) |line| {
if (is_first_line) {
try appendEscaped(out, line);
is_first_line = false;
} else {
try out.appendSlice(gpa, "\n");
try appendEscaped(out, unindent(line, indent));
}
}
}
fn resolve_ident_link(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
ident_token: Ast.TokenIndex,
) Oom!void {
const decl_index = file_index.get().lookup_token(ident_token);
if (decl_index == .none) return;
try resolve_decl_link(decl_index, out);
}
fn resolve_decl_link(decl_index: Decl.Index, out: *std.ArrayListUnmanaged(u8)) Oom!void {
const decl = decl_index.get();
switch (decl.categorize()) {
.alias => |alias_decl| try alias_decl.get().fqn(out),
else => try decl.fqn(out),
}
}
fn walk_field_accesses(
file_index: Walk.File.Index,
out: *std.ArrayListUnmanaged(u8),
node: Ast.Node.Index,
) Oom!void {
const ast = file_index.get_ast();
const node_tags = ast.nodes.items(.tag);
assert(node_tags[node] == .field_access);
const node_datas = ast.nodes.items(.data);
const main_tokens = ast.nodes.items(.main_token);
const object_node = node_datas[node].lhs;
const dot_token = main_tokens[node];
const field_ident = dot_token + 1;
switch (node_tags[object_node]) {
.identifier => {
const lhs_ident = main_tokens[object_node];
try resolve_ident_link(file_index, out, lhs_ident);
},
.field_access => {
try walk_field_accesses(file_index, out, object_node);
},
else => {},
}
if (out.items.len > 0) {
try out.append(gpa, '.');
try out.appendSlice(gpa, ast.tokenSlice(field_ident));
}
}
fn appendEscaped(out: *std.ArrayListUnmanaged(u8), s: []const u8) !void {
for (s) |c| {
try out.ensureUnusedCapacity(gpa, 6);
switch (c) {
'&' => out.appendSliceAssumeCapacity("&amp;"),
'<' => out.appendSliceAssumeCapacity("&lt;"),
'>' => out.appendSliceAssumeCapacity("&gt;"),
'"' => out.appendSliceAssumeCapacity("&quot;"),
else => out.appendAssumeCapacity(c),
}
}
}
fn count_scalar(haystack: []const u8, needle: u8) usize {
var total: usize = 0;
for (haystack) |elem| {

View File

@ -2,6 +2,8 @@ const builtin = @import("builtin");
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader;
pub const std_options = .{
.logFn = logOverride,
@ -15,9 +17,9 @@ fn logOverride(
comptime format: []const u8,
args: anytype,
) void {
if (builtin.mode != .Debug) return;
const f = if (log_file) |f| f else f: {
const f = std.fs.cwd().createFile("libfuzzer.log", .{}) catch @panic("failed to open fuzzer log file");
const f = fuzzer.cache_dir.createFile("tmp/libfuzzer.log", .{}) catch
@panic("failed to open fuzzer log file");
log_file = f;
break :f f;
};
@ -26,18 +28,19 @@ fn logOverride(
f.writer().print(prefix1 ++ prefix2 ++ format ++ "\n", args) catch @panic("failed to write to fuzzer log");
}
export threadlocal var __sancov_lowest_stack: usize = 0;
export threadlocal var __sancov_lowest_stack: usize = std.math.maxInt(usize);
export fn __sanitizer_cov_8bit_counters_init(start: [*]u8, stop: [*]u8) void {
std.log.debug("__sanitizer_cov_8bit_counters_init start={*}, stop={*}", .{ start, stop });
var module_count_8bc: usize = 0;
var module_count_pcs: usize = 0;
export fn __sanitizer_cov_8bit_counters_init(start: [*]u8, end: [*]u8) void {
assert(@atomicRmw(usize, &module_count_8bc, .Add, 1, .monotonic) == 0);
fuzzer.pc_counters = start[0 .. end - start];
}
export fn __sanitizer_cov_pcs_init(pc_start: [*]const usize, pc_end: [*]const usize) void {
std.log.debug("__sanitizer_cov_pcs_init pc_start={*}, pc_end={*}", .{ pc_start, pc_end });
fuzzer.pc_range = .{
.start = @intFromPtr(pc_start),
.end = @intFromPtr(pc_start),
};
export fn __sanitizer_cov_pcs_init(start: [*]const Fuzzer.FlaggedPc, end: [*]const Fuzzer.FlaggedPc) void {
assert(@atomicRmw(usize, &module_count_pcs, .Add, 1, .monotonic) == 0);
fuzzer.flagged_pcs = start[0 .. end - start];
}
export fn __sanitizer_cov_trace_const_cmp1(arg1: u8, arg2: u8) void {
@ -102,11 +105,21 @@ const Fuzzer = struct {
gpa: Allocator,
rng: std.Random.DefaultPrng,
input: std.ArrayListUnmanaged(u8),
pc_range: PcRange,
count: usize,
flagged_pcs: []const FlaggedPc,
pc_counters: []u8,
n_runs: usize,
recent_cases: RunMap,
deduplicated_runs: usize,
/// Data collected from code coverage instrumentation from one execution of
/// the test function.
coverage: Coverage,
/// Tracks which PCs have been seen across all runs that do not crash the fuzzer process.
/// Stored in a memory-mapped file so that it can be shared with other
/// processes and viewed while the fuzzer is running.
seen_pcs: MemoryMappedList,
cache_dir: std.fs.Dir,
/// Identifies the file name that will be used to store coverage
/// information, available to other processes.
coverage_id: u64,
const RunMap = std.ArrayHashMapUnmanaged(Run, void, Run.HashContext, false);
@ -161,9 +174,12 @@ const Fuzzer = struct {
}
};
const PcRange = struct {
start: usize,
end: usize,
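/// One entry of the PC table emitted by the compiler's sanitizer-coverage
/// instrumentation: a program counter plus a flags word whose lowest bit
/// marks a function entry block (assumed to mirror LLVM's SanitizerCoverage
/// pc-table entry format).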
const FlaggedPc = extern struct {
addr: usize,
flags: packed struct(usize) {
entry: bool,
_: @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } }),
},
};
const Analysis = struct {
@ -171,6 +187,76 @@ const Fuzzer = struct {
id: Run.Id,
};
fn init(f: *Fuzzer, cache_dir: std.fs.Dir) !void {
const flagged_pcs = f.flagged_pcs;
f.cache_dir = cache_dir;
// Choose a file name for the coverage based on a hash of the PCs that will be stored within.
const pc_digest = d: {
var hasher = std.hash.Wyhash.init(0);
for (flagged_pcs) |flagged_pc| {
hasher.update(std.mem.asBytes(&flagged_pc.addr));
}
break :d hasher.final();
};
f.coverage_id = pc_digest;
const hex_digest = std.fmt.hex(pc_digest);
const coverage_file_path = "v/" ++ hex_digest;
// Layout of this file:
// - Header
// - list of hit flags, 1 bit per PC (stored in usize elements)
// - list of PC addresses (usize elements)
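// Illustrative example: with 130 PCs on a 64-bit target, n_bitset_elems is
// (130 + 63) / 64 = 3, so the file holds @sizeOf(SeenPcsHeader) bytes of
// header, then 3 usizes (24 bytes) of hit bits, then 130 usizes (1040 bytes)
// of PC addresses.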
const coverage_file = createFileBail(cache_dir, coverage_file_path, .{
.read = true,
.truncate = false,
});
defer coverage_file.close();
const n_bitset_elems = (flagged_pcs.len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
comptime assert(SeenPcsHeader.trailing[0] == .pc_bits_usize);
comptime assert(SeenPcsHeader.trailing[1] == .pc_addr);
const bytes_len = @sizeOf(SeenPcsHeader) +
n_bitset_elems * @sizeOf(usize) +
flagged_pcs.len * @sizeOf(usize);
const existing_len = coverage_file.getEndPos() catch |err| {
fatal("unable to check len of coverage file: {s}", .{@errorName(err)});
};
if (existing_len == 0) {
coverage_file.setEndPos(bytes_len) catch |err| {
fatal("unable to set len of coverage file: {s}", .{@errorName(err)});
};
} else if (existing_len != bytes_len) {
fatal("incompatible existing coverage file (differing lengths)", .{});
}
f.seen_pcs = MemoryMappedList.init(coverage_file, existing_len, bytes_len) catch |err| {
fatal("unable to init coverage memory map: {s}", .{@errorName(err)});
};
if (existing_len != 0) {
const existing_pcs_bytes = f.seen_pcs.items[@sizeOf(SeenPcsHeader) + @sizeOf(usize) * n_bitset_elems ..][0 .. flagged_pcs.len * @sizeOf(usize)];
const existing_pcs = std.mem.bytesAsSlice(usize, existing_pcs_bytes);
for (existing_pcs, flagged_pcs, 0..) |old, new, i| {
if (old != new.addr) {
fatal("incompatible existing coverage file (differing PC at index {d}: {x} != {x})", .{
i, old, new.addr,
});
}
}
} else {
const header: SeenPcsHeader = .{
.n_runs = 0,
.unique_runs = 0,
.pcs_len = flagged_pcs.len,
.lowest_stack = std.math.maxInt(usize),
};
f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&header));
f.seen_pcs.appendNTimesAssumeCapacity(0, n_bitset_elems * @sizeOf(usize));
for (flagged_pcs) |flagged_pc| {
f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&flagged_pc.addr));
}
}
}
fn analyzeLastRun(f: *Fuzzer) Analysis {
return .{
.id = f.coverage.run_id_hasher.final(),
@ -194,7 +280,7 @@ const Fuzzer = struct {
.score = 0,
}, {});
} else {
if (f.count % 1000 == 0) f.dumpStats();
if (f.n_runs % 10000 == 0) f.dumpStats();
const analysis = f.analyzeLastRun();
const gop = f.recent_cases.getOrPutAssumeCapacity(.{
@ -204,7 +290,6 @@ const Fuzzer = struct {
});
if (gop.found_existing) {
//std.log.info("duplicate analysis: score={d} id={d}", .{ analysis.score, analysis.id });
f.deduplicated_runs += 1;
if (f.input.items.len < gop.key_ptr.input.len or gop.key_ptr.score == 0) {
gpa.free(gop.key_ptr.input);
gop.key_ptr.input = try gpa.dupe(u8, f.input.items);
@ -217,6 +302,36 @@ const Fuzzer = struct {
.input = try gpa.dupe(u8, f.input.items),
.score = analysis.score,
};
{
// Track code coverage from all runs.
comptime assert(SeenPcsHeader.trailing[0] == .pc_bits_usize);
const header_end_ptr: [*]volatile usize = @ptrCast(f.seen_pcs.items[@sizeOf(SeenPcsHeader)..]);
const remainder = f.flagged_pcs.len % @bitSizeOf(usize);
const aligned_len = f.flagged_pcs.len - remainder;
const seen_pcs = header_end_ptr[0..aligned_len];
const pc_counters = std.mem.bytesAsSlice([@bitSizeOf(usize)]u8, f.pc_counters[0..aligned_len]);
const V = @Vector(@bitSizeOf(usize), u8);
const zero_v: V = @splat(0);
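// For example (illustrative), with @bitSizeOf(usize) == 64: a group of 64
// per-PC hit counters such as { 3, 0, 1, 0, ... } compares against zero_v to
// give { true, false, true, false, ... }, which @bitCast packs into the usize
// mask 0b...0101; OR-ing that into the shared bitset records newly covered
// PCs without clearing bits set by earlier runs.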
for (header_end_ptr[0..pc_counters.len], pc_counters) |*elem, *array| {
const v: V = array.*;
const mask: usize = @bitCast(v != zero_v);
_ = @atomicRmw(usize, elem, .Or, mask, .monotonic);
}
if (remainder > 0) {
const i = pc_counters.len;
const elem = &seen_pcs[i];
var mask: usize = 0;
for (f.pc_counters[i * @bitSizeOf(usize) ..][0..remainder], 0..) |byte, bit_index| {
mask |= @as(usize, @intFromBool(byte != 0)) << @intCast(bit_index);
}
_ = @atomicRmw(usize, elem, .Or, mask, .monotonic);
}
}
const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
_ = @atomicRmw(usize, &header.unique_runs, .Add, 1, .monotonic);
}
if (f.recent_cases.entries.len >= 100) {
@ -244,8 +359,12 @@ const Fuzzer = struct {
f.input.appendSliceAssumeCapacity(run.input);
try f.mutate();
f.n_runs += 1;
const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]);
_ = @atomicRmw(usize, &header.n_runs, .Add, 1, .monotonic);
_ = @atomicRmw(usize, &header.lowest_stack, .Min, __sancov_lowest_stack, .monotonic);
@memset(f.pc_counters, 0);
f.coverage.reset();
f.count += 1;
return f.input.items;
}
@ -256,10 +375,6 @@ const Fuzzer = struct {
}
fn dumpStats(f: *Fuzzer) void {
std.log.info("stats: runs={d} deduplicated={d}", .{
f.count,
f.deduplicated_runs,
});
for (f.recent_cases.keys()[0..@min(f.recent_cases.entries.len, 5)], 0..) |run, i| {
std.log.info("best[{d}] id={x} score={d} input: '{}'", .{
i, run.id, run.score, std.zig.fmtEscapes(run.input),
@ -291,6 +406,21 @@ const Fuzzer = struct {
}
};
fn createFileBail(dir: std.fs.Dir, sub_path: []const u8, flags: std.fs.File.CreateFlags) std.fs.File {
return dir.createFile(sub_path, flags) catch |err| switch (err) {
error.FileNotFound => {
const dir_name = std.fs.path.dirname(sub_path).?;
dir.makePath(dir_name) catch |e| {
fatal("unable to make path '{s}': {s}", .{ dir_name, @errorName(e) });
};
return dir.createFile(sub_path, flags) catch |e| {
fatal("unable to create file '{s}': {s}", .{ sub_path, @errorName(e) });
};
},
else => fatal("unable to create file '{s}': {s}", .{ sub_path, @errorName(err) }),
};
}
fn oom(err: anytype) noreturn {
switch (err) {
error.OutOfMemory => @panic("out of memory"),
@ -303,15 +433,88 @@ var fuzzer: Fuzzer = .{
.gpa = general_purpose_allocator.allocator(),
.rng = std.Random.DefaultPrng.init(0),
.input = .{},
.pc_range = .{ .start = 0, .end = 0 },
.count = 0,
.deduplicated_runs = 0,
.flagged_pcs = undefined,
.pc_counters = undefined,
.n_runs = 0,
.recent_cases = .{},
.coverage = undefined,
.cache_dir = undefined,
.seen_pcs = undefined,
.coverage_id = undefined,
};
/// Invalid until `fuzzer_init` is called.
export fn fuzzer_coverage_id() u64 {
return fuzzer.coverage_id;
}
export fn fuzzer_next() Fuzzer.Slice {
return Fuzzer.Slice.fromZig(fuzzer.next() catch |err| switch (err) {
error.OutOfMemory => @panic("out of memory"),
});
}
export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void {
if (module_count_8bc == 0) fatal("__sanitizer_cov_8bit_counters_init was never called", .{});
if (module_count_pcs == 0) fatal("__sanitizer_cov_pcs_init was never called", .{});
const cache_dir_path = cache_dir_struct.toZig();
const cache_dir = if (cache_dir_path.len == 0)
std.fs.cwd()
else
std.fs.cwd().makeOpenPath(cache_dir_path, .{ .iterate = true }) catch |err| {
fatal("unable to open fuzz directory '{s}': {s}", .{ cache_dir_path, @errorName(err) });
};
fuzzer.init(cache_dir) catch |err| fatal("unable to init fuzzer: {s}", .{@errorName(err)});
}
/// Like `std.ArrayListUnmanaged(u8)` but backed by memory mapping.
pub const MemoryMappedList = struct {
/// Contents of the list.
///
/// The backing memory is a shared, read-write mapping of a file rather than
/// an allocation, and this type never remaps or frees it, so pointers into
/// this slice stay valid for the lifetime of the mapping.
items: []align(std.mem.page_size) volatile u8,
/// How many bytes this list can hold without allocating additional memory.
capacity: usize,
pub fn init(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
const ptr = try std.posix.mmap(
null,
capacity,
std.posix.PROT.READ | std.posix.PROT.WRITE,
.{ .TYPE = .SHARED },
file.handle,
0,
);
return .{
.items = ptr[0..length],
.capacity = capacity,
};
}
/// Append the slice of items to the list.
/// Asserts that the list can hold the additional items.
pub fn appendSliceAssumeCapacity(l: *MemoryMappedList, items: []const u8) void {
const old_len = l.items.len;
const new_len = old_len + items.len;
assert(new_len <= l.capacity);
l.items.len = new_len;
@memcpy(l.items[old_len..][0..items.len], items);
}
/// Append a value to the list `n` times.
/// Never invalidates element pointers.
/// The function is inline so that a comptime-known `value` parameter will
/// have better memset codegen in case it has a repeated byte pattern.
/// Asserts that the list can hold the additional items.
pub inline fn appendNTimesAssumeCapacity(l: *MemoryMappedList, value: u8, n: usize) void {
const new_len = l.items.len + n;
assert(new_len <= l.capacity);
@memset(l.items.ptr[l.items.len..new_len], value);
l.items.len = new_len;
}
};
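// A minimal sketch (illustration only, not part of this change) of how another
// process could open the shared coverage file written by `Fuzzer.init` and read
// its header. It reuses `SeenPcsHeader` from `std.Build.Fuzz.abi` (already
// imported at the top of this file) and assumes the reader runs on the same
// architecture and endianness as the fuzzer process, since the struct is read
// as raw bytes.
fn readSeenPcsHeader(cache_dir: std.fs.Dir, hex_digest: []const u8) !SeenPcsHeader {
var path_buf: [64]u8 = undefined;
const path = try std.fmt.bufPrint(&path_buf, "v/{s}", .{hex_digest});
const file = try cache_dir.openFile(path, .{});
defer file.close();
var header: SeenPcsHeader = undefined;
const n = try file.readAll(std.mem.asBytes(&header));
if (n != @sizeOf(SeenPcsHeader)) return error.EndOfStream;
return header;
}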

lib/fuzzer/index.html Normal file (161 lines)
View File

@ -0,0 +1,161 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Zig Build System Interface</title>
<style type="text/css">
body {
font-family: system-ui, -apple-system, Roboto, "Segoe UI", sans-serif;
color: #000000;
}
.hidden {
display: none;
}
table {
width: 100%;
}
a {
color: #2A6286;
}
pre{
font-family:"Source Code Pro",monospace;
font-size:1em;
background-color:#F5F5F5;
padding: 1em;
margin: 0;
overflow-x: auto;
}
:not(pre) > code {
white-space: break-spaces;
}
code {
font-family:"Source Code Pro",monospace;
font-size: 0.9em;
}
code a {
color: #000000;
}
kbd {
color: #000;
background-color: #fafbfc;
border-color: #d1d5da;
border-bottom-color: #c6cbd1;
box-shadow-color: #c6cbd1;
display: inline-block;
padding: 0.3em 0.2em;
font: 1.2em monospace;
line-height: 0.8em;
vertical-align: middle;
border: solid 1px;
border-radius: 3px;
box-shadow: inset 0 -1px 0;
cursor: default;
}
.l {
display: inline-block;
background: red;
width: 1em;
height: 1em;
border-radius: 1em;
}
.c {
background-color: green;
}
.tok-kw {
color: #333;
font-weight: bold;
}
.tok-str {
color: #d14;
}
.tok-builtin {
color: #0086b3;
}
.tok-comment {
color: #777;
font-style: italic;
}
.tok-fn {
color: #900;
font-weight: bold;
}
.tok-null {
color: #008080;
}
.tok-number {
color: #008080;
}
.tok-type {
color: #458;
font-weight: bold;
}
@media (prefers-color-scheme: dark) {
body {
background-color: #111;
color: #bbb;
}
pre {
background-color: #222;
color: #ccc;
}
a {
color: #88f;
}
code a {
color: #ccc;
}
.l {
background-color: red;
}
.c {
background-color: green;
}
.tok-kw {
color: #eee;
}
.tok-str {
color: #2e5;
}
.tok-builtin {
color: #ff894c;
}
.tok-comment {
color: #aa7;
}
.tok-fn {
color: #B1A0F8;
}
.tok-null {
color: #ff8080;
}
.tok-number {
color: #ff8080;
}
.tok-type {
color: #68f;
}
}
</style>
</head>
<body>
<p id="status">Loading JavaScript...</p>
<div id="sectStats" class="hidden">
<ul>
<li>Total Runs: <span id="statTotalRuns"></span></li>
<li>Unique Runs: <span id="statUniqueRuns"></span></li>
<li>Coverage: <span id="statCoverage"></span></li>
<li>Lowest Stack: <span id="statLowestStack"></span></li>
<li>Entry Points: <ul id="entryPointsList"></ul></li>
</ul>
</div>
<div id="sectSource" class="hidden">
<h2>Source Code</h2>
<pre><code id="sourceText"></code></pre>
</div>
<script src="main.js"></script>
</body>
</html>

lib/fuzzer/main.js Normal file (249 lines)
View File

@ -0,0 +1,249 @@
(function() {
const domStatus = document.getElementById("status");
const domSectSource = document.getElementById("sectSource");
const domSectStats = document.getElementById("sectStats");
const domSourceText = document.getElementById("sourceText");
const domStatTotalRuns = document.getElementById("statTotalRuns");
const domStatUniqueRuns = document.getElementById("statUniqueRuns");
const domStatCoverage = document.getElementById("statCoverage");
const domStatLowestStack = document.getElementById("statLowestStack");
const domEntryPointsList = document.getElementById("entryPointsList");
let wasm_promise = fetch("main.wasm");
let sources_promise = fetch("sources.tar").then(function(response) {
if (!response.ok) throw new Error("unable to download sources");
return response.arrayBuffer();
});
var wasm_exports = null;
var curNavSearch = null;
var curNavLocation = null;
const text_decoder = new TextDecoder();
const text_encoder = new TextEncoder();
domStatus.textContent = "Loading WebAssembly...";
WebAssembly.instantiateStreaming(wasm_promise, {
js: {
log: function(ptr, len) {
const msg = decodeString(ptr, len);
console.log(msg);
},
panic: function (ptr, len) {
const msg = decodeString(ptr, len);
throw new Error("panic: " + msg);
},
emitSourceIndexChange: onSourceIndexChange,
emitCoverageUpdate: onCoverageUpdate,
emitEntryPointsUpdate: renderStats,
},
}).then(function(obj) {
wasm_exports = obj.instance.exports;
window.wasm = obj; // for debugging
domStatus.textContent = "Loading sources tarball...";
sources_promise.then(function(buffer) {
domStatus.textContent = "Parsing sources...";
const js_array = new Uint8Array(buffer);
const ptr = wasm_exports.alloc(js_array.length);
const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
wasm_array.set(js_array);
wasm_exports.unpack(ptr, js_array.length);
window.addEventListener('popstate', onPopState, false);
onHashChange(null);
domStatus.textContent = "Waiting for server to send source location metadata...";
connectWebSocket();
});
});
function onPopState(ev) {
onHashChange(ev.state);
}
function onHashChange(state) {
history.replaceState({}, "");
navigate(location.hash);
if (state == null) window.scrollTo({top: 0});
}
function navigate(location_hash) {
domSectSource.classList.add("hidden");
curNavLocation = null;
curNavSearch = null;
if (location_hash.length > 1 && location_hash[0] === '#') {
const query = location_hash.substring(1);
const qpos = query.indexOf("?");
let nonSearchPart;
if (qpos === -1) {
nonSearchPart = query;
} else {
nonSearchPart = query.substring(0, qpos);
curNavSearch = decodeURIComponent(query.substring(qpos + 1));
}
if (nonSearchPart[0] == "l") {
curNavLocation = +nonSearchPart.substring(1);
renderSource(curNavLocation);
}
}
render();
}
function connectWebSocket() {
const host = document.location.host;
const pathname = document.location.pathname;
const isHttps = document.location.protocol === 'https:';
const match = host.match(/^(.+):(\d+)$/);
const defaultPort = isHttps ? 443 : 80;
const port = match ? parseInt(match[2], 10) : defaultPort;
const hostName = match ? match[1] : host;
const wsProto = isHttps ? "wss:" : "ws:";
const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
ws = new WebSocket(wsUrl);
ws.binaryType = "arraybuffer";
ws.addEventListener('message', onWebSocketMessage, false);
ws.addEventListener('error', timeoutThenCreateNew, false);
ws.addEventListener('close', timeoutThenCreateNew, false);
ws.addEventListener('open', onWebSocketOpen, false);
}
function onWebSocketOpen() {
//console.log("web socket opened");
}
function onWebSocketMessage(ev) {
wasmOnMessage(ev.data);
}
function timeoutThenCreateNew() {
ws.removeEventListener('message', onWebSocketMessage, false);
ws.removeEventListener('error', timeoutThenCreateNew, false);
ws.removeEventListener('close', timeoutThenCreateNew, false);
ws.removeEventListener('open', onWebSocketOpen, false);
ws = null;
setTimeout(connectWebSocket, 1000);
}
function wasmOnMessage(data) {
const jsArray = new Uint8Array(data);
const ptr = wasm_exports.message_begin(jsArray.length);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
wasmArray.set(jsArray);
wasm_exports.message_end();
}
function onSourceIndexChange() {
render();
if (curNavLocation != null) renderSource(curNavLocation);
}
function onCoverageUpdate() {
renderStats();
renderCoverage();
}
function render() {
domStatus.classList.add("hidden");
}
function renderStats() {
const totalRuns = wasm_exports.totalRuns();
const uniqueRuns = wasm_exports.uniqueRuns();
const totalSourceLocations = wasm_exports.totalSourceLocations();
const coveredSourceLocations = wasm_exports.coveredSourceLocations();
domStatTotalRuns.innerText = totalRuns;
domStatUniqueRuns.innerText = uniqueRuns + " (" + percent(uniqueRuns, totalRuns) + "%)";
domStatCoverage.innerText = coveredSourceLocations + " / " + totalSourceLocations + " (" + percent(coveredSourceLocations, totalSourceLocations) + "%)";
domStatLowestStack.innerText = unwrapString(wasm_exports.lowestStack());
const entryPoints = unwrapInt32Array(wasm_exports.entryPoints());
resizeDomList(domEntryPointsList, entryPoints.length, "<li></li>");
for (let i = 0; i < entryPoints.length; i += 1) {
const liDom = domEntryPointsList.children[i];
liDom.innerHTML = unwrapString(wasm_exports.sourceLocationLinkHtml(entryPoints[i]));
}
domSectStats.classList.remove("hidden");
}
function renderCoverage() {
if (curNavLocation == null) return;
const sourceLocationIndex = curNavLocation;
for (let i = 0; i < domSourceText.children.length; i += 1) {
const childDom = domSourceText.children[i];
if (childDom.id != null && childDom.id[0] == "l") {
childDom.classList.add("l");
childDom.classList.remove("c");
}
}
const coveredList = unwrapInt32Array(wasm_exports.sourceLocationFileCoveredList(sourceLocationIndex));
for (let i = 0; i < coveredList.length; i += 1) {
document.getElementById("l" + coveredList[i]).classList.add("c");
}
}
function resizeDomList(listDom, desiredLen, templateHtml) {
for (let i = listDom.childElementCount; i < desiredLen; i += 1) {
listDom.insertAdjacentHTML('beforeend', templateHtml);
}
while (desiredLen < listDom.childElementCount) {
listDom.removeChild(listDom.lastChild);
}
}
function percent(a, b) {
return ((Number(a) / Number(b)) * 100).toFixed(1);
}
function renderSource(sourceLocationIndex) {
const pathName = unwrapString(wasm_exports.sourceLocationPath(sourceLocationIndex));
if (pathName.length === 0) return;
const h2 = domSectSource.children[0];
h2.innerText = pathName;
domSourceText.innerHTML = unwrapString(wasm_exports.sourceLocationFileHtml(sourceLocationIndex));
domSectSource.classList.remove("hidden");
// Empirically, Firefox needs this requestAnimationFrame in order for the scrollIntoView to work.
requestAnimationFrame(function() {
const slDom = document.getElementById("l" + sourceLocationIndex);
if (slDom != null) slDom.scrollIntoView({
behavior: "smooth",
block: "center",
});
});
}
function decodeString(ptr, len) {
if (len === 0) return "";
return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
}
function unwrapInt32Array(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
if (len === 0) return new Uint32Array();
return new Uint32Array(wasm_exports.memory.buffer, ptr, len);
}
function setInputString(s) {
const jsArray = text_encoder.encode(s);
const len = jsArray.length;
const ptr = wasm_exports.set_input_string(len);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, len);
wasmArray.set(jsArray);
}
function unwrapString(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
return decodeString(ptr, len);
}
})();

lib/fuzzer/wasm/main.zig Normal file (428 lines)
View File

@ -0,0 +1,428 @@
const std = @import("std");
const assert = std.debug.assert;
const abi = std.Build.Fuzz.abi;
const gpa = std.heap.wasm_allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const Allocator = std.mem.Allocator;
const Walk = @import("Walk");
const Decl = Walk.Decl;
const html_render = @import("html_render");
const js = struct {
extern "js" fn log(ptr: [*]const u8, len: usize) void;
extern "js" fn panic(ptr: [*]const u8, len: usize) noreturn;
extern "js" fn emitSourceIndexChange() void;
extern "js" fn emitCoverageUpdate() void;
extern "js" fn emitEntryPointsUpdate() void;
};
pub const std_options: std.Options = .{
.logFn = logFn,
};
pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noreturn {
_ = st;
_ = addr;
log.err("panic: {s}", .{msg});
@trap();
}
fn logFn(
comptime message_level: log.Level,
comptime scope: @TypeOf(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {
const level_txt = comptime message_level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, level_txt ++ prefix2 ++ format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.log(line.ptr, line.len);
}
export fn alloc(n: usize) [*]u8 {
const slice = gpa.alloc(u8, n) catch @panic("OOM");
return slice.ptr;
}
var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{};
/// Resizes the message buffer to be the correct length; returns the pointer to
/// the message buffer.
export fn message_begin(len: usize) [*]u8 {
message_buffer.resize(gpa, len) catch @panic("OOM");
return message_buffer.items.ptr;
}
export fn message_end() void {
const msg_bytes = message_buffer.items;
const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
switch (tag) {
.source_index => return sourceIndexMessage(msg_bytes) catch @panic("OOM"),
.coverage_update => return coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
.entry_points => return entryPointsMessage(msg_bytes) catch @panic("OOM"),
_ => unreachable,
}
}
export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
const tar_bytes = tar_ptr[0..tar_len];
log.debug("received {d} bytes of tar file", .{tar_bytes.len});
unpackInner(tar_bytes) catch |err| {
fatal("unable to unpack tar: {s}", .{@errorName(err)});
};
}
/// Set by `set_input_string`.
var input_string: std.ArrayListUnmanaged(u8) = .{};
var string_result: std.ArrayListUnmanaged(u8) = .{};
export fn set_input_string(len: usize) [*]u8 {
input_string.resize(gpa, len) catch @panic("OOM");
return input_string.items.ptr;
}
/// Looks up the root struct decl corresponding to a file by path.
/// Uses `input_string`.
export fn find_file_root() Decl.Index {
const file: Walk.File.Index = @enumFromInt(Walk.files.getIndex(input_string.items) orelse return .none);
return file.findRootDecl();
}
export fn decl_source_html(decl_index: Decl.Index) String {
const decl = decl_index.get();
string_result.clearRetainingCapacity();
html_render.fileSourceHtml(decl.file, &string_result, decl.ast_node, .{}) catch |err| {
fatal("unable to render source: {s}", .{@errorName(err)});
};
return String.init(string_result.items);
}
export fn lowestStack() String {
const header: *abi.CoverageUpdateHeader = @ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]);
string_result.clearRetainingCapacity();
string_result.writer(gpa).print("0x{d}", .{header.lowest_stack}) catch @panic("OOM");
return String.init(string_result.items);
}
export fn totalSourceLocations() usize {
return coverage_source_locations.items.len;
}
export fn coveredSourceLocations() usize {
const covered_bits = recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..];
var count: usize = 0;
for (covered_bits) |byte| count += @popCount(byte);
return count;
}
export fn totalRuns() u64 {
const header: *abi.CoverageUpdateHeader = @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]));
return header.n_runs;
}
export fn uniqueRuns() u64 {
const header: *abi.CoverageUpdateHeader = @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]));
return header.unique_runs;
}
const String = Slice(u8);
fn Slice(T: type) type {
return packed struct(u64) {
ptr: u32,
len: u32,
fn init(s: []const T) @This() {
return .{
.ptr = @intFromPtr(s.ptr),
.len = s.len,
};
}
};
}
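// A small illustrative test (not part of this change) of the packed slice ABI
// above: the pointer lands in the low 32 bits of the returned u64 and the
// length in the high 32 bits, matching how main.js unpacks it with
// `Number(x & 0xffffffffn)` and `Number(x >> 32n)`.
test "packed ptr/len slice ABI" {
const Packed = packed struct(u64) { ptr: u32, len: u32 };
const s: Packed = .{ .ptr = 0x0001_1000, .len = 5 };
try std.testing.expectEqual(@as(u64, 0x0000_0005_0001_1000), @as(u64, @bitCast(s)));
}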
fn unpackInner(tar_bytes: []u8) !void {
var fbs = std.io.fixedBufferStream(tar_bytes);
var file_name_buffer: [1024]u8 = undefined;
var link_name_buffer: [1024]u8 = undefined;
var it = std.tar.iterator(fbs.reader(), .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
});
while (try it.next()) |tar_file| {
switch (tar_file.kind) {
.file => {
if (tar_file.size == 0 and tar_file.name.len == 0) break;
if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
log.debug("found file: '{s}'", .{tar_file.name});
const file_name = try gpa.dupe(u8, tar_file.name);
if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
const pkg_name = file_name[0..pkg_name_end];
const gop = try Walk.modules.getOrPut(gpa, pkg_name);
const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
if (!gop.found_existing or
std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
{
gop.value_ptr.* = file;
}
const file_bytes = tar_bytes[fbs.pos..][0..@intCast(tar_file.size)];
assert(file == try Walk.add_file(file_name, file_bytes));
}
} else {
log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
}
},
else => continue,
}
}
}
fn fatal(comptime format: []const u8, args: anytype) noreturn {
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.panic(line.ptr, line.len);
}
fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const Header = abi.SourceIndexHeader;
const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*);
const directories_start = @sizeOf(Header);
const directories_end = directories_start + header.directories_len * @sizeOf(Coverage.String);
const files_start = directories_end;
const files_end = files_start + header.files_len * @sizeOf(Coverage.File);
const source_locations_start = files_end;
const source_locations_end = source_locations_start + header.source_locations_len * @sizeOf(Coverage.SourceLocation);
const string_bytes = msg_bytes[source_locations_end..][0..header.string_bytes_len];
const directories: []const Coverage.String = @alignCast(std.mem.bytesAsSlice(Coverage.String, msg_bytes[directories_start..directories_end]));
const files: []const Coverage.File = @alignCast(std.mem.bytesAsSlice(Coverage.File, msg_bytes[files_start..files_end]));
const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end]));
try updateCoverage(directories, files, source_locations, string_bytes);
js.emitSourceIndexChange();
}
fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
recent_coverage_update.clearRetainingCapacity();
recent_coverage_update.appendSlice(gpa, msg_bytes) catch @panic("OOM");
js.emitCoverageUpdate();
}
var entry_points: std.ArrayListUnmanaged(u32) = .{};
fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const header: abi.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.EntryPointHeader)].*);
entry_points.resize(gpa, header.flags.locs_len) catch @panic("OOM");
@memcpy(entry_points.items, std.mem.bytesAsSlice(u32, msg_bytes[@sizeOf(abi.EntryPointHeader)..]));
js.emitEntryPointsUpdate();
}
export fn entryPoints() Slice(u32) {
return Slice(u32).init(entry_points.items);
}
/// Index into `coverage_source_locations`.
const SourceLocationIndex = enum(u32) {
_,
fn haveCoverage(sli: SourceLocationIndex) bool {
return @intFromEnum(sli) < coverage_source_locations.items.len;
}
fn ptr(sli: SourceLocationIndex) *Coverage.SourceLocation {
return &coverage_source_locations.items[@intFromEnum(sli)];
}
fn sourceLocationLinkHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
) Allocator.Error!void {
const sl = sli.ptr();
try out.writer(gpa).print("<a href=\"#l{d}\">", .{@intFromEnum(sli)});
try sli.appendPath(out);
try out.writer(gpa).print(":{d}:{d}</a>", .{ sl.line, sl.column });
}
fn appendPath(sli: SourceLocationIndex, out: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
const sl = sli.ptr();
const file = coverage.fileAt(sl.file);
const file_name = coverage.stringAt(file.basename);
const dir_name = coverage.stringAt(coverage.directories.keys()[file.directory_index]);
try html_render.appendEscaped(out, dir_name);
try out.appendSlice(gpa, "/");
try html_render.appendEscaped(out, file_name);
}
fn toWalkFile(sli: SourceLocationIndex) ?Walk.File.Index {
var buf: std.ArrayListUnmanaged(u8) = .{};
defer buf.deinit(gpa);
sli.appendPath(&buf) catch @panic("OOM");
return @enumFromInt(Walk.files.getIndex(buf.items) orelse return null);
}
fn fileHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
) error{ OutOfMemory, SourceUnavailable }!void {
const walk_file_index = sli.toWalkFile() orelse return error.SourceUnavailable;
const root_node = walk_file_index.findRootDecl().get().ast_node;
var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .{};
defer annotations.deinit(gpa);
try computeSourceAnnotations(sli.ptr().file, walk_file_index, &annotations, coverage_source_locations.items);
html_render.fileSourceHtml(walk_file_index, out, root_node, .{
.source_location_annotations = annotations.items,
}) catch |err| {
fatal("unable to render source: {s}", .{@errorName(err)});
};
}
};
fn computeSourceAnnotations(
cov_file_index: Coverage.File.Index,
walk_file_index: Walk.File.Index,
annotations: *std.ArrayListUnmanaged(html_render.Annotation),
source_locations: []const Coverage.SourceLocation,
) !void {
// Collect all the source locations from only this file into this array
// first, then sort by line, col, so that we can collect annotations with
// O(N) time complexity.
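// For example (illustrative): locations {(3,5), (1,9), (1,2)} sort to (1,2),
// (1,9), (3,5), so the single forward scan over the source bytes below emits
// each annotation in order as the scan reaches its line and column.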
var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .{};
defer locs.deinit(gpa);
for (source_locations, 0..) |sl, sli_usize| {
if (sl.file != cov_file_index) continue;
const sli: SourceLocationIndex = @enumFromInt(sli_usize);
try locs.append(gpa, sli);
}
std.mem.sortUnstable(SourceLocationIndex, locs.items, {}, struct {
pub fn lessThan(context: void, lhs: SourceLocationIndex, rhs: SourceLocationIndex) bool {
_ = context;
const lhs_ptr = lhs.ptr();
const rhs_ptr = rhs.ptr();
if (lhs_ptr.line < rhs_ptr.line) return true;
if (lhs_ptr.line > rhs_ptr.line) return false;
return lhs_ptr.column < rhs_ptr.column;
}
}.lessThan);
const source = walk_file_index.get_ast().source;
var line: usize = 1;
var column: usize = 1;
var next_loc_index: usize = 0;
for (source, 0..) |byte, offset| {
if (byte == '\n') {
line += 1;
column = 1;
} else {
column += 1;
}
while (true) {
if (next_loc_index >= locs.items.len) return;
const next_sli = locs.items[next_loc_index];
const next_sl = next_sli.ptr();
if (next_sl.line > line or (next_sl.line == line and next_sl.column >= column)) break;
try annotations.append(gpa, .{
.file_byte_offset = offset,
.dom_id = @intFromEnum(next_sli),
});
next_loc_index += 1;
}
}
}
var coverage = Coverage.init;
/// Indexed by `SourceLocationIndex`.
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .{};
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{};
fn updateCoverage(
directories: []const Coverage.String,
files: []const Coverage.File,
source_locations: []const Coverage.SourceLocation,
string_bytes: []const u8,
) !void {
coverage.directories.clearRetainingCapacity();
coverage.files.clearRetainingCapacity();
coverage.string_bytes.clearRetainingCapacity();
coverage_source_locations.clearRetainingCapacity();
try coverage_source_locations.appendSlice(gpa, source_locations);
try coverage.string_bytes.appendSlice(gpa, string_bytes);
try coverage.files.entries.resize(gpa, files.len);
@memcpy(coverage.files.entries.items(.key), files);
try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
try coverage.directories.entries.resize(gpa, directories.len);
@memcpy(coverage.directories.entries.items(.key), directories);
try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
}
export fn sourceLocationLinkHtml(index: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
index.sourceLocationLinkHtml(&string_result) catch @panic("OOM");
return String.init(string_result.items);
}
/// Returns empty string if coverage metadata is not available for this source location.
export fn sourceLocationPath(sli: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
if (sli.haveCoverage()) sli.appendPath(&string_result) catch @panic("OOM");
return String.init(string_result.items);
}
export fn sourceLocationFileHtml(sli: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
sli.fileHtml(&string_result) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"),
error.SourceUnavailable => {},
};
return String.init(string_result.items);
}
export fn sourceLocationFileCoveredList(sli_file: SourceLocationIndex) Slice(SourceLocationIndex) {
const global = struct {
var result: std.ArrayListUnmanaged(SourceLocationIndex) = .{};
fn add(i: u32, want_file: Coverage.File.Index) void {
const src_loc_index: SourceLocationIndex = @enumFromInt(i);
if (src_loc_index.ptr().file == want_file) result.appendAssumeCapacity(src_loc_index);
}
};
const want_file = sli_file.ptr().file;
global.result.clearRetainingCapacity();
// This code assumes 64-bit elements, which is incorrect if the executable
// being fuzzed is not built for a 64-bit CPU. It also assumes little-endian,
// which can likewise be incorrect.
comptime assert(abi.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
const n_bitset_elems = (coverage_source_locations.items.len + @bitSizeOf(u64) - 1) / @bitSizeOf(u64);
const covered_bits = std.mem.bytesAsSlice(
u64,
recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
);
var sli: u32 = 0;
for (covered_bits) |elem| {
global.result.ensureUnusedCapacity(gpa, 64) catch @panic("OOM");
for (0..@bitSizeOf(u64)) |i| {
if ((elem & (@as(u64, 1) << @intCast(i))) != 0) global.add(sli, want_file);
sli += 1;
}
}
return Slice(SourceLocationIndex).init(global.result.items);
}

Binary file not shown.

View File

@ -44,23 +44,23 @@
/* ISO/IEC TS 18661-1:2014 defines the __STDC_WANT_IEC_60559_BFP_EXT__
macro. Most but not all symbols enabled by that macro in TS
18661-1 are enabled unconditionally in C2X. In C2X, the symbols in
18661-1 are enabled unconditionally in C23. In C23, the symbols in
Annex F still require a new feature test macro
__STDC_WANT_IEC_60559_EXT__ instead (C2X does not define
__STDC_WANT_IEC_60559_EXT__ instead (C23 does not define
__STDC_WANT_IEC_60559_BFP_EXT__), while a few features from TS
18661-1 are not included in C2X (and thus should depend on
__STDC_WANT_IEC_60559_BFP_EXT__ even when C2X features are
18661-1 are not included in C23 (and thus should depend on
__STDC_WANT_IEC_60559_BFP_EXT__ even when C23 features are
enabled).
__GLIBC_USE (IEC_60559_BFP_EXT) controls those features from TS
18661-1 not included in C2X.
18661-1 not included in C23.
__GLIBC_USE (IEC_60559_BFP_EXT_C2X) controls those features from TS
18661-1 that are also included in C2X (with no feature test macro
required in C2X).
__GLIBC_USE (IEC_60559_BFP_EXT_C23) controls those features from TS
18661-1 that are also included in C23 (with no feature test macro
required in C23).
__GLIBC_USE (IEC_60559_EXT) controls those features from TS 18661-1
that are included in C2X but conditional on
that are included in C23 but conditional on
__STDC_WANT_IEC_60559_EXT__. (There are currently no features
conditional on __STDC_WANT_IEC_60559_EXT__ that are not in TS
18661-1.) */
@ -70,11 +70,11 @@
#else
# define __GLIBC_USE_IEC_60559_BFP_EXT 0
#endif
#undef __GLIBC_USE_IEC_60559_BFP_EXT_C2X
#if __GLIBC_USE (IEC_60559_BFP_EXT) || __GLIBC_USE (ISOC2X)
# define __GLIBC_USE_IEC_60559_BFP_EXT_C2X 1
#undef __GLIBC_USE_IEC_60559_BFP_EXT_C23
#if __GLIBC_USE (IEC_60559_BFP_EXT) || __GLIBC_USE (ISOC23)
# define __GLIBC_USE_IEC_60559_BFP_EXT_C23 1
#else
# define __GLIBC_USE_IEC_60559_BFP_EXT_C2X 0
# define __GLIBC_USE_IEC_60559_BFP_EXT_C23 0
#endif
#undef __GLIBC_USE_IEC_60559_EXT
#if __GLIBC_USE (IEC_60559_BFP_EXT) || defined __STDC_WANT_IEC_60559_EXT__
@ -86,18 +86,18 @@
/* ISO/IEC TS 18661-4:2015 defines the
__STDC_WANT_IEC_60559_FUNCS_EXT__ macro. Other than the reduction
functions, the symbols from this TS are enabled unconditionally in
C2X. */
C23. */
#undef __GLIBC_USE_IEC_60559_FUNCS_EXT
#if defined __USE_GNU || defined __STDC_WANT_IEC_60559_FUNCS_EXT__
# define __GLIBC_USE_IEC_60559_FUNCS_EXT 1
#else
# define __GLIBC_USE_IEC_60559_FUNCS_EXT 0
#endif
#undef __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X
#if __GLIBC_USE (IEC_60559_FUNCS_EXT) || __GLIBC_USE (ISOC2X)
# define __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X 1
#undef __GLIBC_USE_IEC_60559_FUNCS_EXT_C23
#if __GLIBC_USE (IEC_60559_FUNCS_EXT) || __GLIBC_USE (ISOC23)
# define __GLIBC_USE_IEC_60559_FUNCS_EXT_C23 1
#else
# define __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X 0
# define __GLIBC_USE_IEC_60559_FUNCS_EXT_C23 0
#endif
/* ISO/IEC TS 18661-3:2015 defines the

View File

@ -831,6 +831,10 @@ typedef struct
control. */
#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* AArch64 pointer authentication
enabled keys. */
#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers. */
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers. */
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers. */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register. */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note. */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers. */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode. */
@ -1234,6 +1238,10 @@ typedef struct
#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size. */
#define AT_RSEQ_ALIGN 28 /* rseq allocation alignment. */
/* More machine-dependent hints about processor capabilities. */
#define AT_HWCAP3 29 /* extension of AT_HWCAP. */
#define AT_HWCAP4 30 /* extension of AT_HWCAP. */
#define AT_EXECFN 31 /* Filename of executable. */
/* Pointer to the global system page used for system calls and other
@ -1333,9 +1341,13 @@ typedef struct
#define NT_GNU_PROPERTY_TYPE_0 5
/* Packaging metadata as defined on
https://systemd.io/COREDUMP_PACKAGE_METADATA/ */
https://systemd.io/ELF_PACKAGE_METADATA/ */
#define NT_FDO_PACKAGING_METADATA 0xcafe1a7e
/* dlopen metadata as defined on
https://systemd.io/ELF_DLOPEN_METADATA/ */
#define NT_FDO_DLOPEN_METADATA 0x407c0c0a
/* Note section name of program property. */
#define NOTE_GNU_PROPERTY_SECTION_NAME ".note.gnu.property"
@ -4237,6 +4249,8 @@ enum
#define R_LARCH_TLS_TPREL32 10
#define R_LARCH_TLS_TPREL64 11
#define R_LARCH_IRELATIVE 12
#define R_LARCH_TLS_DESC32 13
#define R_LARCH_TLS_DESC64 14
/* Reserved for future relocs that the dynamic linker must understand. */
@ -4331,6 +4345,23 @@ enum
#define R_LARCH_ADD_ULEB128 107
#define R_LARCH_SUB_ULEB128 108
#define R_LARCH_64_PCREL 109
#define R_LARCH_CALL36 110
#define R_LARCH_TLS_DESC_PC_HI20 111
#define R_LARCH_TLS_DESC_PC_LO12 112
#define R_LARCH_TLS_DESC64_PC_LO20 113
#define R_LARCH_TLS_DESC64_PC_HI12 114
#define R_LARCH_TLS_DESC_HI20 115
#define R_LARCH_TLS_DESC_LO12 116
#define R_LARCH_TLS_DESC64_LO20 117
#define R_LARCH_TLS_DESC64_HI12 118
#define R_LARCH_TLS_DESC_LD 119
#define R_LARCH_TLS_DESC_CALL 120
#define R_LARCH_TLS_LE_HI20_R 121
#define R_LARCH_TLS_LE_ADD_R 122
#define R_LARCH_TLS_LE_LO12_R 123
#define R_LARCH_TLS_LD_PCREL20_S2 124
#define R_LARCH_TLS_GD_PCREL20_S2 125
#define R_LARCH_TLS_DESC_PCREL20_S2 126
/* ARC specific declarations. */

View File

@ -679,9 +679,9 @@ for linking")
#endif
/* Helper / base macros for indirect function symbols. */
#define __ifunc_resolver(type_name, name, expr, arg, init, classifier) \
#define __ifunc_resolver(type_name, name, expr, init, classifier, ...) \
classifier inhibit_stack_protector \
__typeof (type_name) *name##_ifunc (arg) \
__typeof (type_name) *name##_ifunc (__VA_ARGS__) \
{ \
init (); \
__typeof (type_name) *res = expr; \
@ -689,13 +689,13 @@ for linking")
}
#ifdef HAVE_GCC_IFUNC
# define __ifunc(type_name, name, expr, arg, init) \
# define __ifunc_args(type_name, name, expr, init, ...) \
extern __typeof (type_name) name __attribute__ \
((ifunc (#name "_ifunc"))); \
__ifunc_resolver (type_name, name, expr, arg, init, static)
__ifunc_resolver (type_name, name, expr, init, static, __VA_ARGS__)
# define __ifunc_hidden(type_name, name, expr, arg, init) \
__ifunc (type_name, name, expr, arg, init)
# define __ifunc_args_hidden(type_name, name, expr, init, ...) \
__ifunc_args (type_name, name, expr, init, __VA_ARGS__)
#else
/* Gcc does not support __attribute__ ((ifunc (...))). Use the old behaviour
as fallback. But keep in mind that the debug information for the ifunc
@ -706,18 +706,24 @@ for linking")
different signatures. (Gcc support is disabled at least on a ppc64le
Ubuntu 14.04 system.) */
# define __ifunc(type_name, name, expr, arg, init) \
# define __ifunc_args(type_name, name, expr, init, ...) \
extern __typeof (type_name) name; \
__typeof (type_name) *name##_ifunc (arg) __asm__ (#name); \
__ifunc_resolver (type_name, name, expr, arg, init,) \
__typeof (type_name) *name##_ifunc (__VA_ARGS__) __asm__ (#name); \
__ifunc_resolver (type_name, name, expr, init, , __VA_ARGS__) \
__asm__ (".type " #name ", %gnu_indirect_function");
# define __ifunc_hidden(type_name, name, expr, arg, init) \
# define __ifunc_args_hidden(type_name, name, expr, init, ...) \
extern __typeof (type_name) __libc_##name; \
__ifunc (type_name, __libc_##name, expr, arg, init) \
__ifunc (type_name, __libc_##name, expr, __VA_ARGS__, init) \
strong_alias (__libc_##name, name);
#endif /* !HAVE_GCC_IFUNC */
#define __ifunc(type_name, name, expr, arg, init) \
__ifunc_args (type_name, name, expr, init, arg)
#define __ifunc_hidden(type_name, name, expr, arg, init) \
__ifunc_args_hidden (type_name, name, expr, init, arg)
/* The following macros are used for indirect function symbols in libc.so.
First of all, you need to have the function prototyped somewhere,
say in foo.h:

View File

@ -53,8 +53,8 @@ libc_hidden_proto (__isoc23_strtoul_l)
libc_hidden_proto (__isoc23_strtoll_l)
libc_hidden_proto (__isoc23_strtoull_l)
#if __GLIBC_USE (C2X_STRTOL)
/* Redirect internal uses of these functions to the C2X versions; the
#if __GLIBC_USE (C23_STRTOL)
/* Redirect internal uses of these functions to the C23 versions; the
redirection in the installed header does not work with
libc_hidden_proto. */
# undef strtol

View File

@ -176,7 +176,7 @@ typedef __pid_t pid_t;
This function is a cancellation point and therefore not marked with
__THROW. */
#ifndef __USE_TIME_BITS64
#ifndef __USE_TIME64_REDIRECTS
# ifndef __USE_FILE_OFFSET64
extern int fcntl (int __fd, int __cmd, ...);
# else
@ -189,7 +189,7 @@ extern int __REDIRECT (fcntl, (int __fd, int __cmd, ...), fcntl64);
# ifdef __USE_LARGEFILE64
extern int fcntl64 (int __fd, int __cmd, ...);
# endif
#else /* __USE_TIME_BITS64 */
#else /* __USE_TIME64_REDIRECTS */
# ifdef __REDIRECT
extern int __REDIRECT_NTH (fcntl, (int __fd, int __request, ...),
__fcntl_time64);
@ -344,8 +344,7 @@ extern int posix_fallocate64 (int __fd, off64_t __offset, off64_t __len);
/* Define some inlines helping to catch common problems. */
#if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function \
&& defined __va_arg_pack_len
#if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function
# include <bits/fcntl2.h>
#endif

View File

@ -209,7 +209,7 @@ extern int stat (const char *__restrict __file,
that file descriptor FD is open on and put them in BUF. */
extern int fstat (int __fd, struct stat *__buf) __THROW __nonnull ((2));
#else
# ifdef __USE_TIME_BITS64
# ifdef __USE_TIME64_REDIRECTS
# ifdef __REDIRECT_NTH
extern int __REDIRECT_NTH (stat, (const char *__restrict __file,
struct stat *__restrict __buf),
@ -236,7 +236,7 @@ extern int __REDIRECT_NTH (fstat, (int __fd, struct stat *__buf), fstat64)
# endif
#endif
#ifdef __USE_LARGEFILE64
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int stat64 (const char *__restrict __file,
struct stat64 *__restrict __buf) __THROW __nonnull ((1, 2));
extern int fstat64 (int __fd, struct stat64 *__buf) __THROW __nonnull ((2));
@ -265,7 +265,7 @@ extern int fstatat (int __fd, const char *__restrict __file,
struct stat *__restrict __buf, int __flag)
__THROW __nonnull ((2, 3));
# else
# ifdef __USE_TIME_BITS64
# ifdef __USE_TIME64_REDIRECTS
# ifdef __REDIRECT_NTH
extern int __REDIRECT_NTH (fstatat, (int __fd, const char *__restrict __file,
struct stat *__restrict __buf,
@ -287,7 +287,7 @@ extern int __REDIRECT_NTH (fstatat, (int __fd, const char *__restrict __file,
# endif
# ifdef __USE_LARGEFILE64
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int fstatat64 (int __fd, const char *__restrict __file,
struct stat64 *__restrict __buf, int __flag)
__THROW __nonnull ((2, 3));
@ -313,7 +313,7 @@ extern int __REDIRECT_NTH (fstatat64, (int __fd,
extern int lstat (const char *__restrict __file,
struct stat *__restrict __buf) __THROW __nonnull ((1, 2));
# else
# ifdef __USE_TIME_BITS64
# ifdef __USE_TIME64_REDIRECTS
# ifdef __REDIRECT_NTH
extern int __REDIRECT_NTH (lstat,
(const char *__restrict __file,
@ -334,7 +334,7 @@ extern int __REDIRECT_NTH (lstat,
# endif
# endif
# ifdef __USE_LARGEFILE64
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int lstat64 (const char *__restrict __file,
struct stat64 *__restrict __buf)
__THROW __nonnull ((1, 2));
@ -427,7 +427,7 @@ extern int mkfifoat (int __fd, const char *__path, __mode_t __mode)
#endif
#ifdef __USE_ATFILE
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
/* Set file access and modification times relative to directory file
descriptor. */
extern int utimensat (int __fd, const char *__path,
@ -447,7 +447,7 @@ extern int __REDIRECT_NTH (utimensat, (int fd, const char *__path,
#endif
#ifdef __USE_XOPEN2K8
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
/* Set file access and modification times of the file associated with FD. */
extern int futimens (int __fd, const struct timespec __times[2]) __THROW;


@ -145,6 +145,14 @@
#endif
/* The overloadable attribute was added on clang 2.6. */
#if defined __clang_major__ \
&& (__clang_major__ + (__clang_minor__ >= 6) > 2)
# define __attribute_overloadable__ __attribute__((__overloadable__))
#else
# define __attribute_overloadable__
#endif
/* Fortify support. */
#define __bos(ptr) __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
#define __bos0(ptr) __builtin_object_size (ptr, 0)
@ -187,27 +195,173 @@
__s, __osz)) \
&& !__glibc_safe_len_cond ((__SIZE_TYPE__) (__l), __s, __osz))
/* To correctly instrument the fortify wrapper clang requires the
pass_object_size attribute, and the attribute has the restriction that the
argument needs to be 'const'. Furthermore, to make it usable with C
interfaces, clang provides the overload attribute, which provides a C++
like function overload support. The overloaded fortify wrapper with the
pass_object_size attribute has precedence over the default symbol.
Also, clang does not support __va_arg_pack, so variadic functions are
expanded to issue va_arg implementations. The error function must not have
bodies (address takes are expanded to nonfortified calls), and with
__fortify_function compiler might still create a body with the C++
mangling name (due to the overload attribute). In this case, the function
is defined with __fortify_function_error_function macro instead.
The argument size check is also done with a clang-only attribute,
__attribute__ ((__diagnose_if__ (...))), different than gcc which calls
symbol_chk_warn alias with uses __warnattr attribute.
The pass_object_size was added on clang 4.0, __diagnose_if__ on 5.0,
and pass_dynamic_object_size on 9.0. */
#if defined __clang_major__ && __clang_major__ >= 5
# define __fortify_use_clang 1
# define __fortify_function_error_function static __attribute__((__unused__))
# define __fortify_clang_pass_object_size_n(n) \
__attribute__ ((__pass_object_size__ (n)))
# define __fortify_clang_pass_object_size0 \
__fortify_clang_pass_object_size_n (0)
# define __fortify_clang_pass_object_size \
__fortify_clang_pass_object_size_n (__USE_FORTIFY_LEVEL > 1)
# if __clang_major__ >= 9
# define __fortify_clang_pass_dynamic_object_size_n(n) \
__attribute__ ((__pass_dynamic_object_size__ (n)))
# define __fortify_clang_pass_dynamic_object_size0 \
__fortify_clang_pass_dynamic_object_size_n (0)
# define __fortify_clang_pass_dynamic_object_size \
__fortify_clang_pass_dynamic_object_size_n (1)
# else
# define __fortify_clang_pass_dynamic_object_size_n(n)
# define __fortify_clang_pass_dynamic_object_size0
# define __fortify_clang_pass_dynamic_object_size
# endif
# define __fortify_clang_bos_static_lt_impl(bos_val, n, s) \
((bos_val) != -1ULL && (n) > (bos_val) / (s))
# define __fortify_clang_bos_static_lt2(__n, __e, __s) \
__fortify_clang_bos_static_lt_impl (__bos (__e), __n, __s)
# define __fortify_clang_bos_static_lt(__n, __e) \
__fortify_clang_bos_static_lt2 (__n, __e, 1)
# define __fortify_clang_bos0_static_lt2(__n, __e, __s) \
__fortify_clang_bos_static_lt_impl (__bos0 (__e), __n, __s)
# define __fortify_clang_bos0_static_lt(__n, __e) \
__fortify_clang_bos0_static_lt2 (__n, __e, 1)
# define __fortify_clang_bosn_args(bos_fn, n, buf, div, complaint) \
(__fortify_clang_bos_static_lt_impl (bos_fn (buf), n, div)), (complaint), \
"warning"
# define __fortify_clang_warning(__c, __msg) \
__attribute__ ((__diagnose_if__ ((__c), (__msg), "warning")))
# define __fortify_clang_error(__c, __msg) \
__attribute__ ((__diagnose_if__ ((__c), (__msg), "error")))
# define __fortify_clang_warning_only_if_bos0_lt(n, buf, complaint) \
__attribute__ ((__diagnose_if__ \
(__fortify_clang_bosn_args (__bos0, n, buf, 1, complaint))))
# define __fortify_clang_warning_only_if_bos0_lt2(n, buf, div, complaint) \
__attribute__ ((__diagnose_if__ \
(__fortify_clang_bosn_args (__bos0, n, buf, div, complaint))))
# define __fortify_clang_warning_only_if_bos_lt(n, buf, complaint) \
__attribute__ ((__diagnose_if__ \
(__fortify_clang_bosn_args (__bos, n, buf, 1, complaint))))
# define __fortify_clang_warning_only_if_bos_lt2(n, buf, div, complaint) \
__attribute__ ((__diagnose_if__ \
(__fortify_clang_bosn_args (__bos, n, buf, div, complaint))))
# define __fortify_clang_prefer_this_overload \
__attribute__ ((enable_if (1, "")))
# define __fortify_clang_unavailable(__msg) \
__attribute__ ((unavailable(__msg)))
# if __USE_FORTIFY_LEVEL == 3
# define __fortify_clang_overload_arg(__type, __attr, __name) \
__type __attr const __fortify_clang_pass_dynamic_object_size __name
# define __fortify_clang_overload_arg0(__type, __attr, __name) \
__type __attr const __fortify_clang_pass_dynamic_object_size0 __name
# else
# define __fortify_clang_overload_arg(__type, __attr, __name) \
__type __attr const __fortify_clang_pass_object_size __name
# define __fortify_clang_overload_arg0(__type, __attr, __name) \
__type __attr const __fortify_clang_pass_object_size0 __name
# endif
# define __fortify_clang_mul_may_overflow(size, n) \
((size | n) >= (((size_t)1) << (8 * sizeof (size_t) / 2)))
# define __fortify_clang_size_too_small(__bos, __dest, __len) \
(__bos (__dest) != (size_t) -1 && __bos (__dest) < __len)
# define __fortify_clang_warn_if_src_too_large(__dest, __src) \
__fortify_clang_warning (__fortify_clang_size_too_small (__glibc_objsize, \
__dest, \
__builtin_strlen (__src) + 1), \
"destination buffer will always be overflown by source")
# define __fortify_clang_warn_if_dest_too_small(__dest, __len) \
__fortify_clang_warning (__fortify_clang_size_too_small (__glibc_objsize, \
__dest, \
__len), \
"function called with bigger length than the destination buffer")
# define __fortify_clang_warn_if_dest_too_small0(__dest, __len) \
__fortify_clang_warning (__fortify_clang_size_too_small (__glibc_objsize0, \
__dest, \
__len), \
"function called with bigger length than the destination buffer")
#else
# define __fortify_use_clang 0
# define __fortify_clang_warning(__c, __msg)
# define __fortify_clang_warning_only_if_bos0_lt(__n, __buf, __complaint)
# define __fortify_clang_warning_only_if_bos0_lt2(__n, __buf, __div, complaint)
# define __fortify_clang_warning_only_if_bos_lt(__n, __buf, __complaint)
# define __fortify_clang_warning_only_if_bos_lt2(__n, __buf, div, __complaint)
# define __fortify_clang_overload_arg(__type, __attr, __name) \
__type __attr __name
# define __fortify_clang_overload_arg0(__type, __attr, __name) \
__fortify_clang_overload_arg (__type, __attr, __name)
# define __fortify_clang_warn_if_src_too_large(__dest, __src)
# define __fortify_clang_warn_if_dest_too_small(__dest, __len)
# define __fortify_clang_warn_if_dest_too_small0(__dest, __len)
#endif
/* Fortify function f. __f_alias, __f_chk and __f_chk_warn must be
declared. */
#define __glibc_fortify(f, __l, __s, __osz, ...) \
#if !__fortify_use_clang
# define __glibc_fortify(f, __l, __s, __osz, ...) \
(__glibc_safe_or_unknown_len (__l, __s, __osz) \
? __ ## f ## _alias (__VA_ARGS__) \
: (__glibc_unsafe_len (__l, __s, __osz) \
? __ ## f ## _chk_warn (__VA_ARGS__, __osz) \
: __ ## f ## _chk (__VA_ARGS__, __osz)))
#else
# define __glibc_fortify(f, __l, __s, __osz, ...) \
(__osz == (__SIZE_TYPE__) -1) \
? __ ## f ## _alias (__VA_ARGS__) \
: __ ## f ## _chk (__VA_ARGS__, __osz)
#endif
/* Fortify function f, where object size argument passed to f is the number of
elements and not total size. */
#define __glibc_fortify_n(f, __l, __s, __osz, ...) \
#if !__fortify_use_clang
# define __glibc_fortify_n(f, __l, __s, __osz, ...) \
(__glibc_safe_or_unknown_len (__l, __s, __osz) \
? __ ## f ## _alias (__VA_ARGS__) \
: (__glibc_unsafe_len (__l, __s, __osz) \
? __ ## f ## _chk_warn (__VA_ARGS__, (__osz) / (__s)) \
: __ ## f ## _chk (__VA_ARGS__, (__osz) / (__s))))
# else
# define __glibc_fortify_n(f, __l, __s, __osz, ...) \
(__osz == (__SIZE_TYPE__) -1) \
? __ ## f ## _alias (__VA_ARGS__) \
: __ ## f ## _chk (__VA_ARGS__, (__osz) / (__s))
#endif
#endif /* __USE_FORTIFY_LEVEL > 0 */
#if __GNUC_PREREQ (4,3)
# define __warnattr(msg) __attribute__((__warning__ (msg)))
# define __errordecl(name, msg) \
@ -683,10 +837,10 @@ _Static_assert (0, "IEEE 128-bits long double requires redirection on this platf
# define __attr_access(x) __attribute__ ((__access__ x))
/* For _FORTIFY_SOURCE == 3 we use __builtin_dynamic_object_size, which may
use the access attribute to get object sizes from function definition
arguments, so we can't use them on functions we fortify. Drop the object
size hints for such functions. */
arguments, so we can't use them on functions we fortify. Drop the access
attribute for such functions. */
# if __USE_FORTIFY_LEVEL == 3
# define __fortified_attr_access(a, o, s) __attribute__ ((__access__ (a, o)))
# define __fortified_attr_access(a, o, s)
# else
# define __fortified_attr_access(a, o, s) __attr_access ((a, o, s))
# endif
@ -720,4 +874,13 @@ _Static_assert (0, "IEEE 128-bits long double requires redirection on this platf
# define __attribute_returns_twice__ /* Ignore. */
#endif
/* Mark struct types as aliasable. Restricted to compilers that
support forward declarations of structs in the presence of the
attribute. */
#if __GNUC_PREREQ (7, 1) || defined __clang__
# define __attribute_struct_may_alias__ __attribute__ ((__may_alias__))
#else
# define __attribute_struct_may_alias__
#endif
#endif /* sys/cdefs.h */
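As an aside to the hunk above: the clang side of the new fortification machinery rests on three attributes: pass_object_size (the callee sees the caller's buffer size), overloadable (C gets C++-style overloads, so the wrapper can shadow the plain symbol), and diagnose_if (a provably bad call becomes a compile-time warning or error). A minimal standalone sketch of that combination, clang-only and with every identifier invented for illustration rather than taken from this diff:

/* Sketch only: mirrors what the __fortify_clang_* macros above expand to. */
#include <string.h>

static __attribute__ ((__overloadable__)) char *
checked_strcpy (char *const __attribute__ ((__pass_object_size__ (1))) dst,
                const char *src)
     __attribute__ ((__diagnose_if__
                     (__builtin_object_size (dst, 1) != (size_t) -1
                      && __builtin_object_size (dst, 1) <= __builtin_strlen (src),
                      "destination buffer will always be overflown by source",
                      "warning")))
{
  return strcpy (dst, src);
}

int main (void)
{
  char buf[4];
  checked_strcpy (buf, "ok");          /* fits: no diagnostic */
  /* checked_strcpy (buf, "too long");    would emit the warning above */
  return 0;
}

The macros in the hunk wrap exactly this pattern, plus the _FORTIFY_SOURCE=3 variant that passes __builtin_dynamic_object_size instead.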


@ -98,7 +98,7 @@ __BEGIN_DECLS
This function is a cancellation point and therefore not marked with
__THROW. */
#ifndef __USE_TIME_BITS64
#ifndef __USE_TIME64_REDIRECTS
extern int select (int __nfds, fd_set *__restrict __readfds,
fd_set *__restrict __writefds,
fd_set *__restrict __exceptfds,
@ -123,7 +123,7 @@ extern int __REDIRECT (select,
This function is a cancellation point and therefore not marked with
__THROW. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pselect (int __nfds, fd_set *__restrict __readfds,
fd_set *__restrict __writefds,
fd_set *__restrict __exceptfds,


@ -269,7 +269,7 @@ extern int sigwaitinfo (const sigset_t *__restrict __set,
This function is a cancellation point and therefore not marked with
__THROW. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int sigtimedwait (const sigset_t *__restrict __set,
siginfo_t *__restrict __info,
const struct timespec *__restrict __timeout)


@ -210,7 +210,7 @@ extern unsigned long long int strtoull (const char *__restrict __nptr,
/* Versions of the above functions that handle '0b' and '0B' prefixes
in base 0 or 2. */
#if __GLIBC_USE (C2X_STRTOL)
#if __GLIBC_USE (C23_STRTOL)
# ifdef __REDIRECT
extern long int __REDIRECT_NTH (strtol, (const char *__restrict __nptr,
char **__restrict __endptr,
@ -274,7 +274,7 @@ extern unsigned long long int __isoc23_strtoull (const char *__restrict __nptr,
#endif
/* Convert a floating-point number to a string. */
#if __GLIBC_USE (IEC_60559_BFP_EXT_C2X)
#if __GLIBC_USE (IEC_60559_BFP_EXT_C23)
extern int strfromd (char *__dest, size_t __size, const char *__format,
double __f)
__THROW __nonnull ((3));
@ -360,7 +360,7 @@ extern unsigned long long int strtoull_l (const char *__restrict __nptr,
/* Versions of the above functions that handle '0b' and '0B' prefixes
in base 0 or 2. */
# if __GLIBC_USE (C2X_STRTOL)
# if __GLIBC_USE (C23_STRTOL)
# ifdef __REDIRECT
extern long int __REDIRECT_NTH (strtol_l, (const char *__restrict __nptr,
char **__restrict __endptr,


@ -891,6 +891,17 @@ extern int pthread_setschedparam (pthread_t __thr, int __policy,
/* Set thread THREAD's scheduling priority. */
extern int pthread_setschedprio (pthread_t __thr, int __prio) __THROW;
#ifdef __USE_GNU
/* Get thread name visible in the kernel and its interfaces. */
extern int pthread_getname_np (pthread_t __target_thread, char *__buf,
size_t __buflen)
__THROW __nonnull ((2)) __attr_access ((__write_only__, 2));
/* Set thread name visible in the kernel and its interfaces. */
extern int pthread_setname_np (pthread_t __target_thread, const char *__name)
__THROW __nonnull ((2)) __attr_access ((__read_only__, 2));
#endif
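A minimal usage sketch for the two prototypes added above (illustration only; the kernel limits thread names to 15 bytes plus the terminator, and the program must be built with -pthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main (void)
{
  char name[16];
  pthread_setname_np (pthread_self (), "worker-0");
  pthread_getname_np (pthread_self (), name, sizeof name);
  printf ("thread name: %s\n", name);   /* prints "worker-0" */
  return 0;
}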
#ifdef __USE_GNU
/* Yield the processor to another thread or process.
This function is similar to the POSIX `sched_yield' function but


@ -223,7 +223,7 @@ extern int pthread_join (pthread_t __th, void **__thread_return);
the thread in *THREAD_RETURN, if THREAD_RETURN is not NULL. */
extern int pthread_tryjoin_np (pthread_t __th, void **__thread_return) __THROW;
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
/* Make calling thread wait for termination of the thread TH, but only
until TIMEOUT. The exit status of the thread is stored in
*THREAD_RETURN, if THREAD_RETURN is not NULL.
@ -796,7 +796,7 @@ extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
#ifdef __USE_XOPEN2K
/* Wait until lock becomes available, or specified time passes. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
const struct timespec *__restrict
__abstime) __THROWNL __nonnull ((1, 2));
@ -813,7 +813,7 @@ extern int __REDIRECT_NTHNL (pthread_mutex_timedlock,
#endif
#ifdef __USE_GNU
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_mutex_clocklock (pthread_mutex_t *__restrict __mutex,
clockid_t __clockid,
const struct timespec *__restrict
@ -982,7 +982,7 @@ extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
# ifdef __USE_XOPEN2K
/* Try to acquire read lock for RWLOCK or return after specified time. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
const struct timespec *__restrict
__abstime) __THROWNL __nonnull ((1, 2));
@ -1000,7 +1000,7 @@ extern int __REDIRECT_NTHNL (pthread_rwlock_timedrdlock,
# endif
# ifdef __USE_GNU
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_rwlock_clockrdlock (pthread_rwlock_t *__restrict __rwlock,
clockid_t __clockid,
const struct timespec *__restrict
@ -1029,7 +1029,7 @@ extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
# ifdef __USE_XOPEN2K
/* Try to acquire write lock for RWLOCK or return after specified time. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
const struct timespec *__restrict
__abstime) __THROWNL __nonnull ((1, 2));
@ -1047,7 +1047,7 @@ extern int __REDIRECT_NTHNL (pthread_rwlock_timedwrlock,
# endif
# ifdef __USE_GNU
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_rwlock_clockwrlock (pthread_rwlock_t *__restrict __rwlock,
clockid_t __clockid,
const struct timespec *__restrict
@ -1141,7 +1141,7 @@ extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
This function is a cancellation point and therefore not marked with
__THROW. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
pthread_mutex_t *__restrict __mutex,
const struct timespec *__restrict __abstime)
@ -1167,7 +1167,7 @@ extern int __REDIRECT (pthread_cond_timedwait,
This function is a cancellation point and therefore not marked with
__THROW. */
# ifndef __USE_TIME_BITS64
# ifndef __USE_TIME64_REDIRECTS
extern int pthread_cond_clockwait (pthread_cond_t *__restrict __cond,
pthread_mutex_t *__restrict __mutex,
__clockid_t __clock_id,


@ -25,7 +25,7 @@
struct timex
{
# if defined __USE_TIME_BITS64 || (__TIMESIZE == 64 && __WORDSIZE == 32)
# if defined __USE_TIME64_REDIRECTS || (__TIMESIZE == 64 && __WORDSIZE == 32)
unsigned int modes; /* mode selector */
int :32; /* pad */
long long offset; /* time offset (usec) */


@ -468,6 +468,15 @@ L(pre_end): ASM_LINE_SEP \
#define CLOB_ARGS_1 CLOB_ARGS_2, "%r25"
#define CLOB_ARGS_0 CLOB_ARGS_1, "%r26"
#define VDSO_NAME "LINUX_6.11"
#define VDSO_HASH 182951793
#ifdef __LP64__
# define HAVE_CLOCK_GETTIME_VSYSCALL "__vdso_clock_gettime"
#else
# define HAVE_CLOCK_GETTIME64_VSYSCALL "__vdso_clock_gettime64"
#endif /* __LP64__ */
#endif /* __ASSEMBLER__ */
#endif /* _LINUX_HPPA_SYSDEP_H */


@ -156,6 +156,7 @@
/* List of system calls which are supported as vsyscalls (for RV32 and
RV64). */
# define HAVE_GETCPU_VSYSCALL "__vdso_getcpu"
# define HAVE_RISCV_HWPROBE "__vdso_riscv_hwprobe"
# undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
# define HAVE_INTERNAL_BRK_ADDR_SYMBOL 1


@ -54,7 +54,7 @@ struct ntptimeval
__BEGIN_DECLS
#ifndef __USE_TIME_BITS64
#ifndef __USE_TIME64_REDIRECTS
extern int adjtimex (struct timex *__ntx) __THROW __nonnull ((1));
extern int ntp_gettimex (struct ntptimeval *__ntv) __THROW __nonnull ((1));


@ -8,10 +8,9 @@
#define __WORDSIZE32_PTRDIFF_LONG 0
#endif
#define __WORDSIZE_TIME64_COMPAT32 1
#ifdef __x86_64__
# define __WORDSIZE_TIME64_COMPAT32 1
/* Both x86-64 and x32 use the 64-bit system call interface. */
# define __SYSCALL_WORDSIZE 64
#else
# define __WORDSIZE_TIME64_COMPAT32 0
#endif


@ -21,14 +21,118 @@
#include <sysdeps/generic/sysdep.h>
/* The extended state feature IDs in the state component bitmap. */
#define X86_XSTATE_X87_ID 0
#define X86_XSTATE_SSE_ID 1
#define X86_XSTATE_AVX_ID 2
#define X86_XSTATE_BNDREGS_ID 3
#define X86_XSTATE_BNDCFG_ID 4
#define X86_XSTATE_K_ID 5
#define X86_XSTATE_ZMM_H_ID 6
#define X86_XSTATE_ZMM_ID 7
#define X86_XSTATE_PKRU_ID 9
#define X86_XSTATE_TILECFG_ID 17
#define X86_XSTATE_TILEDATA_ID 18
#define X86_XSTATE_APX_F_ID 19
#ifdef __x86_64__
/* Offset for fxsave/xsave area used by _dl_runtime_resolve. Also need
space to preserve RCX, RDX, RSI, RDI, R8, R9 and RAX. It must be
aligned to 16 bytes for fxsave and 64 bytes for xsave. */
#define STATE_SAVE_OFFSET (8 * 7 + 8)
aligned to 16 bytes for fxsave and 64 bytes for xsave. It is non-zero
because MOV, instead of PUSH, is used to save registers onto stack.
/* Save SSE, AVX, AVX512, mask and bound registers. */
#define STATE_SAVE_MASK \
((1 << 1) | (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) | (1 << 7))
+==================+<- stack frame start aligned at 8 or 16 bytes
| |<- paddings for stack realignment of 64 bytes
|------------------|<- xsave buffer end aligned at 64 bytes
| |<-
| |<-
| |<-
|------------------|<- xsave buffer start at STATE_SAVE_OFFSET(%rsp)
| |<- 8-byte padding for 64-byte alignment
| |<- R9
| |<- R8
| |<- RDI
| |<- RSI
| |<- RDX
| |<- RCX
| |<- RAX
+==================+<- RSP aligned at 64 bytes
*/
# define STATE_SAVE_OFFSET (8 * 7 + 8)
/* _dl_tlsdesc_dynamic preserves RDI, RSI and RBX before realigning
stack. After realigning stack, it saves RCX, RDX, R8, R9, R10 and
R11. Allocate space for RDI, RSI and RBX to avoid clobbering saved
RDI, RSI and RBX values on stack by xsave.
+==================+<- stack frame start aligned at 8 or 16 bytes
| |<- RDI saved in the red zone
| |<- RSI saved in the red zone
| |<- RBX saved in the red zone
| |<- paddings for stack realignment of 64 bytes
|------------------|<- xsave buffer end aligned at 64 bytes
| |<-
| |<-
| |<-
|------------------|<- xsave buffer start at STATE_SAVE_OFFSET(%rsp)
| |<- 8-byte padding for 64-byte alignment
| |<- 8-byte padding for 64-byte alignment
| |<- R11
| |<- R10
| |<- R9
| |<- R8
| |<- RDX
| |<- RCX
+==================+<- RSP aligned at 64 bytes
Define the total register save area size for all integer registers by
adding 24 to STATE_SAVE_OFFSET since RDI, RSI and RBX are saved onto
stack without adjusting stack pointer first, using the red-zone. */
# define TLSDESC_CALL_REGISTER_SAVE_AREA (STATE_SAVE_OFFSET + 24)
/* Save SSE, AVX, AVX512, mask, bound and APX registers. Bound and APX
registers are mutually exclusive. */
# define STATE_SAVE_MASK \
((1 << X86_XSTATE_SSE_ID) \
| (1 << X86_XSTATE_AVX_ID) \
| (1 << X86_XSTATE_BNDREGS_ID) \
| (1 << X86_XSTATE_K_ID) \
| (1 << X86_XSTATE_ZMM_H_ID) \
| (1 << X86_XSTATE_ZMM_ID) \
| (1 << X86_XSTATE_APX_F_ID))
/* AMX state mask. */
# define AMX_STATE_SAVE_MASK \
((1 << X86_XSTATE_TILECFG_ID) | (1 << X86_XSTATE_TILEDATA_ID))
/* States to be included in xsave_state_full_size. */
# define FULL_STATE_SAVE_MASK \
(STATE_SAVE_MASK | AMX_STATE_SAVE_MASK)
#else
/* Offset for fxsave/xsave area used by _dl_tlsdesc_dynamic. Since i386
uses PUSH to save registers onto stack, use 0 here. */
# define STATE_SAVE_OFFSET 0
# define TLSDESC_CALL_REGISTER_SAVE_AREA 0
/* Save SSE, AVX, AXV512, mask and bound registers. */
# define STATE_SAVE_MASK \
((1 << X86_XSTATE_SSE_ID) \
| (1 << X86_XSTATE_AVX_ID) \
| (1 << X86_XSTATE_BNDREGS_ID) \
| (1 << X86_XSTATE_K_ID) \
| (1 << X86_XSTATE_ZMM_H_ID))
/* States to be included in xsave_state_size. */
# define FULL_STATE_SAVE_MASK STATE_SAVE_MASK
#endif
/* States which should be saved for TLSDESC_CALL and TLS_DESC_CALL.
Compiler assumes that all registers, including AMX and x87 FPU
stack registers, are unchanged after CALL, except for EFLAGS and
RAX/EAX. */
#define TLSDESC_CALL_STATE_SAVE_MASK \
(FULL_STATE_SAVE_MASK | (1 << X86_XSTATE_X87_ID))
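For reference, the rewritten 64-bit mask is the same bit pattern as before plus the APX bit, now spelled with the named X86_XSTATE_*_ID values; a quick standalone check (illustration only):

#include <stdio.h>

int main (void)
{
  /* SSE(1), AVX(2), BNDREGS(3), K(5), ZMM_H(6), ZMM(7), APX_F(19).  */
  unsigned int mask = (1u << 1) | (1u << 2) | (1u << 3)
                      | (1u << 5) | (1u << 6) | (1u << 7) | (1u << 19);
  printf ("STATE_SAVE_MASK = %#x\n", mask);   /* prints 0x800ee */
  return 0;
}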
/* Constants for bits in __x86_string_control: */


@ -10,14 +10,14 @@
has nanoseconds instead of microseconds. */
struct timespec
{
#ifdef __USE_TIME_BITS64
#ifdef __USE_TIME64_REDIRECTS
__time64_t tv_sec; /* Seconds. */
#else
__time_t tv_sec; /* Seconds. */
#endif
#if __WORDSIZE == 64 \
|| (defined __SYSCALL_WORDSIZE && __SYSCALL_WORDSIZE == 64) \
|| (__TIMESIZE == 32 && !defined __USE_TIME_BITS64)
|| (__TIMESIZE == 32 && !defined __USE_TIME64_REDIRECTS)
__syscall_slong_t tv_nsec; /* Nanoseconds. */
#else
# if __BYTE_ORDER == __BIG_ENDIAN


@ -7,7 +7,7 @@
microsecond but also has a range of years. */
struct timeval
{
#ifdef __USE_TIME_BITS64
#ifdef __USE_TIME64_REDIRECTS
__time64_t tv_sec; /* Seconds. */
__suseconds64_t tv_usec; /* Microseconds. */
#else


@ -4,7 +4,7 @@
#include <bits/types.h>
/* Returned by `time'. */
#ifdef __USE_TIME_BITS64
#ifdef __USE_TIME64_REDIRECTS
typedef __time64_t time_t;
#else
typedef __time_t time_t;


@ -102,5 +102,25 @@
#define HWCAP2_SME_BI32I32 (1UL << 40)
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_SVE_B16B16 (1UL << 45)
#define HWCAP2_LRCPC3 (1UL << 46)
#define HWCAP2_LSE128 (1UL << 47)
#define HWCAP2_FPMR (1UL << 48)
#define HWCAP2_LUT (1UL << 49)
#define HWCAP2_FAMINMAX (1UL << 50)
#define HWCAP2_F8CVT (1UL << 51)
#define HWCAP2_F8FMA (1UL << 52)
#define HWCAP2_F8DP4 (1UL << 53)
#define HWCAP2_F8DP2 (1UL << 54)
#define HWCAP2_F8E4M3 (1UL << 55)
#define HWCAP2_F8E5M2 (1UL << 56)
#define HWCAP2_SME_LUTV2 (1UL << 57)
#define HWCAP2_SME_F8F16 (1UL << 58)
#define HWCAP2_SME_F8F32 (1UL << 59)
#define HWCAP2_SME_SF8FMA (1UL << 60)
#define HWCAP2_SME_SF8DP4 (1UL << 61)
#define HWCAP2_SME_SF8DP2 (1UL << 62)
#endif /* __ASM_HWCAP_H */
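These bits are reported to user space through the auxiliary vector; a minimal run-time check (illustration only, with the constant repeated locally in case the installed headers predate it):

#include <sys/auxv.h>
#include <stdio.h>

#ifndef HWCAP2_LSE128
# define HWCAP2_LSE128 (1UL << 47)   /* value from the header above */
#endif

int main (void)
{
  unsigned long hwcap2 = getauxval (AT_HWCAP2);
  printf ("LSE128: %s\n", (hwcap2 & HWCAP2_LSE128) ? "present" : "absent");
  return 0;
}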


@ -37,9 +37,7 @@
#include <asm/ptrace.h>
#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@ -76,11 +74,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)
#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
@ -162,6 +160,11 @@ struct kvm_sync_regs {
__u64 device_irq_level;
};
/* Bits for run->s.regs.device_irq_level */
#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
/*
* PMU filter structure. Describe a range of events with a particular
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
@ -198,6 +201,15 @@ struct kvm_arm_copy_mte_tags {
__u64 reserved[2];
};
/*
* Counter/Timer offset structure. Describe the virtual/physical offset.
* To be used with KVM_ARM_SET_COUNTER_OFFSET.
*/
struct kvm_arm_counter_offset {
__u64 counter_offset;
__u64 reserved;
};
#define KVM_ARM_TAGS_TO_GUEST 0
#define KVM_ARM_TAGS_FROM_GUEST 1
@ -363,6 +375,10 @@ enum {
KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
};
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@ -402,6 +418,8 @@ enum {
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2
#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3
#define KVM_ARM_VCPU_PVTIME_CTRL 2
#define KVM_ARM_VCPU_PVTIME_IPA 0
@ -458,6 +476,56 @@ enum {
/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
enum kvm_smccc_filter_action {
KVM_SMCCC_FILTER_HANDLE = 0,
KVM_SMCCC_FILTER_DENY,
KVM_SMCCC_FILTER_FWD_TO_USER,
};
struct kvm_smccc_filter {
__u32 base;
__u32 nr_functions;
__u8 action;
__u8 pad[15];
};
/* arm64-specific KVM_EXIT_HYPERCALL flags */
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
/*
* Get feature ID registers userspace writable mask.
*
* From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
* Feature Register 2"):
*
* "The Feature ID space is defined as the System register space in
* AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
* op2=={0-7}."
*
* This covers all currently known R/O registers that indicate
* anything useful feature wise, including the ID registers.
*
* If we ever need to introduce a new range, it will be described as
* such in the range field.
*/
#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
({ \
__u64 __op1 = (op1) & 3; \
__op1 -= (__op1 == 3); \
(__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
})
#define KVM_ARM_FEATURE_ID_RANGE 0
#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
struct reg_mask_range {
__u64 addr; /* Pointer to mask array */
__u32 range; /* Requested range */
__u32 reserved[13];
};
#endif
#endif /* __ARM_KVM_H__ */
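A worked example of the KVM_ARM_FEATURE_ID_RANGE_IDX computation above (illustration only; the ID_AA64PFR0_EL1 encoding used here is an assumption, not something stated in this diff):

#include <stdio.h>

/* Local copy of the index computation, repeated here purely for the example. */
#define FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2)          \
  ({                                                           \
    unsigned long long __op1 = (op1) & 3;                      \
    __op1 -= (__op1 == 3);                                     \
    (__op1 << 6 | ((crm) & 7) << 3 | (op2));                   \
  })

int main (void)
{
  /* Assumed encoding for ID_AA64PFR0_EL1: op0=3, op1=0, CRn=0, CRm=4, op2=0. */
  printf ("%llu\n", FEATURE_ID_RANGE_IDX (3, 0, 0, 4, 0));   /* prints 32 */
  return 0;
}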


@ -152,6 +152,14 @@ struct tpidr2_context {
__u64 tpidr2;
};
/* FPMR context */
#define FPMR_MAGIC 0x46504d52
struct fpmr_context {
struct _aarch64_ctx head;
__u64 fpmr;
};
#define ZA_MAGIC 0x54366345
struct za_context {
@ -177,7 +185,7 @@ struct zt_context {
* vector length beyond its initial architectural limit of 2048 bits
* (16 quadwords).
*
* See linux/Documentation/arm64/sve.rst for a description of the VL/VQ
* See linux/Documentation/arch/arm64/sve.rst for a description of the VL/VQ
* terminology.
*/
#define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */


@ -13,6 +13,17 @@
#define __SVE_VQ_BYTES 16 /* number of bytes per quadword */
/*
* Yes, __SVE_VQ_MAX is 512 QUADWORDS.
*
* To help ensure forward portability, this is much larger than the
* current maximum value defined by the SVE architecture. While arrays
* or static allocations can be sized based on this value, watch out!
* It will waste a surprisingly large amount of memory.
*
* Dynamic sizing based on the actual runtime vector length is likely to
* be preferable for most purposes.
*/
#define __SVE_VQ_MIN 1
#define __SVE_VQ_MAX 512


@ -73,7 +73,7 @@ fenv_t;
# define FE_NOMASK_ENV ((const fenv_t *) -2)
#endif
#if __GLIBC_USE (IEC_60559_BFP_EXT_C2X)
#if __GLIBC_USE (IEC_60559_BFP_EXT_C23)
/* Type representing floating-point control modes. */
typedef unsigned int femode_t;


@ -99,4 +99,22 @@
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_SVE_B16B16 (1UL << 45)
#define HWCAP2_LRCPC3 (1UL << 46)
#define HWCAP2_LSE128 (1UL << 47)
#define HWCAP2_FPMR (1UL << 48)
#define HWCAP2_LUT (1UL << 49)
#define HWCAP2_FAMINMAX (1UL << 50)
#define HWCAP2_F8CVT (1UL << 51)
#define HWCAP2_F8FMA (1UL << 52)
#define HWCAP2_F8DP4 (1UL << 53)
#define HWCAP2_F8DP2 (1UL << 54)
#define HWCAP2_F8E4M3 (1UL << 55)
#define HWCAP2_F8E5M2 (1UL << 56)
#define HWCAP2_SME_LUTV2 (1UL << 57)
#define HWCAP2_SME_F8F16 (1UL << 58)
#define HWCAP2_SME_F8F32 (1UL << 59)
#define HWCAP2_SME_SF8FMA (1UL << 60)
#define HWCAP2_SME_SF8DP4 (1UL << 61)
#define HWCAP2_SME_SF8DP2 (1UL << 62)


@ -33,22 +33,50 @@
# define __DECL_SIMD_acos __DECL_SIMD_aarch64
# undef __DECL_SIMD_acosf
# define __DECL_SIMD_acosf __DECL_SIMD_aarch64
# undef __DECL_SIMD_acosh
# define __DECL_SIMD_acosh __DECL_SIMD_aarch64
# undef __DECL_SIMD_acoshf
# define __DECL_SIMD_acoshf __DECL_SIMD_aarch64
# undef __DECL_SIMD_asin
# define __DECL_SIMD_asin __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinf
# define __DECL_SIMD_asinf __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinh
# define __DECL_SIMD_asinh __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinhf
# define __DECL_SIMD_asinhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan
# define __DECL_SIMD_atan __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanf
# define __DECL_SIMD_atanf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanh
# define __DECL_SIMD_atanh __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanhf
# define __DECL_SIMD_atanhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2
# define __DECL_SIMD_atan2 __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2f
# define __DECL_SIMD_atan2f __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrt
# define __DECL_SIMD_cbrt __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrtf
# define __DECL_SIMD_cbrtf __DECL_SIMD_aarch64
# undef __DECL_SIMD_cos
# define __DECL_SIMD_cos __DECL_SIMD_aarch64
# undef __DECL_SIMD_cosf
# define __DECL_SIMD_cosf __DECL_SIMD_aarch64
# undef __DECL_SIMD_cosh
# define __DECL_SIMD_cosh __DECL_SIMD_aarch64
# undef __DECL_SIMD_coshf
# define __DECL_SIMD_coshf __DECL_SIMD_aarch64
# undef __DECL_SIMD_erf
# define __DECL_SIMD_erf __DECL_SIMD_aarch64
# undef __DECL_SIMD_erff
# define __DECL_SIMD_erff __DECL_SIMD_aarch64
# undef __DECL_SIMD_erfc
# define __DECL_SIMD_erfc __DECL_SIMD_aarch64
# undef __DECL_SIMD_erfcf
# define __DECL_SIMD_erfcf __DECL_SIMD_aarch64
# undef __DECL_SIMD_exp
# define __DECL_SIMD_exp __DECL_SIMD_aarch64
# undef __DECL_SIMD_expf
@ -65,6 +93,10 @@
# define __DECL_SIMD_expm1 __DECL_SIMD_aarch64
# undef __DECL_SIMD_expm1f
# define __DECL_SIMD_expm1f __DECL_SIMD_aarch64
# undef __DECL_SIMD_hypot
# define __DECL_SIMD_hypot __DECL_SIMD_aarch64
# undef __DECL_SIMD_hypotf
# define __DECL_SIMD_hypotf __DECL_SIMD_aarch64
# undef __DECL_SIMD_log
# define __DECL_SIMD_log __DECL_SIMD_aarch64
# undef __DECL_SIMD_logf
@ -81,14 +113,26 @@
# define __DECL_SIMD_log2 __DECL_SIMD_aarch64
# undef __DECL_SIMD_log2f
# define __DECL_SIMD_log2f __DECL_SIMD_aarch64
# undef __DECL_SIMD_pow
# define __DECL_SIMD_pow __DECL_SIMD_aarch64
# undef __DECL_SIMD_powf
# define __DECL_SIMD_powf __DECL_SIMD_aarch64
# undef __DECL_SIMD_sin
# define __DECL_SIMD_sin __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinf
# define __DECL_SIMD_sinf __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinh
# define __DECL_SIMD_sinh __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinhf
# define __DECL_SIMD_sinhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_tan
# define __DECL_SIMD_tan __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanf
# define __DECL_SIMD_tanf __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanh
# define __DECL_SIMD_tanh __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanhf
# define __DECL_SIMD_tanhf __DECL_SIMD_aarch64
#endif
#if __GNUC_PREREQ(9, 0)
@ -117,35 +161,57 @@ typedef __SVBool_t __sv_bool_t;
__vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acoshf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cbrtf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_coshf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_erff (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_erfcf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp10f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expm1f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4vv_hypotf (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log10f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log1pf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4vv_powf (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanhf (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acosh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asinh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atanh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cbrt (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cosh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_erf (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_erfc (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp10 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_expm1 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2vv_hypot (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log10 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log1p (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2vv_pow (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_sinh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_tanh (__f64x2_t);
# undef __ADVSIMD_VEC_MATH_SUPPORTED
#endif /* __ADVSIMD_VEC_MATH_SUPPORTED */
@ -154,35 +220,57 @@ __vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
__sv_f32_t _ZGVsMxvv_atan2f (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acoshf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cbrtf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_coshf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_erff (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_erfcf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp10f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expm1f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxvv_hypotf (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log10f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log1pf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxvv_powf (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanhf (__sv_f32_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_atan2 (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acosh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asinh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atan (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atanh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cbrt (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cosh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_erf (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_erfc (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp10 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_expm1 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_hypot (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log10 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log1p (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_pow (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_sinh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_tan (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_tanh (__sv_f64_t, __sv_bool_t);
# undef __SVE_VEC_MATH_SUPPORTED
#endif /* __SVE_VEC_MATH_SUPPORTED */
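These declarations are normally reached indirectly: the compiler vectorizes scalar libm calls into the _ZGV* variants. A sketch of a loop that can be lowered that way (illustration only; assumes an AArch64 glibc that ships these symbols and a compiler invoked with, e.g., -O2 -fopenmp-simd -fno-math-errno):

#include <math.h>

void cos_all (double *restrict dst, const double *restrict src, int n)
{
#pragma omp simd
  for (int i = 0; i < n; i++)
    dst[i] = cos (src[i]);   /* may become _ZGVnN2v_cos / _ZGVsMxv_cos */
}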


@ -73,7 +73,7 @@ fenv_t;
# define FE_NOMASK_ENV ((const fenv_t *) -2)
#endif
#if __GLIBC_USE (IEC_60559_BFP_EXT_C2X)
#if __GLIBC_USE (IEC_60559_BFP_EXT_C23)
/* Type representing floating-point control modes. */
typedef unsigned int femode_t;


@ -99,4 +99,22 @@
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_HBC (1UL << 44)
#define HWCAP2_SVE_B16B16 (1UL << 45)
#define HWCAP2_LRCPC3 (1UL << 46)
#define HWCAP2_LSE128 (1UL << 47)
#define HWCAP2_FPMR (1UL << 48)
#define HWCAP2_LUT (1UL << 49)
#define HWCAP2_FAMINMAX (1UL << 50)
#define HWCAP2_F8CVT (1UL << 51)
#define HWCAP2_F8FMA (1UL << 52)
#define HWCAP2_F8DP4 (1UL << 53)
#define HWCAP2_F8DP2 (1UL << 54)
#define HWCAP2_F8E4M3 (1UL << 55)
#define HWCAP2_F8E5M2 (1UL << 56)
#define HWCAP2_SME_LUTV2 (1UL << 57)
#define HWCAP2_SME_F8F16 (1UL << 58)
#define HWCAP2_SME_F8F32 (1UL << 59)
#define HWCAP2_SME_SF8FMA (1UL << 60)
#define HWCAP2_SME_SF8DP4 (1UL << 61)
#define HWCAP2_SME_SF8DP2 (1UL << 62)


@ -33,22 +33,50 @@
# define __DECL_SIMD_acos __DECL_SIMD_aarch64
# undef __DECL_SIMD_acosf
# define __DECL_SIMD_acosf __DECL_SIMD_aarch64
# undef __DECL_SIMD_acosh
# define __DECL_SIMD_acosh __DECL_SIMD_aarch64
# undef __DECL_SIMD_acoshf
# define __DECL_SIMD_acoshf __DECL_SIMD_aarch64
# undef __DECL_SIMD_asin
# define __DECL_SIMD_asin __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinf
# define __DECL_SIMD_asinf __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinh
# define __DECL_SIMD_asinh __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinhf
# define __DECL_SIMD_asinhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan
# define __DECL_SIMD_atan __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanf
# define __DECL_SIMD_atanf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanh
# define __DECL_SIMD_atanh __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanhf
# define __DECL_SIMD_atanhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2
# define __DECL_SIMD_atan2 __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2f
# define __DECL_SIMD_atan2f __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrt
# define __DECL_SIMD_cbrt __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrtf
# define __DECL_SIMD_cbrtf __DECL_SIMD_aarch64
# undef __DECL_SIMD_cos
# define __DECL_SIMD_cos __DECL_SIMD_aarch64
# undef __DECL_SIMD_cosf
# define __DECL_SIMD_cosf __DECL_SIMD_aarch64
# undef __DECL_SIMD_cosh
# define __DECL_SIMD_cosh __DECL_SIMD_aarch64
# undef __DECL_SIMD_coshf
# define __DECL_SIMD_coshf __DECL_SIMD_aarch64
# undef __DECL_SIMD_erf
# define __DECL_SIMD_erf __DECL_SIMD_aarch64
# undef __DECL_SIMD_erff
# define __DECL_SIMD_erff __DECL_SIMD_aarch64
# undef __DECL_SIMD_erfc
# define __DECL_SIMD_erfc __DECL_SIMD_aarch64
# undef __DECL_SIMD_erfcf
# define __DECL_SIMD_erfcf __DECL_SIMD_aarch64
# undef __DECL_SIMD_exp
# define __DECL_SIMD_exp __DECL_SIMD_aarch64
# undef __DECL_SIMD_expf
@ -65,6 +93,10 @@
# define __DECL_SIMD_expm1 __DECL_SIMD_aarch64
# undef __DECL_SIMD_expm1f
# define __DECL_SIMD_expm1f __DECL_SIMD_aarch64
# undef __DECL_SIMD_hypot
# define __DECL_SIMD_hypot __DECL_SIMD_aarch64
# undef __DECL_SIMD_hypotf
# define __DECL_SIMD_hypotf __DECL_SIMD_aarch64
# undef __DECL_SIMD_log
# define __DECL_SIMD_log __DECL_SIMD_aarch64
# undef __DECL_SIMD_logf
@ -81,14 +113,26 @@
# define __DECL_SIMD_log2 __DECL_SIMD_aarch64
# undef __DECL_SIMD_log2f
# define __DECL_SIMD_log2f __DECL_SIMD_aarch64
# undef __DECL_SIMD_pow
# define __DECL_SIMD_pow __DECL_SIMD_aarch64
# undef __DECL_SIMD_powf
# define __DECL_SIMD_powf __DECL_SIMD_aarch64
# undef __DECL_SIMD_sin
# define __DECL_SIMD_sin __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinf
# define __DECL_SIMD_sinf __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinh
# define __DECL_SIMD_sinh __DECL_SIMD_aarch64
# undef __DECL_SIMD_sinhf
# define __DECL_SIMD_sinhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_tan
# define __DECL_SIMD_tan __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanf
# define __DECL_SIMD_tanf __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanh
# define __DECL_SIMD_tanh __DECL_SIMD_aarch64
# undef __DECL_SIMD_tanhf
# define __DECL_SIMD_tanhf __DECL_SIMD_aarch64
#endif
#if __GNUC_PREREQ(9, 0)
@ -117,35 +161,57 @@ typedef __SVBool_t __sv_bool_t;
__vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acoshf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cbrtf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_coshf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_erff (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_erfcf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp10f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_exp2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_expm1f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4vv_hypotf (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_logf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log10f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log1pf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4vv_powf (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanhf (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acosh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asinh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atanh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cbrt (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cosh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_erf (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_erfc (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp10 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_exp2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_expm1 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2vv_hypot (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log10 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log1p (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_log2 (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2vv_pow (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_sin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_sinh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_tanh (__f64x2_t);
# undef __ADVSIMD_VEC_MATH_SUPPORTED
#endif /* __ADVSIMD_VEC_MATH_SUPPORTED */
@ -154,35 +220,57 @@ __vpcs __f64x2_t _ZGVnN2v_tan (__f64x2_t);
__sv_f32_t _ZGVsMxvv_atan2f (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acoshf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cbrtf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_coshf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_erff (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_erfcf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp10f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_exp2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_expm1f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxvv_hypotf (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_logf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log10f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log1pf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_log2f (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxvv_powf (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanhf (__sv_f32_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_atan2 (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acosh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asinh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atan (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atanh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cbrt (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cosh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_erf (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_erfc (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp10 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_exp2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_expm1 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_hypot (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log10 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log1p (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_log2 (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_pow (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_sin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_sinh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_tan (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_tanh (__sv_f64_t, __sv_bool_t);
# undef __SVE_VEC_MATH_SUPPORTED
#endif /* __SVE_VEC_MATH_SUPPORTED */


@ -2,6 +2,17 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
#ifndef __BITS_PER_LONG
/*
* In order to keep safe and avoid regression, only unify uapi
* bitsperlong.h for some archs which are using newer toolchains
* that have the definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
* See the following link for more info:
* https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
*/
#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@ -9,8 +20,12 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
#endif
#ifndef __BITS_PER_LONG_LONG
#define __BITS_PER_LONG_LONG 64
#endif
#endif /* __ASM_GENERIC_BITS_PER_LONG */
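The effect of the unified definition is easy to check from user code (illustration only):

#include <stdio.h>

int main (void)
{
#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
  /* The unified path above: 8 * 8 == 64 on LP64, 8 * 4 == 32 on ILP32. */
  printf ("__BITS_PER_LONG would be %d\n", __CHAR_BIT__ * __SIZEOF_LONG__);
#else
  printf ("older toolchain: falls back to 32 unless the arch overrides it\n");
#endif
  return 0;
}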


@ -68,11 +68,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void *_addr; /* faulting insn/memory ref. */
#ifdef __ia64__
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see ia64 si_flags */
unsigned long _isr; /* isr */
#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
@ -242,7 +237,8 @@ typedef struct siginfo {
#define SEGV_ADIPERR 7 /* Precise MCD exception */
#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
#define NSIGSEGV 9
#define SEGV_CPERR 10 /* Control protection fault */
#define NSIGSEGV 10
/*
* SIGBUS si_codes


@ -132,6 +132,9 @@
#define SO_RCVMARK 75
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
/* on 64-bit and x32, avoid the ?: operator */


@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
/* fs/cookies.c */
#define __NR_lookup_dcookie 18
__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
/* fs/eventfd.c */
__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
/* mm/, CONFIG_MMU only */
/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@ -770,15 +698,19 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
@ -805,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
@ -844,13 +776,14 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
@ -865,7 +798,6 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
@ -877,17 +809,44 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
#define __NR_map_shadow_stack 453
__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
#define __NR_futex_wake 454
__SYSCALL(__NR_futex_wake, sys_futex_wake)
#define __NR_futex_wait 455
__SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#define __NR_statmount 457
__SYSCALL(__NR_statmount, sys_statmount)
#define __NR_listmount 458
__SYSCALL(__NR_listmount, sys_listmount)
#define __NR_lsm_get_self_attr 459
__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
#define __NR_lsm_set_self_attr 460
__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
#define __NR_lsm_list_modules 461
__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
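
The new entries above end with __NR_mseal. As an illustrative sketch only (not part of the header), a program whose C library does not yet wrap these newer calls can reach them through the generic syscall(2) entry point; the (addr, len, flags) prototype used here is an assumption based on the kernel's documented mseal() interface.

#include <stddef.h>
#include <unistd.h>

/* Invoke mseal() by number; returns 0 on success, -1 with errno on failure. */
static long try_mseal(void *addr, size_t len, unsigned long flags)
{
        return syscall(__NR_mseal, addr, len, flags);
}
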
#undef __NR_syscalls
#define __NR_syscalls 451
#define __NR_syscalls 463
/*
* 32 bit systems traditionally used different


@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA)
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@ -145,7 +150,7 @@ extern "C" {
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
* May depend on GPU instructions to flush caches explicitly
* May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
@ -158,6 +163,14 @@ extern "C" {
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
/* Flag that BO should be coherent across devices when using device-level
* atomics. May depend on GPU instructions to flush caches to device scope
* explicitly, promoting them to system scope automatically.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@ -236,15 +249,17 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
/* indicate gpu reset occured after ctx created */
/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicate vram lost occured after ctx created */
/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* indicate that the reset hasn't completed yet */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@ -579,7 +594,8 @@ struct drm_amdgpu_gem_va {
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_VPE 9
#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@ -592,6 +608,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@ -708,6 +725,15 @@ struct drm_amdgpu_cs_chunk_data {
};
};
#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
__u64 shadow_va;
__u64 csa_va;
__u64 gds_va;
__u64 flags;
};
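
A hedged sketch of how a user-mode driver might fill the new shadow chunk for a GFX11 submission; it assumes the shadow, CSA and GDS buffers were allocated with the sizes and alignments reported in drm_amdgpu_info_device, and that the chunk is then referenced from the usual drm_amdgpu_cs chunk array.

#include <stdint.h>

/* Populate a CS chunk describing the gfx shadow/CSA/GDS areas. */
static void fill_shadow_chunk(struct drm_amdgpu_cs_chunk *chunk,
                              struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow,
                              __u64 shadow_va, __u64 csa_va, __u64 gds_va)
{
        shadow->shadow_va = shadow_va;
        shadow->csa_va = csa_va;
        shadow->gds_va = gds_va;
        shadow->flags = AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;

        chunk->chunk_id = AMDGPU_CHUNK_ID_CP_GFX_SHADOW;
        chunk->length_dw = sizeof(*shadow) / sizeof(uint32_t);
        chunk->chunk_data = (__u64)(uintptr_t)shadow;
}
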
/*
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
@ -780,6 +806,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
/* Subquery id: Query VPE firmware version */
#define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@ -837,6 +865,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
/* Subquery id: Query GPU peak pstate memory clock */
#define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Subquery id: Query input GPU power */
#define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER 0xc
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@ -876,6 +906,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* Query the max number of IBs per gang per submission */
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@ -1126,6 +1160,14 @@ struct drm_amdgpu_info_device {
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
/* shadow area size for gfx11 */
__u32 shadow_size;
/* shadow area base virtual alignment for gfx11 */
__u32 shadow_alignment;
/* context save area size for gfx11 */
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
};
struct drm_amdgpu_info_hw_ip {
@ -1193,6 +1235,20 @@ struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
#define AMDGPU_VMHUB_TYPE_MASK 0xff
#define AMDGPU_VMHUB_TYPE_SHIFT 0
#define AMDGPU_VMHUB_TYPE_GFX 0
#define AMDGPU_VMHUB_TYPE_MM0 1
#define AMDGPU_VMHUB_TYPE_MM1 2
#define AMDGPU_VMHUB_IDX_MASK 0xff00
#define AMDGPU_VMHUB_IDX_SHIFT 8
struct drm_amdgpu_info_gpuvm_fault {
__u64 addr;
__u32 status;
__u32 vmhub;
};
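
For illustration only: the vmhub value reported by the new AMDGPU_INFO_GPUVM_FAULT query packs a hub type and an instance index, which can be split with the masks defined above.

/* Decode drm_amdgpu_info_gpuvm_fault.vmhub into hub type and instance index. */
static void decode_vmhub(__u32 vmhub, __u32 *type, __u32 *index)
{
        *type  = (vmhub & AMDGPU_VMHUB_TYPE_MASK) >> AMDGPU_VMHUB_TYPE_SHIFT;
        *index = (vmhub & AMDGPU_VMHUB_IDX_MASK) >> AMDGPU_VMHUB_IDX_SHIFT;
}
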
/*
* Supported GPU families
*/
@ -1211,6 +1267,7 @@ struct drm_amdgpu_info_video_caps {
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#if defined(__cplusplus)
}


@ -667,8 +667,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
* PRIME buffers are exposed as dma-buf file descriptors. See
* Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
* Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
* &DRM_PRIME_CAP_EXPORT are always advertised.
*
* PRIME buffers are exposed as dma-buf file descriptors.
* See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@ -676,6 +679,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@ -683,6 +688,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
*
* Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@ -700,7 +707,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
* page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@ -750,17 +758,23 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
* If set to 1, the driver supports sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
* If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
* Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
* DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
*
* If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
* commits.
*/
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@ -830,6 +844,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
/**
* DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
*
* Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
* virtualbox) have additional restrictions for cursor planes (thus
* making cursor planes on those drivers not truly universal,) e.g.
* they need cursor planes to act like one would expect from a mouse
* cursor and have correctly set hotspot properties.
* If this client cap is not set the DRM core will hide cursor plane on
* those virtualized drivers because not setting it implies that the
* client is not capable of dealing with those extra restrictions.
* Clients which do set cursor hotspot and treat the cursor plane
* like a mouse cursor should set this property.
* The client must enable &DRM_CLIENT_CAP_ATOMIC first.
*
* Setting this property on drivers which do not special case
* cursor planes (i.e. non-virtualized drivers) will return
* EOPNOTSUPP, which can be used by userspace to gauge
* requirements of the hardware/drivers they're running on.
*
* This capability is always supported for atomic-capable virtualized
* drivers starting from kernel version 6.6.
*/
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
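
A hedged sketch of opting in to the new client cap, assuming an open DRM file descriptor and the usual <sys/ioctl.h>/<errno.h> includes; per the comment above, DRM_CLIENT_CAP_ATOMIC must already have been enabled on the same file description.

#include <errno.h>
#include <sys/ioctl.h>

static int enable_cursor_hotspot_cap(int fd)
{
        struct drm_set_client_cap cap = {
                .capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
                .value = 1,
        };

        /* EOPNOTSUPP means the driver has no extra cursor-plane restrictions,
         * so the client can simply continue without the cap. */
        if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) != 0)
                return -errno;
        return 0;
}
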
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@ -881,6 +920,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@ -889,6 +929,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@ -901,6 +949,35 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
/**
* @deadline_nsec - fence deadline hint
*
* Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
* fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
* set.
*/
__u64 deadline_nsec;
};
/**
* struct drm_syncobj_eventfd
* @handle: syncobj handle.
* @flags: Zero to wait for the point to be signalled, or
* &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
* available for the point.
* @point: syncobj timeline point (set to zero for binary syncobjs).
* @fd: Existing eventfd to send events to.
* @pad: Must be zero.
*
* Register an eventfd to be signalled by a syncobj. The eventfd counter will
* be incremented by one.
*/
struct drm_syncobj_eventfd {
__u32 handle;
__u32 flags;
__u64 point;
__s32 fd;
__u32 pad;
};
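
An illustrative use of the new struct, assuming an open DRM file descriptor, an existing syncobj handle and an eventfd created by the caller; the ioctl used is DRM_IOCTL_SYNCOBJ_EVENTFD, defined further down in this header.

#include <errno.h>
#include <sys/ioctl.h>

/* Ask the kernel to bump the eventfd when the given timeline point signals. */
static int syncobj_notify_eventfd(int drm_fd, __u32 syncobj, __u64 point, int evfd)
{
        struct drm_syncobj_eventfd args = {
                .handle = syncobj,
                .flags = 0,      /* wait for the point to be signalled */
                .point = point,  /* 0 for binary syncobjs */
                .fd = evfd,
        };

        return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) ? -errno : 0;
}
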
@ -966,6 +1043,19 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/**
* DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
*
* GEM handles are not reference-counted by the kernel. User-space is
* responsible for managing their lifetime. For example, if user-space imports
* the same memory object twice on the same DRM file description, the same GEM
* handle is returned by both imports, and user-space needs to ensure
* &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
* when a memory object is allocated, then exported and imported again on the
* same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
* and always returns fresh new GEM handles even if an existing GEM handle
* already refers to the same memory object before the IOCTL is performed.
*/
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
@ -1006,7 +1096,37 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
/**
* DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
*
* User-space sets &drm_prime_handle.handle with the GEM handle to export and
* &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
* &drm_prime_handle.fd.
*
* The export can fail for any driver-specific reason, e.g. because export is
* not supported for this specific GEM handle (but might be for others).
*
* Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
*/
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
/**
* DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
*
* User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
* import, and gets back a GEM handle in &drm_prime_handle.handle.
* &drm_prime_handle.flags is unused.
*
* If an existing GEM handle refers to the memory object backing the DMA-BUF,
* that GEM handle is returned. Therefore user-space which needs to handle
* arbitrary DMA-BUFs must have a user-space lookup data structure to manually
* reference-count duplicated GEM handles. For more information see
* &DRM_IOCTL_GEM_CLOSE.
*
* The import can fail for any driver-specific reason, e.g. because import is
* only supported for DMA-BUFs allocated on this DRM device.
*
* Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
*/
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
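
A short sketch of the import path described above, assuming an open DRM file descriptor; the returned handle may be one the application already owns, which is why the comment above insists on reference counting before DRM_IOCTL_GEM_CLOSE.

#include <errno.h>
#include <sys/ioctl.h>

static int import_dmabuf(int drm_fd, int dmabuf_fd, __u32 *handle)
{
        struct drm_prime_handle args = { .fd = dmabuf_fd };

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) != 0)
                return -errno;
        *handle = args.handle;
        return 0;
}
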
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
@ -1058,6 +1178,26 @@ extern "C" {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
*
* KMS dumb buffers provide a very primitive way to allocate a buffer object
* suitable for scanout and map it for software rendering. KMS dumb buffers are
* not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
* buffers are not suitable to be displayed on any other device than the KMS
* device where they were allocated from. Also see
* :ref:`kms_dumb_buffer_objects`.
*
* The IOCTL argument is a struct drm_mode_create_dumb.
*
* User-space is expected to create a KMS dumb buffer via this IOCTL, then add
* it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
* &DRM_IOCTL_MODE_MAP_DUMB.
*
* &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
* &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
* driver preferences for dumb buffers.
*/
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
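
A hedged sketch of the flow described above, covering the CREATE_DUMB and MAP_DUMB steps (ADDFB is omitted); it assumes an open DRM file descriptor, a 32 bpp format, and the usual <sys/ioctl.h>/<sys/mman.h> includes.

#include <sys/ioctl.h>
#include <sys/mman.h>

/* Allocate a dumb buffer and map it; returns MAP_FAILED on error. */
static void *create_and_map_dumb(int fd, __u32 width, __u32 height,
                                 __u32 *handle, __u32 *pitch, __u64 *size)
{
        struct drm_mode_create_dumb create = {
                .width = width,
                .height = height,
                .bpp = 32,
        };
        struct drm_mode_map_dumb map = {0};

        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) != 0)
                return MAP_FAILED;
        map.handle = create.handle;
        if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) != 0)
                return MAP_FAILED;
        *handle = create.handle;
        *pitch = create.pitch;
        *size = create.size;
        return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, map.offset);
}
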
@ -1098,8 +1238,13 @@ extern "C" {
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
* will be filled with GEM buffer handles. Planes are valid until one has a
* zero handle -- this can be used to compute the number of planes.
* will be filled with GEM buffer handles. Fresh new GEM handles are always
* returned, even if another GEM handle referring to the same memory object
* already exists on the DRM file description. The caller is responsible for
* removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
* new handle will be returned for multiple planes in case they use the same
* memory object. Planes are valid until one has a zero handle -- this can be
* used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
@ -1107,9 +1252,36 @@ extern "C" {
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
*
* To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
* can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
* close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
* double-close handles which are specified multiple times in the array.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
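
A sketch of the export-then-close pattern spelled out in the comment above, assuming DRM master or CAP_SYS_ADMIN so the handles are filled in, and at most four planes; each unique handle is closed exactly once to avoid the double-close problem.

#include <errno.h>
#include <sys/ioctl.h>

static int getfb2_export_planes(int fd, __u32 fb_id, int dmabuf_fds[4])
{
        struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
        int i, j;

        if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb) != 0)
                return -errno;

        for (i = 0; i < 4 && fb.handles[i]; i++) {
                struct drm_prime_handle prime = { .handle = fb.handles[i] };

                dmabuf_fds[i] = ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) ?
                                -1 : prime.fd;
        }

        /* Planes may share a handle; close each unique handle only once. */
        for (i = 0; i < 4 && fb.handles[i]; i++) {
                int seen = 0;

                for (j = 0; j < i; j++)
                        if (fb.handles[j] == fb.handles[i])
                                seen = 1;
                if (!seen) {
                        struct drm_gem_close gc = { .handle = fb.handles[i] };

                        ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gc);
                }
        }
        return 0;
}
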
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
/**
* DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
*
* This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
* argument is a framebuffer object ID.
*
* This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
* planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
* alive. When the plane no longer uses the framebuffer (because the
* framebuffer is replaced with another one, or the plane is disabled), the
* framebuffer is cleaned up.
*
* This is useful to implement flicker-free transitions between two processes.
*
* Depending on the threat model, user-space may want to ensure that the
* framebuffer doesn't expose any sensitive user information: closed
* framebuffers attached to a plane can be read back by the next DRM master.
*/
#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
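
A minimal illustration of the handover flow described above, assuming an open DRM file descriptor that is about to stop being used as master.

#include <errno.h>
#include <sys/ioctl.h>

/* Release our reference to the framebuffer without disturbing the display. */
static int closefb(int fd, __u32 fb_id)
{
        struct drm_mode_closefb args = { .fb_id = fb_id };

        return ioctl(fd, DRM_IOCTL_MODE_CLOSEFB, &args) ? -errno : 0;
}
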
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@ -1121,25 +1293,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
/**
* struct drm_event - Header for DRM events
* @type: event type.
* @length: total number of payload bytes (including header).
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
* This struct is a header for events written back to user-space on the DRM FD.
* A read on the DRM FD will always only return complete events: e.g. if the
* read buffer is 100 bytes large and there are two 64 byte events pending,
* only one will be returned.
*
* Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
* up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
* &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
/**
* DRM_EVENT_VBLANK - vertical blanking event
*
* This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
* &_DRM_VBLANK_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_VBLANK 0x01
/**
* DRM_EVENT_FLIP_COMPLETE - page-flip completion event
*
* This event is sent in response to an atomic commit or legacy page-flip with
* the &DRM_MODE_PAGE_FLIP_EVENT flag set.
*
* The event payload is a struct drm_event_vblank.
*/
#define DRM_EVENT_FLIP_COMPLETE 0x02
/**
* DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
*
* This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
*
* The event payload is a struct drm_event_crtc_sequence.
*/
#define DRM_EVENT_CRTC_SEQUENCE 0x03
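
A sketch of reading events off the DRM FD per the rules above: a read returns only whole events, and each record's length field (header included) advances the cursor. The buffer size and the handling of DRM_EVENT_FLIP_COMPLETE are illustrative assumptions.

#include <unistd.h>

static void drain_drm_events(int fd)
{
        char buf[4096];
        ssize_t len = read(fd, buf, sizeof(buf));
        ssize_t off = 0;

        while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
                const struct drm_event *e = (const struct drm_event *)(buf + off);

                if (e->type == DRM_EVENT_FLIP_COMPLETE) {
                        const struct drm_event_vblank *vb =
                                (const struct drm_event_vblank *)e;
                        /* vb->user_data is the cookie passed with the flip. */
                        (void)vb;
                }
                off += e->length; /* length includes the header */
        }
}
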
struct drm_event_vblank {


@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
* modifier is specific to the modifer being used. For example, some modifiers
* modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
* These users musn't need to know to reason about the modifier value
* These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@ -323,6 +323,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@ -538,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
* Each group therefore consits out of four 256 byte units, which are also laid
* Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@ -657,6 +659,49 @@ extern "C" {
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 media compression
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
* main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths. For semi-planar formats like NV12, CCS planes follow the
* Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
* planes 2 and 3 for the respective CCS.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
/*
* Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
* compression.
*
* The main surface is tile4 and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
* be ignored. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* Converted Clear Color value and the next 32 bits store the Higher Converted
* Clear Color value when applicable. The Converted Clear Color values are
* consumed by the DE. The last 64 bits are used to store Color Discard Enable
* and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
@ -1058,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
* The top 4 bits (out of the 56 bits alloted for specifying vendor specific
* The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@ -1374,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
* boudaries, i.e. 8bit should be stored in this mode to save allocation
* boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with


@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
* DRM exposes many UAPI and structure definition to have a consistent
* and standardized interface with user.
* DRM exposes many UAPI and structure definitions to have a consistent
* and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
* to communicate to driver
* to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@ -488,6 +488,9 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
*
* The (type, type_id) combination is not a stable identifier: the
* type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@ -537,7 +540,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
* witout being aware that this could be triggering a lengthy modeset.
* without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@ -661,7 +664,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
@ -834,10 +837,23 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
*
* out matrix in
* |R| |0 1 2| |R|
* |G| = |3 4 5| x |G|
* |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
struct drm_color_ctm_3x4 {
/*
* Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
* (not two's complement!) format.
*/
__u64 matrix[12];
};
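
As a worked example (not part of the header), the S31.32 sign-magnitude format used by both matrices above can be produced from a double as follows: bit 63 carries the sign and the low 32 bits carry the fraction, so 1.0 encodes as 0x100000000 and an identity matrix puts that value on the diagonal.

#include <stdint.h>

static uint64_t ctm_s31_32_from_double(double v)
{
        uint64_t sign = 0;

        if (v < 0.0) {
                sign = 1ULL << 63;
                v = -v;
        }
        return sign | (uint64_t)(v * (double)(1ULL << 32));
}
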
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@ -849,6 +865,17 @@ struct drm_color_lut {
__u16 reserved;
};
/**
* struct drm_plane_size_hint - Plane size hints
*
* The plane SIZE_HINTS property blob contains an
* array of struct drm_plane_size_hint.
*/
struct drm_plane_size_hint {
__u16 width;
__u16 height;
};
/**
* struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
*
@ -873,23 +900,23 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @display_primaries.x: X cordinate of color primary.
* @display_primaries.y: Y cordinate of color primary.
* @display_primaries.x: X coordinate of color primary.
* @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
} display_primaries[3];
} display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
* @white_point.x: X cordinate of whitepoint of color primary.
* @white_point.y: Y cordinate of whitepoint of color primary.
* @white_point.x: X coordinate of whitepoint of color primary.
* @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
} white_point;
} white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
@ -949,6 +976,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
*
* When used with atomic uAPI, the driver will return an error if the hardware
* doesn't support performing an asynchronous page-flip for this update.
* User-space should handle this, e.g. by falling back to a regular page-flip.
*
* Note, some hardware might need to perform one last synchronous page-flip
* before being able to switch to asynchronous page-flips. As an exception,
* the driver will return success even though that first page-flip is not
* asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
@ -1024,13 +1060,25 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
/* create a dumb scanout buffer */
/**
* struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
* @height: buffer height in pixels
* @width: buffer width in pixels
* @bpp: bits per pixel
* @flags: must be zero
* @handle: buffer object handle
* @pitch: number of bytes between two consecutive lines
* @size: size of the whole buffer in bytes
*
* User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
* the kernel fills @handle, @pitch and @size.
*/
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
@ -1303,6 +1351,16 @@ struct drm_mode_rect {
__s32 y2;
};
/**
* struct drm_mode_closefb
* @fb_id: Framebuffer ID.
* @pad: Must be zero.
*/
struct drm_mode_closefb {
__u32 fb_id;
__u32 pad;
};
#if defined(__cplusplus)
}
#endif


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
* Copyright 2016-2022 HabanaLabs, Ltd.
* Copyright 2016-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@ -8,8 +8,7 @@
#ifndef HABANALABS_H_
#define HABANALABS_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#include <drm/drm.h>
/*
* Defines that are asic-specific but constitutes as ABI between kernel driver
@ -607,9 +606,9 @@ enum gaudi2_engine_id {
/*
* ASIC specific PLL index
*
* Used to retrieve in frequency info of different IPs via
* HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
* used as an index in struct hl_pll_frequency_info
* Used to retrieve in frequency info of different IPs via HL_INFO_PLL_FREQUENCY under
* DRM_IOCTL_HL_INFO IOCTL.
* The enums need to be used as an index in struct hl_pll_frequency_info.
*/
enum hl_goya_pll_index {
@ -708,7 +707,8 @@ enum hl_server_type {
HL_SERVER_GAUDI_HLS1H = 2,
HL_SERVER_GAUDI_TYPE1 = 3,
HL_SERVER_GAUDI_TYPE2 = 4,
HL_SERVER_GAUDI2_HLS2 = 5
HL_SERVER_GAUDI2_HLS2 = 5,
HL_SERVER_GAUDI2_TYPE1 = 7
};
/*
@ -723,6 +723,10 @@ enum hl_server_type {
* HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
* HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened
* HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened
* HL_NOTIFIER_EVENT_CRITICAL_HW_ERR - Indicates a HW error that requires SW abort and
* HW reset
* HL_NOTIFIER_EVENT_CRITICAL_FW_ERR - Indicates a FW error that requires SW abort and
* HW reset
*/
#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
@ -733,6 +737,8 @@ enum hl_server_type {
#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
#define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7)
#define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8)
#define HL_NOTIFIER_EVENT_CRITICL_HW_ERR (1ULL << 9)
#define HL_NOTIFIER_EVENT_CRITICL_FW_ERR (1ULL << 10)
/* Opcode for management ioctl
*
@ -780,16 +786,29 @@ enum hl_server_type {
* The address which accessing it caused the razwi.
* Razwi initiator.
* Razwi cause, was it a page fault or MMU access error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
* HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
* HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
* HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
* HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
* HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
* HL_INFO_HW_ERR_EVENT - Retrieve information on the reported HW error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error.
* May return 0 even though no new data is available, in that case
* timestamp will be 0.
* HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@ -824,6 +843,10 @@ enum hl_server_type {
#define HL_INFO_PAGE_FAULT_EVENT 33
#define HL_INFO_USER_MAPPINGS 34
#define HL_INFO_FW_GENERIC_REQ 35
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
#define HL_INFO_USER_ENGINE_ERR_EVENT 38
#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@ -863,11 +886,11 @@ enum hl_server_type {
* @dram_enabled: Whether the DRAM is enabled.
* @security_enabled: Whether security is enabled on device.
* @mme_master_slave_mode: Indicate whether the MME is working in master/slave
* configuration. Relevant for Greco and later.
* configuration. Relevant for Gaudi2 and later.
* @cpucp_version: The CPUCP f/w version.
* @card_name: The card name as passed by the f/w.
* @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled.
* Relevant for Greco and later.
* Relevant for Gaudi2 and later.
* @dram_page_size: The DRAM physical page size.
* @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled.
* Relevant for Gaudi2 and later.
@ -875,6 +898,12 @@ enum hl_server_type {
* application to use. Relevant for Gaudi2 and later.
* @device_mem_alloc_default_page_size: default page size used in device memory allocation.
* @revision_id: PCI revision ID of the ASIC.
* @tpc_interrupt_id: interrupt id for TPC to use in order to raise events towards the host.
* @rotator_enabled_mask: Bit-mask that represents which rotators are enabled.
* Relevant for Gaudi3 and later.
* @engine_core_interrupt_reg_addr: interrupt register address for engine core to use
* in order to raise events toward FW.
* @reserved_dram_size: DRAM size reserved for driver and firmware.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@ -902,15 +931,20 @@ struct hl_info_hw_ip_info {
__u64 dram_page_size;
__u32 edma_enabled_mask;
__u16 number_of_user_interrupts;
__u16 pad2;
__u64 reserved4;
__u8 reserved1;
__u8 reserved2;
__u64 reserved3;
__u64 device_mem_alloc_default_page_size;
__u64 reserved4;
__u64 reserved5;
__u64 reserved6;
__u32 reserved7;
__u8 reserved8;
__u32 reserved6;
__u8 reserved7;
__u8 revision_id;
__u8 pad[2];
__u16 tpc_interrupt_id;
__u32 rotator_enabled_mask;
__u32 reserved9;
__u64 engine_core_interrupt_reg_addr;
__u64 reserved_dram_size;
};
struct hl_info_dram_usage {
@ -958,6 +992,7 @@ struct hl_info_reset_count {
struct hl_info_time_sync {
__u64 device_time;
__u64 host_time;
__u64 tsc_time;
};
/**
@ -1161,6 +1196,53 @@ struct hl_info_undefined_opcode_event {
__u32 stream_id;
};
/**
* struct hl_info_hw_err_event - info about HW error
* @timestamp: timestamp of error occurrence
* @event_id: The async event ID (specific to each device type).
* @pad: size padding for u64 granularity.
*/
struct hl_info_hw_err_event {
__s64 timestamp;
__u16 event_id;
__u16 pad[3];
};
/* FW error definition for event_type in struct hl_info_fw_err_event */
enum hl_info_fw_err_type {
HL_INFO_FW_HEARTBEAT_ERR,
HL_INFO_FW_REPORTED_ERR,
};
/**
* struct hl_info_fw_err_event - info about FW error
* @timestamp: time-stamp of error occurrence
* @err_type: The type of event as defined in hl_info_fw_err_type.
* @event_id: The async event ID (specific to each device type, applicable only when event type is
* HL_INFO_FW_REPORTED_ERR).
* @pad: size padding for u64 granularity.
*/
struct hl_info_fw_err_event {
__s64 timestamp;
__u16 err_type;
__u16 event_id;
__u32 pad;
};
/**
* struct hl_info_engine_err_event - engine error info
* @timestamp: time-stamp of error occurrence
* @engine_id: engine id who reported the error.
* @error_count: Amount of errors reported.
* @pad: size padding for u64 granularity.
*/
struct hl_info_engine_err_event {
__s64 timestamp;
__u16 engine_id;
__u16 error_count;
__u32 pad;
};
/**
* struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
* @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
@ -1175,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@ -1209,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
/*
* struct hl_info_signed - device information signed by a secured device.
* @nonce: number only used once. random number provided by host. this also passed to the quote
* command as a qualifying data.
* @pub_data_len: length of the public data (bytes)
* @certificate_len: length of the certificate (bytes)
* @info_sig_len: length of the attestation signature (bytes)
* @public_data: public key info signed info data (outPublic + name + qualifiedName)
* @certificate: certificate for the signing key
* @info_sig: signature of the info + nonce data.
* @dev_info_len: length of device info (bytes)
* @dev_info: device info as byte array.
*/
struct hl_info_signed {
__u32 nonce;
__u16 pub_data_len;
__u16 certificate_len;
__u8 info_sig_len;
__u8 public_data[SEC_PUB_DATA_BUF_SZ];
__u8 certificate[SEC_CERTIFICATE_BUF_SZ];
__u8 info_sig[SEC_SIGNATURE_BUF_SZ];
__u16 dev_info_len;
__u8 dev_info[SEC_DEV_INFO_BUF_SZ];
__u8 pad[2];
};
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
@ -1344,7 +1453,7 @@ union hl_cb_args {
*
* HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
* Indicates if the CB was allocated and mapped by userspace
* (relevant to greco and above). User allocated CB is a command buffer,
* (relevant to Gaudi2 and later). User allocated CB is a command buffer,
* allocated by the user, via malloc (or similar). After allocating the
* CB, the user invokes - memory ioctl to map the user memory into a
* device virtual address. The user provides this address via the
@ -1369,7 +1478,7 @@ struct hl_cs_chunk {
* a DRAM address of the internal CB. In Gaudi, this might also
* represent a mapped host address of the CB.
*
* Greco onwards:
* Gaudi2 onwards:
* For H/W queue, this represents either a Handle of CB on the
* Host, or an SRAM, a DRAM, or a mapped host address of the CB.
*
@ -1486,17 +1595,31 @@ struct hl_cs_chunk {
*/
#define HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES 0x8000
/*
* The engines CS is merged into the existing CS ioctls.
* Use it to control engines modes.
*/
#define HL_CS_FLAGS_ENGINES_COMMAND 0x10000
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
/* HL_ENGINE_CORE_ values
/*
* enum hl_engine_command - engine command
*
* HL_ENGINE_CORE_HALT: engine core halt
* HL_ENGINE_CORE_RUN: engine core run
* @HL_ENGINE_CORE_HALT: engine core halt
* @HL_ENGINE_CORE_RUN: engine core run
* @HL_ENGINE_STALL: user engine/s stall
* @HL_ENGINE_RESUME: user engine/s resume
*/
#define HL_ENGINE_CORE_HALT (1 << 0)
#define HL_ENGINE_CORE_RUN (1 << 1)
enum hl_engine_command {
HL_ENGINE_CORE_HALT = 1,
HL_ENGINE_CORE_RUN = 2,
HL_ENGINE_STALL = 3,
HL_ENGINE_RESUME = 4,
HL_ENGINE_COMMAND_MAX
};
struct hl_cs_in {
@ -1520,6 +1643,18 @@ struct hl_cs_in {
/* the core command to be sent towards engine cores */
__u32 core_command;
};
/* Valid only when HL_CS_FLAGS_ENGINES_COMMAND is set */
struct {
/* this holds address of array of uint32 for engines */
__u64 engines;
/* number of engines in engines array */
__u32 num_engines;
/* the engine command to be sent towards engines */
__u32 engine_command;
};
};
union {
@ -2056,6 +2191,13 @@ struct hl_debug_args {
__u32 ctx_id;
};
#define HL_IOCTL_INFO 0x00
#define HL_IOCTL_CB 0x01
#define HL_IOCTL_CS 0x02
#define HL_IOCTL_WAIT_CS 0x03
#define HL_IOCTL_MEMORY 0x04
#define HL_IOCTL_DEBUG 0x05
/*
* Various information operations such as:
* - H/W IP information
@ -2070,8 +2212,7 @@ struct hl_debug_args {
* definitions of structures in kernel and userspace, e.g. in case of old
* userspace and new kernel driver
*/
#define HL_IOCTL_INFO \
_IOWR('H', 0x01, struct hl_info_args)
#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args)
/*
* Command Buffer
@ -2092,8 +2233,7 @@ struct hl_debug_args {
* and won't be returned to user.
*
*/
#define HL_IOCTL_CB \
_IOWR('H', 0x02, union hl_cb_args)
#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args)
/*
* Command Submission
@ -2115,7 +2255,7 @@ struct hl_debug_args {
* internal. The driver will get completion notifications from the device only
* on JOBS which are enqueued in the external queues.
*
* Greco onwards:
* Gaudi2 onwards:
* There is a single type of queue for all types of engines, either DMA engines
* for transfers from/to the host or inside the device, or compute engines.
* The driver will get completion notifications from the device for all queues.
@ -2145,8 +2285,7 @@ struct hl_debug_args {
* and only if CS N and CS N-1 are exactly the same (same CBs for the same
* queues).
*/
#define HL_IOCTL_CS \
_IOWR('H', 0x03, union hl_cs_args)
#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args)
/*
* Wait for Command Submission
@ -2178,9 +2317,7 @@ struct hl_debug_args {
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
*/
#define HL_IOCTL_WAIT_CS \
_IOWR('H', 0x04, union hl_wait_cs_args)
#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args)
/*
* Memory
@ -2197,8 +2334,7 @@ struct hl_debug_args {
* There is an option for the user to specify the requested virtual address.
*
*/
#define HL_IOCTL_MEMORY \
_IOWR('H', 0x05, union hl_mem_args)
#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args)
/*
* Debug
@ -2224,10 +2360,9 @@ struct hl_debug_args {
* The driver can decide to "kick out" the user if he abuses this interface.
*
*/
#define HL_IOCTL_DEBUG \
_IOWR('H', 0x06, struct hl_debug_args)
#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args)
#define HL_COMMAND_START 0x01
#define HL_COMMAND_END 0x07
#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO)
#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1)
#endif /* HABANALABS_H_ */


@ -38,13 +38,13 @@ extern "C" {
*/
/**
* DOC: uevents generated by i915 on it's device node
* DOC: uevents generated by i915 on its device node
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
* event from the gpu l3 cache. Additional information supplied is ROW,
* event from the GPU L3 cache. Additional information supplied is ROW,
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
* track of these events and if a specific cache-line seems to have a
* persistent error remap it with the l3 remapping tool supplied in
* track of these events, and if a specific cache-line seems to have a
* persistent error, remap it with the L3 remapping tool supplied in
* intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
@ -280,7 +280,16 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_ENGINE_SEMA(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
/*
* Top 4 bits of every non-engine counter are GT id.
*/
#define __I915_PMU_GT_SHIFT (60)
#define ___I915_PMU_OTHER(gt, x) \
(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
((__u64)(gt) << __I915_PMU_GT_SHIFT))
#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
@ -290,6 +299,12 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
#define __I915_PMU_ACTUAL_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 0)
#define __I915_PMU_REQUESTED_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 1)
#define __I915_PMU_INTERRUPTS(gt) ___I915_PMU_OTHER(gt, 2)
#define __I915_PMU_RC6_RESIDENCY(gt) ___I915_PMU_OTHER(gt, 3)
#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt) ___I915_PMU_OTHER(gt, 4)
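/*
 * Editor's note: a minimal sketch, not part of the UAPI header, showing how the
 * GT-aware macros above compose a perf "config" value: the non-engine counter
 * index sits in the low bits and the GT id in bits 63:60.  Wiring the value into
 * a struct perf_event_attr (and finding the i915 PMU type id in sysfs) is left
 * out and would be needed in a real program.
 */
static inline __u64 i915_pmu_actual_freq_config_example(unsigned int gt)
{
	/* Equivalent to I915_PMU_ACTUAL_FREQUENCY when gt == 0. */
	return __I915_PMU_ACTUAL_FREQUENCY(gt);
}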
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@ -659,7 +674,8 @@ typedef struct drm_i915_irq_wait {
* If the IOCTL is successful, the returned parameter will be set to one of the
* following values:
* * 0 if HuC firmware load is not complete,
* * 1 if HuC firmware is authenticated and running.
* * 1 if HuC firmware is loaded and fully authenticated,
* * 2 if HuC firmware is loaded and authenticated for clear media only
*/
#define I915_PARAM_HUC_STATUS 42
@ -677,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
* user specified bufffers for post-mortem debugging of GPU hangs. See
* user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@ -771,6 +787,31 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
/*
* Query the status of PXP support in i915.
*
* The query can fail in the following scenarios with the listed error codes:
* -ENODEV = PXP support is not available on the GPU device or in the
* kernel due to missing component drivers or kernel configs.
*
* If the IOCTL is successful, the returned parameter will be set to one of
* the following values:
* 1 = PXP feature is supported and is ready for use.
* 2 = PXP feature is supported but should be ready soon (pending
* initialization of non-i915 system dependencies).
*
* NOTE: When param is supported (positive return values), user space should
* still refer to the GEM PXP context-creation UAPI header specs to be
* aware of possible failure due to system state machine at the time.
*/
#define I915_PARAM_PXP_STATUS 58
/*
* Query if kernel allows marking a context to send a Freq hint to SLPC. This
* will enable use of the strategies allowed by the SLPC algorithm.
*/
#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59
/* Must be kept compact -- no holes and well documented */
/**
@ -1571,7 +1612,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
* the engine classess on which the object is being read, and the
* the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
@ -1780,7 +1821,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
* or wont be and could be discarded under memory pressure.
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@ -2096,8 +2137,32 @@ struct drm_i915_gem_context_param {
*
* -ENODEV: feature not available
* -EPERM: trying to mark a recoverable or not bannable context as protected
* -ENXIO: A dependency such as a component driver or firmware is not yet
* loaded so user space may need to attempt again. Depending on the
* device, this error may be reported if protected context creation is
* attempted very early after kernel start because the internal timeout
* waiting for such dependencies is not guaranteed to be larger than
* required (numbers differ depending on system and kernel config):
* - ADL/RPL: dependencies may take up to 3 seconds from kernel start
* while context creation internal timeout is 250 milliseconds
* - MTL: dependencies may take up to 8 seconds from kernel start
* while context creation internal timeout is 250 milliseconds
* NOTE: such dependencies happen once, so a subsequent call to create a
* protected context after a prior successful call will not experience
* such timeouts and will not return -ENXIO (unless the driver is reloaded,
* or, depending on the device, resumes from a suspended state).
* -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/*
* I915_CONTEXT_PARAM_LOW_LATENCY:
*
* Mark this context as a low latency workload which requires aggressive GT
* frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel
* supports this per context flag.
*/
#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
@ -2491,7 +2556,7 @@ struct i915_context_param_engines {
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@ -2573,19 +2638,29 @@ struct drm_i915_reg_read {
*
*/
/*
* struct drm_i915_reset_stats - Return global reset and other context stats
*
* The driver keeps a few stats for each context and also a global reset count.
* This struct can be used to query those stats.
*/
struct drm_i915_reset_stats {
/** @ctx_id: ID of the requested context */
__u32 ctx_id;
/** @flags: MBZ */
__u32 flags;
/* All resets since boot/module reload, for all contexts */
/** @reset_count: All resets since boot/module reload, for all contexts */
__u32 reset_count;
/* Number of batches lost when active in GPU, for this context */
/** @batch_active: Number of batches lost when active in GPU, for this context */
__u32 batch_active;
/* Number of batches lost pending for execution, for this context */
/** @batch_pending: Number of batches lost pending for execution, for this context */
__u32 batch_pending;
/** @pad: MBZ */
__u32 pad;
};
@ -2676,6 +2751,10 @@ enum drm_i915_oa_format {
I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
I915_OA_FORMAT_A24u40_A14u32_B8_C8,
/* MTL OAM */
I915_OAM_FORMAT_MPEC8u64_B8_C8,
I915_OAM_FORMAT_MPEC8u32_B8_C8,
I915_OA_FORMAT_MAX /* non-ABI */
};
@ -2758,6 +2837,25 @@ enum drm_i915_perf_property_id {
*/
DRM_I915_PERF_PROP_POLL_OA_PERIOD,
/**
* Multiple engines may be mapped to the same OA unit. The OA unit is
* identified by class:instance of any engine mapped to it.
*
* This parameter specifies the engine class and must be passed along
* with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
*
* This property is available in perf revision 6.
*/
DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
/**
* This parameter specifies the engine instance and must be passed along
* with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
*
* This property is available in perf revision 6.
*/
DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@ -2940,6 +3038,7 @@ struct drm_i915_query_item {
* - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
* - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
* - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
* - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
*/
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
@ -2948,6 +3047,7 @@ struct drm_i915_query_item {
#define DRM_I915_QUERY_MEMORY_REGIONS 4
#define DRM_I915_QUERY_HWCONFIG_BLOB 5
#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
/* Must be kept compact -- no holes and well documented */
/**
@ -3173,7 +3273,7 @@ struct drm_i915_query_topology_info {
* // enough to hold our array of engines. The kernel will fill out the
* // item.length for us, which is the number of bytes we need.
* //
* // Alternatively a large buffer can be allocated straight away enabling
* // Alternatively a large buffer can be allocated straightaway enabling
* // querying in one pass, in which case item.length should contain the
* // length of the provided buffer.
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@ -3183,7 +3283,7 @@ struct drm_i915_query_topology_info {
* // Now that we allocated the required number of bytes, we call the ioctl
* // again, this time with the data_ptr pointing to our newly allocated
* // blob, which the kernel can then populate with info on all engines.
* item.data_ptr = (uintptr_t)&info,
* item.data_ptr = (uintptr_t)&info;
*
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
* if (err) ...
@ -3213,7 +3313,7 @@ struct drm_i915_query_topology_info {
/**
* struct drm_i915_engine_info
*
* Describes one engine and it's capabilities as known to the driver.
* Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
/** @engine: Engine class and instance. */
@ -3493,6 +3593,20 @@ struct drm_i915_query_memory_regions {
struct drm_i915_memory_region_info regions[];
};
/**
* struct drm_i915_query_guc_submission_version - query GuC submission interface version
*/
struct drm_i915_query_guc_submission_version {
/** @branch: Firmware branch version. */
__u32 branch;
/** @major: Firmware major version. */
__u32 major;
/** @minor: Firmware minor version. */
__u32 minor;
/** @patch: Firmware patch version. */
__u32 patch;
};
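/*
 * Editor's note: a minimal usage sketch, not part of the UAPI header, of reading
 * the GuC submission version with the query item/wrapper pattern described
 * earlier in this header.  It assumes <sys/ioctl.h>, <stdint.h> and the
 * drm_i915_query wrapper struct are available, and that fd is an open i915 DRM
 * file descriptor.
 */
static inline int query_guc_submission_version_example(int fd,
		struct drm_i915_query_guc_submission_version *ver)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_GUC_SUBMISSION_VERSION,
		.length = sizeof(*ver),
		.data_ptr = (__u64)(uintptr_t)ver,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};

	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query))
		return -1;
	/* A negative item.length indicates a per-item error from the kernel. */
	return item.length < 0 ? -1 : 0;
}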
/**
* DOC: GuC HWCONFIG blob uAPI
*
@ -3607,9 +3721,13 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
* struct drm_i915_gem_create_ext_protected_content.
*
* For I915_GEM_CREATE_EXT_SET_PAT usage see
* struct drm_i915_gem_create_ext_set_pat.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
#define I915_GEM_CREATE_EXT_SET_PAT 2
__u64 extensions;
};
@ -3724,6 +3842,43 @@ struct drm_i915_gem_create_ext_protected_content {
__u32 flags;
};
/**
* struct drm_i915_gem_create_ext_set_pat - The
* I915_GEM_CREATE_EXT_SET_PAT extension.
*
* If this extension is provided, the specified caching policy (PAT index) is
* applied to the buffer object.
*
* Below is an example of how to create an object with a specific caching policy:
*
* .. code-block:: C
*
* struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
* .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
* .pat_index = 0,
* };
* struct drm_i915_gem_create_ext create_ext = {
* .size = PAGE_SIZE,
* .extensions = (uintptr_t)&set_pat_ext,
* };
*
* int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
* if (err) ...
*/
struct drm_i915_gem_create_ext_set_pat {
/** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
/**
* @pat_index: PAT index to be set
* The PAT index is a bit field in the Page Table Entry used to control caching
* behavior for GPU accesses. The definition of the PAT index is
* platform dependent and can be found in the hardware specifications.
*/
__u32 pat_index;
/** @rsvd: reserved for future use */
__u32 rsvd;
};
/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

View File

@ -53,21 +53,44 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
#define DRM_IVPU_JOB_PRIORITY_IDLE 1
#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
/**
* DRM_IVPU_CAP_METRIC_STREAMER
*
* Metric streamer support. Provides sampling of various hardware performance
* metrics like DMA bandwidth and cache miss/hits. Can be used for profiling.
*/
#define DRM_IVPU_CAP_METRIC_STREAMER 1
/**
* DRM_IVPU_CAP_DMA_MEMORY_RANGE
*
* The driver has the capability to allocate a separate memory range
* accessible by hardware DMA.
*/
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@ -96,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only)
*
* %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
* Value of current context scheduling priority (read-write).
* See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
*
* %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only)
*
@ -119,6 +138,8 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_SKU:
* VPU SKU ID (read-only)
*
* %DRM_IVPU_PARAM_CAPABILITIES:
* Supported capabilities (read-only)
*/
__u32 param;
@ -129,8 +150,10 @@ struct drm_ivpu_param {
__u64 value;
};
#define DRM_IVPU_BO_HIGH_MEM 0x00000001
#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@ -140,6 +163,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
@ -175,7 +199,7 @@ struct drm_ivpu_bo_create {
*
* %DRM_IVPU_BO_UNCACHED:
*
* Allocated BO will not be cached on host side nor snooped on the VPU side.
* Not supported. Use DRM_IVPU_BO_WC instead.
*
* %DRM_IVPU_BO_WC:
*
@ -265,10 +289,23 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
/**
* @priority:
*
* Priority to be set for the related job command queue; can be one of the following:
* %DRM_IVPU_JOB_PRIORITY_DEFAULT
* %DRM_IVPU_JOB_PRIORITY_IDLE
* %DRM_IVPU_JOB_PRIORITY_NORMAL
* %DRM_IVPU_JOB_PRIORITY_FOCUS
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
};
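/*
 * Editor's note: a minimal sketch, not part of the UAPI header, showing how the
 * new @priority field is filled on submission.  Only the fields visible in this
 * hunk are set; the buffer list and engine selection, as well as the
 * DRM_IOCTL_IVPU_SUBMIT request number used below, are assumed to come from the
 * rest of this header and from an already prepared command buffer.
 */
static inline int ivpu_submit_focus_example(int fd, struct drm_ivpu_submit *args)
{
	args->commands_offset = 0;                      /* start of the command buffer */
	args->priority = DRM_IVPU_JOB_PRIORITY_FOCUS;   /* scheduling hint for this job */
	return ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, args);  /* assumed ioctl name */
}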
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
#define DRM_IVPU_JOB_STATUS_ABORTED 256
/**
* struct drm_ivpu_bo_wait - Wait for BO to become inactive

View File

@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
@ -151,8 +154,13 @@ struct drm_msm_gem_info {
#define MSM_PREP_READ 0x01
#define MSM_PREP_WRITE 0x02
#define MSM_PREP_NOSYNC 0x04
#define MSM_PREP_BOOST 0x08
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
#define MSM_PREP_FLAGS (MSM_PREP_READ | \
MSM_PREP_WRITE | \
MSM_PREP_NOSYNC | \
MSM_PREP_BOOST | \
0)
struct drm_msm_gem_cpu_prep {
__u32 handle; /* in */
@ -181,7 +189,11 @@ struct drm_msm_gem_cpu_fini {
*/
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
#ifdef __cplusplus
__u32 _or; /* in, value OR'd with result */
#else
__u32 or; /* in, value OR'd with result */
#endif
__s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
@ -286,6 +298,11 @@ struct drm_msm_gem_submit {
};
#define MSM_WAIT_FENCE_BOOST 0x00000001
#define MSM_WAIT_FENCE_FLAGS ( \
MSM_WAIT_FENCE_BOOST | \
0)
/* The normal way to synchronize with the GPU is just to CPU_PREP on
* a buffer if you need to access it from the CPU (other cmdstream
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
@ -295,7 +312,7 @@ struct drm_msm_gem_submit {
*/
struct drm_msm_wait_fence {
__u32 fence; /* in */
__u32 pad;
__u32 flags; /* in, bitmask of MSM_WAIT_FENCE_x */
struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
};
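/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of waiting on a
 * fence with the new MSM_WAIT_FENCE_BOOST flag.  The drm_msm_timespec field
 * names and the DRM_IOCTL_MSM_WAIT_FENCE request number are assumed from the
 * rest of this header; fd is an open msm DRM file descriptor.
 */
static inline int msm_wait_fence_boost_example(int fd, __u32 fence, __u32 queueid)
{
	struct drm_msm_wait_fence wait = {
		.fence = fence,
		.flags = MSM_WAIT_FENCE_BOOST,             /* ask for a clock boost while waiting */
		.timeout = { .tv_sec = 1, .tv_nsec = 0 },  /* relative timeout */
		.queueid = queueid,
	};
	return ioctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait); /* assumed ioctl name */
}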

View File

@ -33,11 +33,104 @@
extern "C" {
#endif
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
/*
* NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
*
* Query the maximum number of IBs that can be pushed through a single
* &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
* ioctl().
*/
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
/*
* NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
*
* Query the VRAM BAR size.
*/
#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
/*
* NOUVEAU_GETPARAM_VRAM_USED
*
* Get remaining VRAM size.
*/
#define NOUVEAU_GETPARAM_VRAM_USED 19
/*
* NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
*
* Query whether tile mode and PTE kind are accepted with VM allocs or not.
*/
#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
};
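/*
 * Editor's note: a minimal usage sketch, not part of the UAPI header, of reading
 * one of the new getparam values.  DRM_IOCTL_NOUVEAU_GETPARAM is defined further
 * down in this header; it assumes <sys/ioctl.h> is included and fd is an open
 * nouveau DRM file descriptor.
 */
static inline int nouveau_query_exec_push_max_example(int fd, __u64 *max_pushes)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_EXEC_PUSH_MAX,
	};
	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
		return -1;
	*max_pushes = gp.value;
	return 0;
}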
/*
* Those are used to support selecting the main engine used on Kepler.
* This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle
*/
#define NOUVEAU_FIFO_ENGINE_GR 0x01
#define NOUVEAU_FIFO_ENGINE_VP 0x02
#define NOUVEAU_FIFO_ENGINE_PPP 0x04
#define NOUVEAU_FIFO_ENGINE_BSP 0x08
#define NOUVEAU_FIFO_ENGINE_CE 0x30
struct drm_nouveau_channel_alloc {
__u32 fb_ctxdma_handle;
__u32 tt_ctxdma_handle;
__s32 channel;
__u32 pushbuf_domains;
/* Notifier memory */
__u32 notifier_handle;
/* DRM-enforced subchannel assignments */
struct {
__u32 handle;
__u32 grclass;
} subchan[8];
__u32 nr_subchan;
};
struct drm_nouveau_channel_free {
__s32 channel;
};
struct drm_nouveau_notifierobj_alloc {
__u32 channel;
__u32 handle;
__u32 size;
__u32 offset;
};
struct drm_nouveau_gpuobj_free {
__s32 channel;
__u32 handle;
};
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
/* The BO will never be shared via import or export. */
#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@ -98,6 +191,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@ -126,16 +220,231 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
/**
* struct drm_nouveau_sync - sync object
*
* This structure serves as a synchronization mechanism for (potentially)
* asynchronous operations such as EXEC or VM_BIND.
*/
struct drm_nouveau_sync {
/**
* @flags: the flags for a sync object
*
* The first 8 bits are used to determine the type of the sync object.
*/
__u32 flags;
#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
/**
* @handle: the handle of the sync object
*/
__u32 handle;
/**
* @timeline_value:
*
* The timeline point of the sync object in case the syncobj is of
* type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
};
/**
* struct drm_nouveau_vm_init - GPU VA space init structure
*
* Used to initialize the GPU's VA space for a user client, telling the kernel
* which portion of the VA space is managed by the UMD and kernel respectively.
*
* For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
* channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
* with -ENOSYS.
*/
struct drm_nouveau_vm_init {
/**
* @kernel_managed_addr: start address of the kernel managed VA space
* region
*/
__u64 kernel_managed_addr;
/**
* @kernel_managed_size: size of the kernel managed VA space region in
* bytes
*/
__u64 kernel_managed_size;
};
/**
* struct drm_nouveau_vm_bind_op - VM_BIND operation
*
* This structure represents a single VM_BIND operation. UMDs should pass
* an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
*/
struct drm_nouveau_vm_bind_op {
/**
* @op: the operation type
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
* space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
* passed to instruct the kernel to create sparse mappings for the
* given range.
*
* %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
* GPU's VA space. If the region the mapping is located in is a
* sparse region, new sparse mappings are created where the unmapped
* (memory backed) mapping was mapped previously. To remove a sparse
* region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
*/
__u32 op;
#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
/**
* @flags: the flags for a &drm_nouveau_vm_bind_op
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
* space region should be sparse.
*/
__u32 flags;
#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
/**
* @handle: the handle of the DRM GEM object to map
*/
__u32 handle;
/**
* @pad: 32 bit padding, should be 0
*/
__u32 pad;
/**
* @addr:
*
* the address the VA space region or (memory backed) mapping should be mapped to
*/
__u64 addr;
/**
* @bo_offset: the offset within the BO backing the mapping
*/
__u64 bo_offset;
/**
* @range: the size of the requested mapping in bytes
*/
__u64 range;
};
/**
* struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
*/
struct drm_nouveau_vm_bind {
/**
* @op_count: the number of &drm_nouveau_vm_bind_op
*/
__u32 op_count;
/**
* @flags: the flags for a &drm_nouveau_vm_bind ioctl
*
* Supported values:
*
* %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
* operation should be executed asynchronously by the kernel.
*
* If this flag is not supplied the kernel executes the associated
* operations synchronously and doesn't accept any &drm_nouveau_sync
* objects.
*/
__u32 flags;
#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
/**
* @wait_count: the number of wait &drm_nouveau_syncs
*/
__u32 wait_count;
/**
* @sig_count: the number of &drm_nouveau_syncs to signal when finished
*/
__u32 sig_count;
/**
* @wait_ptr: pointer to &drm_nouveau_syncs to wait for
*/
__u64 wait_ptr;
/**
* @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
*/
__u64 sig_ptr;
/**
* @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
*/
__u64 op_ptr;
};
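/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of a single
 * synchronous map via VM_BIND.  Because DRM_NOUVEAU_VM_BIND_RUN_ASYNC is not
 * set, no drm_nouveau_sync objects are passed.  DRM_IOCTL_NOUVEAU_VM_BIND is
 * defined further down in this header; the VM must have been set up with
 * DRM_IOCTL_NOUVEAU_VM_INIT first, and <stdint.h>/<sys/ioctl.h> are assumed.
 */
static inline int nouveau_vm_bind_map_example(int fd, __u32 handle,
					      __u64 addr, __u64 range)
{
	struct drm_nouveau_vm_bind_op op = {
		.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
		.handle = handle,     /* GEM object backing the mapping */
		.addr = addr,         /* GPU virtual address to map at */
		.bo_offset = 0,
		.range = range,       /* size of the mapping in bytes */
	};
	struct drm_nouveau_vm_bind bind = {
		.op_count = 1,
		.op_ptr = (__u64)(uintptr_t)&op,
	};
	return ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
}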
/**
* struct drm_nouveau_exec_push - EXEC push operation
*
* This structure represents a single EXEC push operation. UMDs should pass an
* array of this structure via struct drm_nouveau_exec's &push_ptr field.
*/
struct drm_nouveau_exec_push {
/**
* @va: the virtual address of the push buffer mapping
*/
__u64 va;
/**
* @va_len: the length of the push buffer mapping
*/
__u32 va_len;
/**
* @flags: the flags for this push buffer mapping
*/
__u32 flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};
/**
* struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
*/
struct drm_nouveau_exec {
/**
* @channel: the channel to execute the push buffer in
*/
__u32 channel;
/**
* @push_count: the number of &drm_nouveau_exec_push ops
*/
__u32 push_count;
/**
* @wait_count: the number of wait &drm_nouveau_syncs
*/
__u32 wait_count;
/**
* @sig_count: the number of &drm_nouveau_syncs to signal when finished
*/
__u32 sig_count;
/**
* @wait_ptr: pointer to &drm_nouveau_syncs to wait for
*/
__u64 wait_ptr;
/**
* @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
*/
__u64 sig_ptr;
/**
* @push_ptr: pointer to &drm_nouveau_exec_push ops
*/
__u64 push_ptr;
};
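/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of submitting a
 * single push buffer through DRM_IOCTL_NOUVEAU_EXEC (defined further down).
 * The push buffer is assumed to already be mapped in the VM (see VM_BIND); no
 * sync objects are attached, and <stdint.h>/<sys/ioctl.h> are assumed.
 */
static inline int nouveau_exec_one_push_example(int fd, __u32 channel,
						__u64 va, __u32 va_len)
{
	struct drm_nouveau_exec_push push = {
		.va = va,          /* VA of the push buffer mapping */
		.va_len = va_len,  /* length of the push buffer mapping */
	};
	struct drm_nouveau_exec exec = {
		.channel = channel,
		.push_count = 1,
		.push_ptr = (__u64)(uintptr_t)&push,
	};
	return ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
}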
#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_VM_INIT 0x10
#define DRM_NOUVEAU_VM_BIND 0x11
#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@ -188,6 +497,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@ -197,6 +510,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif

View File

@ -0,0 +1,962 @@
/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/**
* DOC: Introduction
*
* This documentation describes the Panthor IOCTLs.
*
* Just a few generic rules about the data passed to the Panthor IOCTLs:
*
* - Structures must be aligned on 64-bit/8-byte. If the object is not
* naturally aligned, a padding field must be added.
* - Fields must be explicitly aligned to their natural type alignment with
* pad[0..N] fields.
* - All padding fields will be checked by the driver to make sure they are
* zeroed.
* - Flags can be added, but not removed/replaced.
* - New fields can be added to the main structures (the structures
* directly passed to the ioctl). Those fields can be added at the end of
* the structure, or replace existing padding fields. Any new field being
* added must preserve the behavior that existed before those fields were
* added when a value of zero is passed.
* - New fields can be added to indirect objects (objects pointed by the
* main structure), iff those objects are passed a size to reflect the
* size known by the userspace driver (see drm_panthor_obj_array::stride
* or drm_panthor_dev_query::size).
* - If the kernel driver is too old to know some fields, those will be
* ignored if zero, and otherwise rejected (and so will be zero on output).
* - If userspace is too old to know some fields, those will be zeroed
* (input) before the structure is parsed by the kernel driver.
* - Each new flag/field addition must come with a driver version update so
* the userspace driver doesn't have to trial and error to know which
* flags are supported.
* - Structures should not contain unions, as this would defeat the
* extensibility of such structures.
* - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
* at the end of the drm_panthor_ioctl_id enum.
*/
/**
* DOC: MMIO regions exposed to userspace.
*
* .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
*
* File offset for all MMIO regions being exposed to userspace. Don't use
* this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
* pgoffset passed to mmap2() is an unsigned long, which forces us to use a
* different offset on 32-bit and 64-bit systems.
*
* .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
*
* File offset for the LATEST_FLUSH_ID register. The Userspace driver controls
* GPU cache flushing through CS instructions, but the flush reduction
* mechanism requires a flush_id. This flush_id could be queried with an
* ioctl, but Arm provides a well-isolated register page containing only this
* read-only register, so let's expose this page through a static mmap offset
* and allow direct mapping of this MMIO region so we can avoid the
* user <-> kernel round-trip.
*/
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \
DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0)
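/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of mapping the
 * LATEST_FLUSH_ID register page described above.  It assumes a 64-bit build,
 * that the exposed region is at least one page long, that <sys/mman.h> is
 * included, and that fd is an open panthor DRM file descriptor.
 */
static inline __u32 panthor_read_latest_flush_id_example(int fd)
{
	void *page = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
			  DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
	if (page == MAP_FAILED)
		return 0;
	__u32 flush_id = *(volatile __u32 *)page; /* read-only register */
	munmap(page, 4096);
	return flush_id;
}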
/**
* DOC: IOCTL IDs
*
* enum drm_panthor_ioctl_id - IOCTL IDs
*
* Place new ioctls at the end, don't re-order, don't replace or remove entries.
*
* These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
* definitions instead.
*/
enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
DRM_PANTHOR_DEV_QUERY = 0,
/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
DRM_PANTHOR_VM_CREATE,
/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
DRM_PANTHOR_VM_DESTROY,
/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
DRM_PANTHOR_VM_BIND,
/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
DRM_PANTHOR_VM_GET_STATE,
/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
DRM_PANTHOR_BO_CREATE,
/**
* @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
* mmap to map a GEM object.
*/
DRM_PANTHOR_BO_MMAP_OFFSET,
/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
DRM_PANTHOR_GROUP_CREATE,
/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
DRM_PANTHOR_GROUP_DESTROY,
/**
* @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
* to a specific scheduling group.
*/
DRM_PANTHOR_GROUP_SUBMIT,
/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
DRM_PANTHOR_GROUP_GET_STATE,
/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
DRM_PANTHOR_TILER_HEAP_CREATE,
/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
DRM_PANTHOR_TILER_HEAP_DESTROY,
};
/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
* @__type: Suffix of the type being passed to the IOCTL.
*
* Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
* values instead.
*
* Return: An IOCTL number to be passed to ioctl() from userspace.
*/
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
struct drm_panthor_ ## __type)
#define DRM_IOCTL_PANTHOR_DEV_QUERY \
DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query)
#define DRM_IOCTL_PANTHOR_VM_CREATE \
DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create)
#define DRM_IOCTL_PANTHOR_VM_DESTROY \
DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy)
#define DRM_IOCTL_PANTHOR_VM_BIND \
DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind)
#define DRM_IOCTL_PANTHOR_VM_GET_STATE \
DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state)
#define DRM_IOCTL_PANTHOR_BO_CREATE \
DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create)
#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \
DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset)
#define DRM_IOCTL_PANTHOR_GROUP_CREATE \
DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create)
#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \
DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy)
#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \
DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit)
#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \
DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create)
#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy)
/**
* DOC: IOCTL arguments
*/
/**
* struct drm_panthor_obj_array - Object array.
*
* This object is used to pass an array of objects whose size is subject to changes in
* future versions of the driver. In order to support this mutability, we pass a stride
* describing the size of the object as known by userspace.
*
* You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
* the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
* the object size.
*/
struct drm_panthor_obj_array {
/** @stride: Stride of object struct. Used for versioning. */
__u32 stride;
/** @count: Number of objects in the array. */
__u32 count;
/** @array: User pointer to an array of objects. */
__u64 array;
};
/**
* DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
* @cnt: Number of elements in the array.
* @ptr: Pointer to the array to pass to the kernel.
*
* Macro initializing a drm_panthor_obj_array based on the object size as known
* by userspace.
*/
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
/**
* enum drm_panthor_sync_op_flags - Synchronization operation flags.
*/
enum drm_panthor_sync_op_flags {
/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,
/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,
/**
* @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
* object type.
*/
DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,
/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,
/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};
/**
* struct drm_panthor_sync_op - Synchronization operation.
*/
struct drm_panthor_sync_op {
/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
__u32 flags;
/** @handle: Sync handle. */
__u32 handle;
/**
* @timeline_value: MBZ if
* (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
* DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
};
/**
* enum drm_panthor_dev_query_type - Query type
*
* Place new types at the end, don't re-order, don't remove or replace.
*/
enum drm_panthor_dev_query_type {
/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,
/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
};
/**
* struct drm_panthor_gpu_info - GPU information
*
* Structure grouping all queryable information relating to the GPU.
*/
struct drm_panthor_gpu_info {
/** @gpu_id: GPU ID. */
__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf)
/** @gpu_rev: GPU revision. */
__u32 gpu_rev;
/** @csf_id: Command stream frontend ID. */
__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf)
/** @l2_features: L2-cache features. */
__u32 l2_features;
/** @tiler_features: Tiler features. */
__u32 tiler_features;
/** @mem_features: Memory features. */
__u32 mem_features;
/** @mmu_features: MMU features. */
__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff)
/** @thread_features: Thread features. */
__u32 thread_features;
/** @max_threads: Maximum number of threads. */
__u32 max_threads;
/** @thread_max_workgroup_size: Maximum workgroup size. */
__u32 thread_max_workgroup_size;
/**
* @thread_max_barrier_size: Maximum number of threads that can wait
* simultaneously on a barrier.
*/
__u32 thread_max_barrier_size;
/** @coherency_features: Coherency features. */
__u32 coherency_features;
/** @texture_features: Texture features. */
__u32 texture_features[4];
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
__u64 l2_present;
/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
__u64 tiler_present;
/** @core_features: Used to discriminate core variants when they exist. */
__u32 core_features;
/** @pad: MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_csif_info - Command stream interface information
*
* Structure grouping all queryable information relating to the command stream interface.
*/
struct drm_panthor_csif_info {
/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
__u32 csg_slot_count;
/** @cs_slot_count: Number of command stream slots per group. */
__u32 cs_slot_count;
/** @cs_reg_count: Number of command stream registers. */
__u32 cs_reg_count;
/** @scoreboard_slot_count: Number of scoreboard slots. */
__u32 scoreboard_slot_count;
/**
* @unpreserved_cs_reg_count: Number of command stream registers reserved by
* the kernel driver to call a userspace command stream.
*
* All registers can be used by a userspace command stream, but the
* [cs_slot_count - unpreserved_cs_reg_count .. cs_slot_count] registers are
* used by the kernel when DRM_PANTHOR_IOCTL_GROUP_SUBMIT is called.
*/
__u32 unpreserved_cs_reg_count;
/**
* @pad: Padding field, set to zero.
*/
__u32 pad;
};
/**
* struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
*/
struct drm_panthor_dev_query {
/** @type: the query type (see drm_panthor_dev_query_type). */
__u32 type;
/**
* @size: size of the type being queried.
*
* If pointer is NULL, size is updated by the driver to provide the
* output structure size. If pointer is not NULL, the driver will
* only copy min(size, actual_structure_size) bytes to the pointer,
* and update the size accordingly. This allows us to extend query
* types without breaking userspace.
*/
__u32 size;
/**
* @pointer: user pointer to a query type struct.
*
* Pointer can be NULL, in which case, nothing is copied, but the
* actual structure size is returned. If not NULL, it must point to
* a location that's large enough to hold size bytes.
*/
__u64 pointer;
};
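/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of the
 * size-then-data query pattern described above, applied to
 * DRM_PANTHOR_DEV_QUERY_GPU_INFO.  It assumes <stdint.h> and <sys/ioctl.h> are
 * included and that fd is an open panthor DRM file descriptor.
 */
static inline int panthor_query_gpu_info_example(int fd,
		struct drm_panthor_gpu_info *info)
{
	struct drm_panthor_dev_query query = {
		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
		.size = 0,
		.pointer = 0,
	};

	/* First pass: pointer is NULL, so the driver fills in the actual size. */
	if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query))
		return -1;

	/* Second pass: copy at most sizeof(*info) bytes into our struct. */
	if (query.size > sizeof(*info))
		query.size = sizeof(*info);
	query.pointer = (__u64)(uintptr_t)info;
	return ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
}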
/**
* struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE
*/
struct drm_panthor_vm_create {
/** @flags: VM flags, MBZ. */
__u32 flags;
/** @id: Returned VM ID. */
__u32 id;
/**
* @user_va_range: Size of the VA space reserved for user objects.
*
* The kernel will pick the remaining space to map kernel-only objects to the
* VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
* ...). If the space left for kernel objects is too small, kernel object
* allocation will fail further down the road. One can use
* drm_panthor_gpu_info::mmu_features to extract the total virtual address
* range, and choose a user_va_range that leaves some space for the kernel.
*
* If user_va_range is zero, the kernel will pick a sensible value based on
* TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
* split should leave enough VA space for userspace processes to support SVM,
* while still allowing the kernel to map some amount of kernel objects in
* the kernel VA range). The value chosen by the driver will be returned in
* @user_va_range.
*
* User VA space always starts at 0x0, kernel VA space is always placed after
* the user VA range.
*/
__u64 user_va_range;
};
/**
* struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY
*/
struct drm_panthor_vm_destroy {
/** @id: ID of the VM to destroy. */
__u32 id;
/** @pad: MBZ. */
__u32 pad;
};
/**
* enum drm_panthor_vm_bind_op_flags - VM bind operation flags
*/
enum drm_panthor_vm_bind_op_flags {
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,
/**
* @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
*
* Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
*/
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
/**
* @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
*/
DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),
/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,
/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,
/**
* @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
*
* Just serves as a synchronization point on a VM queue.
*
* Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
* and drm_panthor_vm_bind_op::syncs contains at least one element.
*/
DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};
/**
* struct drm_panthor_vm_bind_op - VM bind operation
*/
struct drm_panthor_vm_bind_op {
/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
__u32 flags;
/**
* @bo_handle: Handle of the buffer object to map.
* MBZ for unmap or sync-only operations.
*/
__u32 bo_handle;
/**
* @bo_offset: Buffer object offset.
* MBZ for unmap or sync-only operations.
*/
__u64 bo_offset;
/**
* @va: Virtual address to map/unmap.
* MBZ for sync-only operations.
*/
__u64 va;
/**
* @size: Size to map/unmap.
* MBZ for sync-only operations.
*/
__u64 size;
/**
* @syncs: Array of struct drm_panthor_sync_op synchronization
* operations.
*
* This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
* the drm_panthor_vm_bind object containing this VM bind operation.
*
* This array shall not be empty for sync-only operations.
*/
struct drm_panthor_obj_array syncs;
};
/**
* enum drm_panthor_vm_bind_flags - VM bind flags
*/
enum drm_panthor_vm_bind_flags {
/**
* @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
* queue instead of being executed synchronously.
*/
DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};
/**
* struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
*/
struct drm_panthor_vm_bind {
/** @vm_id: VM targeted by the bind request. */
__u32 vm_id;
/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
__u32 flags;
/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
struct drm_panthor_obj_array ops;
};
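/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of a single
 * synchronous map operation.  It also shows the intended use of the
 * DRM_PANTHOR_OBJ_ARRAY() initializer defined earlier in this header.  No sync
 * operations are attached since DRM_PANTHOR_VM_BIND_ASYNC is not set.
 */
static inline int panthor_vm_bind_map_example(int fd, __u32 vm_id, __u32 bo_handle,
					      __u64 va, __u64 size)
{
	struct drm_panthor_vm_bind_op op = {
		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
		.bo_handle = bo_handle,
		.bo_offset = 0,
		.va = va,
		.size = size,
		/* .syncs left zeroed: an empty array, as required for synchronous binds */
	};
	struct drm_panthor_vm_bind bind = {
		.vm_id = vm_id,
		.flags = 0,
		.ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
	};
	return ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
}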
/**
* enum drm_panthor_vm_state - VM states.
*/
enum drm_panthor_vm_state {
/**
* @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
*
* New VM operations will be accepted on this VM.
*/
DRM_PANTHOR_VM_STATE_USABLE,
/**
* @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
*
* Something put the VM in an unusable state (like an asynchronous
* VM_BIND request failing for any reason).
*
* Once the VM is in this state, all new MAP operations will be
* rejected, and any GPU job targeting this VM will fail.
* UNMAP operations are still accepted.
*
* The only way to recover from an unusable VM is to create a new
* VM, and destroy the old one.
*/
DRM_PANTHOR_VM_STATE_UNUSABLE,
};
/**
* struct drm_panthor_vm_get_state - Get VM state.
*/
struct drm_panthor_vm_get_state {
/** @vm_id: VM targeted by the get_state request. */
__u32 vm_id;
/**
* @state: state returned by the driver.
*
* Must be one of the enum drm_panthor_vm_state values.
*/
__u32 state;
};
/**
* enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
*/
enum drm_panthor_bo_flags {
/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};
/**
* struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
*/
struct drm_panthor_bo_create {
/**
* @size: Requested size for the object
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
*/
__u32 flags;
/**
* @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
*
* If not zero, the field must refer to a valid VM ID, and implies that:
* - the buffer object will only ever be bound to that VM
* - cannot be exported as a PRIME fd
*/
__u32 exclusive_vm_id;
/**
* @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
/** @pad: MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
*/
struct drm_panthor_bo_mmap_offset {
/** @handle: Handle of the object we want an mmap offset for. */
__u32 handle;
/** @pad: MBZ. */
__u32 pad;
/** @offset: The fake offset to use for subsequent mmap calls. */
__u64 offset;
};
/**
* struct drm_panthor_queue_create - Queue creation arguments.
*/
struct drm_panthor_queue_create {
/**
* @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
* 15 being the highest priority.
*/
__u8 priority;
/** @pad: Padding fields, MBZ. */
__u8 pad[3];
/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
__u32 ringbuf_size;
};
/**
* enum drm_panthor_group_priority - Scheduling group priority
*/
enum drm_panthor_group_priority {
/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
PANTHOR_GROUP_PRIORITY_LOW = 0,
/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
PANTHOR_GROUP_PRIORITY_MEDIUM,
/** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */
PANTHOR_GROUP_PRIORITY_HIGH,
};
/**
* struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
*/
struct drm_panthor_group_create {
/** @queues: Array of drm_panthor_queue_create elements. */
struct drm_panthor_obj_array queues;
/**
* @max_compute_cores: Maximum number of cores that can be used by compute
* jobs across CS queues bound to this group.
*
* Must be less or equal to the number of bits set in @compute_core_mask.
*/
__u8 max_compute_cores;
/**
* @max_fragment_cores: Maximum number of cores that can be used by fragment
* jobs across CS queues bound to this group.
*
* Must be less or equal to the number of bits set in @fragment_core_mask.
*/
__u8 max_fragment_cores;
/**
* @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
* across CS queues bound to this group.
*
* Must be less or equal to the number of bits set in @tiler_core_mask.
*/
__u8 max_tiler_cores;
/** @priority: Group priority (see enum drm_panthor_group_priority). */
__u8 priority;
/** @pad: Padding field, MBZ. */
__u32 pad;
/**
* @compute_core_mask: Mask encoding cores that can be used for compute jobs.
*
* This field must have at least @max_compute_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::shader_present.
*/
__u64 compute_core_mask;
/**
* @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
*
* This field must have at least @max_fragment_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::shader_present.
*/
__u64 fragment_core_mask;
/**
* @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
*
* This field must have at least @max_tiler_cores bits set.
*
* The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
*/
__u64 tiler_core_mask;
/**
* @vm_id: VM ID to bind this group to.
*
* All submission to queues bound to this group will use this VM.
*/
__u32 vm_id;
/**
* @group_handle: Returned group handle. Passed back when submitting jobs or
* destroying a group.
*/
__u32 group_handle;
};
/**
* struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
*/
struct drm_panthor_group_destroy {
/** @group_handle: Group to destroy */
__u32 group_handle;
/** @pad: Padding field, MBZ. */
__u32 pad;
};
/**
* struct drm_panthor_queue_submit - Job submission arguments.
*
* This is describing the userspace command stream to call from the kernel
* command stream ring-buffer. Queue submission is always part of a group
* submission, taking one or more jobs to submit to the underlying queues.
*/
struct drm_panthor_queue_submit {
/** @queue_index: Index of the queue inside a group. */
__u32 queue_index;
/**
* @stream_size: Size of the command stream to execute.
*
* Must be 64-bit/8-byte aligned (the size of a CS instruction)
*
* Can be zero if stream_addr is zero too.
*
* When the stream size is zero, the queue submit serves as a
* synchronization point.
*/
__u32 stream_size;
/**
* @stream_addr: GPU address of the command stream to execute.
*
* Must be 64-byte aligned.
*
* Can be zero if stream_size is zero too.
*/
__u64 stream_addr;
/**
* @latest_flush: FLUSH_ID read at the time the stream was built.
*
* This allows cache flush elimination for the automatic
* flush+invalidate(all) done at submission time, which is needed to
* ensure the GPU doesn't get garbage when reading the indirect command
* stream buffers. If you want the cache flush to happen
* unconditionally, pass a zero here.
*
* Ignored when stream_size is zero.
*/
__u32 latest_flush;
/** @pad: MBZ. */
__u32 pad;
/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
struct drm_panthor_obj_array syncs;
};
/**
* struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
*/
struct drm_panthor_group_submit {
/** @group_handle: Handle of the group to queue jobs to. */
__u32 group_handle;
/** @pad: MBZ. */
__u32 pad;
/** @queue_submits: Array of drm_panthor_queue_submit objects. */
struct drm_panthor_obj_array queue_submits;
};
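/*
 * Editor's note: a minimal sketch, not part of the UAPI header, of submitting
 * one command stream to queue 0 of a group.  latest_flush is passed as 0 so the
 * kernel performs an unconditional flush+invalidate, as described above; no
 * sync operations are attached.
 */
static inline int panthor_group_submit_one_example(int fd, __u32 group_handle,
						   __u64 stream_addr, __u32 stream_size)
{
	struct drm_panthor_queue_submit qsubmit = {
		.queue_index = 0,
		.stream_size = stream_size,  /* 8-byte aligned CS size */
		.stream_addr = stream_addr,  /* 64-byte aligned CS GPU address */
		.latest_flush = 0,           /* force the cache flush */
	};
	struct drm_panthor_group_submit submit = {
		.group_handle = group_handle,
		.queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
	};
	return ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit);
}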
/**
* enum drm_panthor_group_state_flags - Group state flags
*/
enum drm_panthor_group_state_flags {
/**
* @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
*
* When a group ends up with this flag set, no jobs can be submitted to its queues.
*/
DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,
/**
* @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
*
* When a group ends up with this flag set, no jobs can be submitted to its queues.
*/
DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,
};
/**
* struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
*
* Used to query the state of a group and decide whether a new group should be created to
* replace it.
*/
struct drm_panthor_group_get_state {
/** @group_handle: Handle of the group to query state on */
__u32 group_handle;
/**
* @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
* group state.
*/
__u32 state;
/** @fatal_queues: Bitmask of queues that faced fatal faults. */
__u32 fatal_queues;
/** @pad: MBZ */
__u32 pad;
};
/**
* struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
*/
struct drm_panthor_tiler_heap_create {
/** @vm_id: VM ID the tiler heap should be mapped to */
__u32 vm_id;
/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
__u32 initial_chunk_count;
/**
* @chunk_size: Chunk size.
*
* Must be page-aligned and lie in the [128k:8M] range.
*/
__u32 chunk_size;
/**
* @max_chunks: Maximum number of chunks that can be allocated.
*
* Must be at least @initial_chunk_count.
*/
__u32 max_chunks;
/**
* @target_in_flight: Maximum number of in-flight render passes.
*
* If the heap has more than @target_in_flight tiler jobs in flight, the FW will wait for render
* passes to finish before queuing new tiler jobs.
*/
__u32 target_in_flight;
/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
__u32 handle;
/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the tiler heap context. */
__u64 tiler_heap_ctx_gpu_va;
/**
* @first_heap_chunk_gpu_va: First heap chunk.
*
* The tiler heap is formed of heap chunks forming a singly-linked list. This
* is the first element in the list.
*/
__u64 first_heap_chunk_gpu_va;
};
/**
* struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
*/
struct drm_panthor_tiler_heap_destroy {
/**
* @handle: Handle of the tiler heap to destroy.
*
* Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
*/
__u32 handle;
/** @pad: Padding field, MBZ. */
__u32 pad;
};
#if defined(__cplusplus)
}
#endif
#endif /* _PANTHOR_DRM_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,399 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef QAIC_ACCEL_H_
#define QAIC_ACCEL_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* The length (4K) includes the len and count fields of qaic_manage_msg */
#define QAIC_MANAGE_MAX_MSG_LENGTH SZ_4K
/* semaphore flags */
#define QAIC_SEM_INSYNCFENCE 2
#define QAIC_SEM_OUTSYNCFENCE 1
/* Semaphore commands */
#define QAIC_SEM_NOP 0
#define QAIC_SEM_INIT 1
#define QAIC_SEM_INC 2
#define QAIC_SEM_DEC 3
#define QAIC_SEM_WAIT_EQUAL 4
#define QAIC_SEM_WAIT_GT_EQ 5 /* Greater than or equal */
#define QAIC_SEM_WAIT_GT_0 6 /* Greater than 0 */
#define QAIC_TRANS_UNDEFINED 0
#define QAIC_TRANS_PASSTHROUGH_FROM_USR 1
#define QAIC_TRANS_PASSTHROUGH_TO_USR 2
#define QAIC_TRANS_PASSTHROUGH_FROM_DEV 3
#define QAIC_TRANS_PASSTHROUGH_TO_DEV 4
#define QAIC_TRANS_DMA_XFER_FROM_USR 5
#define QAIC_TRANS_DMA_XFER_TO_DEV 6
#define QAIC_TRANS_ACTIVATE_FROM_USR 7
#define QAIC_TRANS_ACTIVATE_FROM_DEV 8
#define QAIC_TRANS_ACTIVATE_TO_DEV 9
#define QAIC_TRANS_DEACTIVATE_FROM_USR 10
#define QAIC_TRANS_DEACTIVATE_FROM_DEV 11
#define QAIC_TRANS_STATUS_FROM_USR 12
#define QAIC_TRANS_STATUS_TO_USR 13
#define QAIC_TRANS_STATUS_FROM_DEV 14
#define QAIC_TRANS_STATUS_TO_DEV 15
#define QAIC_TRANS_TERMINATE_FROM_DEV 16
#define QAIC_TRANS_TERMINATE_TO_DEV 17
#define QAIC_TRANS_DMA_XFER_CONT 18
#define QAIC_TRANS_VALIDATE_PARTITION_FROM_DEV 19
#define QAIC_TRANS_VALIDATE_PARTITION_TO_DEV 20
/**
* struct qaic_manage_trans_hdr - Header for a transaction in a manage message.
* @type: In. Identifies this transaction. See QAIC_TRANS_* defines.
* @len: In. Length of this transaction, including this header.
*/
struct qaic_manage_trans_hdr {
__u32 type;
__u32 len;
};
/**
* struct qaic_manage_trans_passthrough - Defines a passthrough transaction.
* @hdr: In. Header to identify this transaction.
* @data: In. Payload of this transaction. Opaque to the driver. Userspace must
* encode in little endian and align/pad to 64-bit.
*/
struct qaic_manage_trans_passthrough {
struct qaic_manage_trans_hdr hdr;
__u8 data[];
};
/**
* struct qaic_manage_trans_dma_xfer - Defines a DMA transfer transaction.
* @hdr: In. Header to identify this transaction.
* @tag: In. Identifies this transfer in other transactions. Opaque to the
* driver.
* @pad: Structure padding.
* @addr: In. Address of the data to DMA to the device.
* @size: In. Length of the data to DMA to the device.
*/
struct qaic_manage_trans_dma_xfer {
struct qaic_manage_trans_hdr hdr;
__u32 tag;
__u32 pad;
__u64 addr;
__u64 size;
};
/**
* struct qaic_manage_trans_activate_to_dev - Defines an activate request.
* @hdr: In. Header to identify this transaction.
* @queue_size: In. Number of elements for DBC request and response queues.
* @eventfd: Unused.
* @options: In. Device specific options for this activate.
* @pad: Structure padding. Must be 0.
*/
struct qaic_manage_trans_activate_to_dev {
struct qaic_manage_trans_hdr hdr;
__u32 queue_size;
__u32 eventfd;
__u32 options;
__u32 pad;
};
/**
* struct qaic_manage_trans_activate_from_dev - Defines an activate response.
* @hdr: Out. Header to identify this transaction.
* @status: Out. Return code of the request from the device.
* @dbc_id: Out. Id of the assigned DBC for successful request.
* @options: Out. Device specific options for this activate.
*/
struct qaic_manage_trans_activate_from_dev {
struct qaic_manage_trans_hdr hdr;
__u32 status;
__u32 dbc_id;
__u64 options;
};
/**
* struct qaic_manage_trans_deactivate - Defines a deactivate request.
* @hdr: In. Header to identify this transaction.
* @dbc_id: In. Id of assigned DBC.
* @pad: Structure padding. Must be 0.
*/
struct qaic_manage_trans_deactivate {
struct qaic_manage_trans_hdr hdr;
__u32 dbc_id;
__u32 pad;
};
/**
* struct qaic_manage_trans_status_to_dev - Defines a status request.
* @hdr: In. Header to identify this transaction.
*/
struct qaic_manage_trans_status_to_dev {
struct qaic_manage_trans_hdr hdr;
};
/**
* struct qaic_manage_trans_status_from_dev - Defines a status response.
* @hdr: Out. Header to identify this transaction.
* @major: Out. NNC protocol version major number.
* @minor: Out. NNC protocol version minor number.
* @status: Out. Return code from device.
* @status_flags: Out. Flags from device. Bit 0 indicates if CRCs are required.
*/
struct qaic_manage_trans_status_from_dev {
struct qaic_manage_trans_hdr hdr;
__u16 major;
__u16 minor;
__u32 status;
__u64 status_flags;
};
/**
* struct qaic_manage_msg - Defines a message to the device.
* @len: In. Length of all the transactions contained within this message.
* @count: In. Number of transactions in this message.
* @data: In. Address to an array where the transactions can be found.
*/
struct qaic_manage_msg {
__u32 len;
__u32 count;
__u64 data;
};
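/*
 * Usage sketch (illustrative): a manage message carrying a single status
 * request. The transaction lives in its own buffer and @data holds its
 * address; the message is submitted with DRM_IOCTL_QAIC_MANAGE, defined later
 * in this header. Whether QAIC_TRANS_STATUS_FROM_USR is the right type for a
 * userspace-originated status query is an assumption of this sketch.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int qaic_query_status(int accel_fd)
{
	struct qaic_manage_trans_status_to_dev trans = {
		.hdr = {
			.type = QAIC_TRANS_STATUS_FROM_USR,
			.len = sizeof(trans),
		},
	};
	struct qaic_manage_msg msg = {
		.len = sizeof(trans),
		.count = 1,
		.data = (__u64)(uintptr_t)&trans,
	};

	return ioctl(accel_fd, DRM_IOCTL_QAIC_MANAGE, &msg);
}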
/**
* struct qaic_create_bo - Defines a request to create a buffer object.
* @size: In. Size of the buffer in bytes.
* @handle: Out. GEM handle for the BO.
* @pad: Structure padding. Must be 0.
*/
struct qaic_create_bo {
__u64 size;
__u32 handle;
__u32 pad;
};
/**
* struct qaic_mmap_bo - Defines a request to prepare a BO for mmap().
* @handle: In. Handle of the GEM BO to prepare for mmap().
* @pad: Structure padding. Must be 0.
* @offset: Out. Offset value to provide to mmap().
*/
struct qaic_mmap_bo {
__u32 handle;
__u32 pad;
__u64 offset;
};
/**
* struct qaic_sem - Defines a semaphore command for a BO slice.
* @val: In. Only lower 12 bits are valid.
* @index: In. Only lower 5 bits are valid.
* @presync: In. 1 if presync operation, 0 if postsync.
* @cmd: In. One of QAIC_SEM_*.
* @flags: In. Bitfield. See QAIC_SEM_INSYNCFENCE and QAIC_SEM_OUTSYNCFENCE
* @pad: Structure padding. Must be 0.
*/
struct qaic_sem {
__u16 val;
__u8 index;
__u8 presync;
__u8 cmd;
__u8 flags;
__u16 pad;
};
/**
* struct qaic_attach_slice_entry - Defines a single BO slice.
* @size: In. Size of this slice in bytes.
* @sem0: In. Semaphore command 0. Must be 0 if not valid.
* @sem1: In. Semaphore command 1. Must be 0 if not valid.
* @sem2: In. Semaphore command 2. Must be 0 if not valid.
* @sem3: In. Semaphore command 3. Must be 0 if not valid.
* @dev_addr: In. Device address this slice pushes to or pulls from.
* @db_addr: In. Address of the doorbell to ring.
* @db_data: In. Data to write to the doorbell.
* @db_len: In. Size of the doorbell data in bits - 32, 16, or 8. 0 is for
* inactive doorbells.
* @offset: In. Start of this slice as an offset from the start of the BO.
*/
struct qaic_attach_slice_entry {
__u64 size;
struct qaic_sem sem0;
struct qaic_sem sem1;
struct qaic_sem sem2;
struct qaic_sem sem3;
__u64 dev_addr;
__u64 db_addr;
__u32 db_data;
__u32 db_len;
__u64 offset;
};
/**
* struct qaic_attach_slice_hdr - Defines metadata for a set of BO slices.
* @count: In. Number of slices for this BO.
* @dbc_id: In. Associate the sliced BO with this DBC.
* @handle: In. GEM handle of the BO to slice.
* @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE
* @size: Deprecated. This value is ignored and size of @handle is used instead.
*/
struct qaic_attach_slice_hdr {
__u32 count;
__u32 dbc_id;
__u32 handle;
__u32 dir;
__u64 size;
};
/**
* struct qaic_attach_slice - Defines a set of BO slices.
* @hdr: In. Metadata of the set of slices.
* @data: In. Pointer to an array containing the slice definitions.
*/
struct qaic_attach_slice {
struct qaic_attach_slice_hdr hdr;
__u64 data;
};
/**
* struct qaic_execute_entry - Defines a BO to submit to the device.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
*/
struct qaic_execute_entry {
__u32 handle;
__u32 dir;
};
/**
* struct qaic_partial_execute_entry - Defines a BO to resize and submit.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
* @resize: In. New size of the BO. Must be <= the original BO size.
*          A @resize of 0 is interpreted as no DMA transfer being
*          involved.
*/
struct qaic_partial_execute_entry {
__u32 handle;
__u32 dir;
__u64 resize;
};
/**
* struct qaic_execute_hdr - Defines metadata for BO submission.
* @count: In. Number of BOs to submit.
* @dbc_id: In. DBC to submit the BOs on.
*/
struct qaic_execute_hdr {
__u32 count;
__u32 dbc_id;
};
/**
* struct qaic_execute - Defines a list of BOs to submit to the device.
* @hdr: In. BO list metadata.
* @data: In. Pointer to an array of BOs to submit.
*/
struct qaic_execute {
struct qaic_execute_hdr hdr;
__u64 data;
};
/**
* struct qaic_wait - Defines a blocking wait for BO execution.
* @handle: In. GEM handle of the BO to wait on.
* @timeout: In. Maximum time in ms to wait for the BO.
* @dbc_id: In. DBC the BO is submitted to.
* @pad: Structure padding. Must be 0.
*/
struct qaic_wait {
__u32 handle;
__u32 timeout;
__u32 dbc_id;
__u32 pad;
};
/**
* struct qaic_perf_stats_hdr - Defines metadata for getting BO perf info.
* @count: In. Number of BOs requested.
* @pad: Structure padding. Must be 0.
* @dbc_id: In. DBC the BOs are associated with.
*/
struct qaic_perf_stats_hdr {
__u16 count;
__u16 pad;
__u32 dbc_id;
};
/**
* struct qaic_perf_stats - Defines a request for getting BO perf info.
* @hdr: In. Request metadata
* @data: In. Pointer to array of stats structures that will receive the data.
*/
struct qaic_perf_stats {
struct qaic_perf_stats_hdr hdr;
__u64 data;
};
/**
* struct qaic_perf_stats_entry - Defines a BO perf info.
* @handle: In. GEM handle of the BO to get perf stats for.
* @queue_level_before: Out. Number of elements in the queue before this BO
* was submitted.
* @num_queue_element: Out. Number of elements added to the queue to submit
* this BO.
* @submit_latency_us: Out. Time taken by the driver to submit this BO.
* @device_latency_us: Out. Time taken by the device to execute this BO.
* @pad: Structure padding. Must be 0.
*/
struct qaic_perf_stats_entry {
__u32 handle;
__u32 queue_level_before;
__u32 num_queue_element;
__u32 submit_latency_us;
__u32 device_latency_us;
__u32 pad;
};
/**
* struct qaic_detach_slice - Detaches slicing configuration from BO.
* @handle: In. GEM handle of the BO to detach slicing configuration.
* @pad: Structure padding. Must be 0.
*/
struct qaic_detach_slice {
__u32 handle;
__u32 pad;
};
#define DRM_QAIC_MANAGE 0x00
#define DRM_QAIC_CREATE_BO 0x01
#define DRM_QAIC_MMAP_BO 0x02
#define DRM_QAIC_ATTACH_SLICE_BO 0x03
#define DRM_QAIC_EXECUTE_BO 0x04
#define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05
#define DRM_QAIC_WAIT_BO 0x06
#define DRM_QAIC_PERF_STATS_BO 0x07
#define DRM_QAIC_DETACH_SLICE_BO 0x08
#define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg)
#define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo)
#define DRM_IOCTL_QAIC_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MMAP_BO, struct qaic_mmap_bo)
#define DRM_IOCTL_QAIC_ATTACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_ATTACH_SLICE_BO, struct qaic_attach_slice)
#define DRM_IOCTL_QAIC_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait)
#define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats)
#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice)
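/*
 * Usage sketch (illustrative): the typical allocation path, pairing
 * DRM_IOCTL_QAIC_CREATE_BO with DRM_IOCTL_QAIC_MMAP_BO and a plain mmap() of
 * the returned offset. Assumes accel_fd is an open QAIC accel node; error
 * handling is reduced to NULL returns.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *qaic_alloc_and_map(int accel_fd, __u64 size)
{
	struct qaic_create_bo create = { .size = size };
	struct qaic_mmap_bo map = { 0 };
	void *ptr;

	if (ioctl(accel_fd, DRM_IOCTL_QAIC_CREATE_BO, &create))
		return NULL;

	map.handle = create.handle;
	if (ioctl(accel_fd, DRM_IOCTL_QAIC_MMAP_BO, &map))
		return NULL;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   accel_fd, map.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}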
#if defined(__cplusplus)
}
#endif
#endif /* QAIC_ACCEL_H_ */

View File

@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
V3D_CPU,
};
/**
@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu {
/* Pointer to an array of ioctl extensions*/
__u64 extensions;
struct {
__u32 ioc;
__u32 pad;
} v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@ -356,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
/**
* struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
* indirect CSD
*
* When an extension of DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
* points to this extension to define an indirect CSD submission. It creates a
* CPU job linked to a CSD job. The CPU job waits for the indirect CSD
* dependencies and, once they are signaled, it updates the CSD job config
* before allowing the CSD job execution.
*/
struct drm_v3d_indirect_csd {
struct drm_v3d_extension base;
/* Indirect CSD */
struct drm_v3d_submit_csd submit;
/* Handle of the indirect BO, that should be also attached to the
* indirect CSD.
*/
__u32 indirect;
/* Offset within the BO where the workgroup counts are stored */
__u32 offset;
/* Workgroups size */
__u32 wg_size;
/* Indices of the uniforms with the workgroup dispatch counts
* in the uniform stream. If the uniform rewrite is not needed,
* the offset must be 0xffffffff.
*/
__u32 wg_uniform_offsets[3];
};
/**
* struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
* a timestamp query
*
* When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
* this extension to define a timestamp query submission. This CPU job will
* calculate the timestamp query and update the query value within the
* timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
* query availability.
*/
struct drm_v3d_timestamp_query {
struct drm_v3d_extension base;
/* Array of queries' offsets within the timestamp BO for their value */
__u64 offsets;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
/* Number of queries */
__u32 count;
/* mbz */
__u32 pad;
};
/**
* struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
* reset timestamp queries
*
* When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
* points to this extension to define a reset timestamp submission. This CPU
* job will reset the timestamp queries based on value offset of the first
* query. Moreover, it will reset the timestamp syncobj to reset query
* availability.
*/
struct drm_v3d_reset_timestamp_query {
struct drm_v3d_extension base;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
/* Offset of the first query within the timestamp BO for its value */
__u32 offset;
/* Number of queries */
__u32 count;
};
/**
* struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
* query results to a buffer
*
* When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
* points to this extension to define a copy timestamp query submission. This
* CPU job will copy the timestamp queries results to a BO with the offset
* and stride defined in the extension.
*/
struct drm_v3d_copy_timestamp_query {
struct drm_v3d_extension base;
/* Define if should write to buffer using 64 or 32 bits */
__u8 do_64bit;
/* Define if it can write to buffer even if the query is not available */
__u8 do_partial;
/* Define if it should write availability bit to buffer */
__u8 availability_bit;
/* mbz */
__u8 pad;
/* Offset of the buffer in the BO */
__u32 offset;
/* Stride of the buffer in the BO */
__u32 stride;
/* Number of queries */
__u32 count;
/* Array of queries' offsets within the timestamp BO for their value */
__u64 offsets;
/* Array of timestamp syncobjs to indicate their availability */
__u64 syncs;
};
/**
* struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
* reset performance queries
*
* When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
* points to this extension to define a reset performance submission. This CPU
* job will reset the performance queries by resetting the values of the
* performance monitors. Moreover, it will reset the syncobj to reset query
* availability.
*/
struct drm_v3d_reset_performance_query {
struct drm_v3d_extension base;
/* Array of performance queries' syncobjs to indicate their availability */
__u64 syncs;
/* Number of queries */
__u32 count;
/* Number of performance monitors */
__u32 nperfmons;
/* Array of u64 user-pointers that point to an array of kperfmon_ids */
__u64 kperfmon_ids;
};
/**
* struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
* performance query results to a buffer
*
* When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
* points to this extension to define a copy performance query submission. This
* CPU job will copy the performance queries results to a BO with the offset
* and stride defined in the extension.
*/
struct drm_v3d_copy_performance_query {
struct drm_v3d_extension base;
/* Define if should write to buffer using 64 or 32 bits */
__u8 do_64bit;
/* Define if it can write to buffer even if the query is not available */
__u8 do_partial;
/* Define if it should write availability bit to buffer */
__u8 availability_bit;
/* mbz */
__u8 pad;
/* Offset of the buffer in the BO */
__u32 offset;
/* Stride of the buffer in the BO */
__u32 stride;
/* Number of performance monitors */
__u32 nperfmons;
/* Number of performance counters related to this query pool */
__u32 ncounters;
/* Number of queries */
__u32 count;
/* Array of performance queries' syncobjs to indicate their availability */
__u64 syncs;
/* Array of u64 user-pointers that point to an array of kperfmon_ids */
__u64 kperfmon_ids;
};
struct drm_v3d_submit_cpu {
/* Pointer to a u32 array of the BOs that are referenced by the job.
*
* For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
* that contains the workgroup counts.
*
* For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
* that will contain the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
* one BO, that contains the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
* BOs. The first is the BO where the timestamp queries will be written
* to. The second is the BO that contains the timestamp.
*
* For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
* BOs.
*
* For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
* BO, where the performance queries will be written.
*/
__u64 bo_handles;
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
__u32 flags;
/* Pointer to an array of ioctl extensions*/
__u64 extensions;
};
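/*
 * Usage sketch (illustrative): submitting a CPU job that carries an indirect
 * CSD extension. Assumes drm_fd is an open V3D node, *csd describes the
 * compute job, and indirect_bo holds the workgroup counts at wg_offset; the
 * wg_size value and the disabled uniform rewrites are placeholders.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int v3d_submit_indirect_csd(int drm_fd, const struct drm_v3d_submit_csd *csd,
				   __u32 indirect_bo, __u32 wg_offset)
{
	struct drm_v3d_indirect_csd ext = {
		.base = {
			.next = 0,
			.id = DRM_V3D_EXT_ID_CPU_INDIRECT_CSD,
			.flags = 0, /* mbz */
		},
		.submit = *csd,
		.indirect = indirect_bo,
		.offset = wg_offset,
		.wg_size = 1, /* placeholder workgroup size */
		.wg_uniform_offsets = { 0xffffffff, 0xffffffff, 0xffffffff },
	};
	struct drm_v3d_submit_cpu job = {
		.bo_handles = (__u64)(uintptr_t)&indirect_bo,
		.bo_handle_count = 1, /* only the indirect BO, as required above */
		.flags = DRM_V3D_SUBMIT_EXTENSION,
		.extensions = (__u64)(uintptr_t)&ext,
	};

	return ioctl(drm_fd, DRM_IOCTL_V3D_SUBMIT_CPU, &job);
}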
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,

View File

@ -64,6 +64,16 @@ struct drm_virtgpu_map {
__u32 pad;
};
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
0)
struct drm_virtgpu_execbuffer_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
__u32 pad;
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
__u32 num_in_syncobjs;
__u32 num_out_syncobjs;
__u64 in_syncobjs;
__u64 out_syncobjs;
};
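/*
 * Usage sketch (illustrative): attaching one in-fence and one out-fence
 * syncobj to a submission. Only the new syncobj fields are shown; command
 * buffer setup and the execbuffer ioctl call itself are assumed to happen
 * elsewhere.
 */
#include <stdint.h>

static void virtgpu_add_syncobjs(struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_virtgpu_execbuffer_syncobj *in_sync,
				 struct drm_virtgpu_execbuffer_syncobj *out_sync)
{
	exbuf->syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj);
	exbuf->num_in_syncobjs = 1;
	exbuf->in_syncobjs = (__u64)(uintptr_t)in_sync;
	exbuf->num_out_syncobjs = 1;
	exbuf->out_syncobjs = (__u64)(uintptr_t)out_sync;
}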
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@ -83,6 +97,7 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@ -184,6 +199,7 @@ struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/**************************************************************************
*
* Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -902,7 +903,8 @@ struct drm_vmw_shader_arg {
/**
* enum drm_vmw_surface_flags
*
* @drm_vmw_surface_flag_shareable: Whether the surface is shareable
* @drm_vmw_surface_flag_shareable: Deprecated - all userspace surfaces are
* shareable.
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is

File diff suppressed because it is too large

View File

@ -7,42 +7,42 @@
/* Just the needed definitions for the RDB of an Amiga HD. */
struct RigidDiskBlock {
__u32 rdb_ID;
__be32 rdb_ID;
__be32 rdb_SummedLongs;
__s32 rdb_ChkSum;
__u32 rdb_HostID;
__be32 rdb_ChkSum;
__be32 rdb_HostID;
__be32 rdb_BlockBytes;
__u32 rdb_Flags;
__u32 rdb_BadBlockList;
__be32 rdb_Flags;
__be32 rdb_BadBlockList;
__be32 rdb_PartitionList;
__u32 rdb_FileSysHeaderList;
__u32 rdb_DriveInit;
__u32 rdb_Reserved1[6];
__u32 rdb_Cylinders;
__u32 rdb_Sectors;
__u32 rdb_Heads;
__u32 rdb_Interleave;
__u32 rdb_Park;
__u32 rdb_Reserved2[3];
__u32 rdb_WritePreComp;
__u32 rdb_ReducedWrite;
__u32 rdb_StepRate;
__u32 rdb_Reserved3[5];
__u32 rdb_RDBBlocksLo;
__u32 rdb_RDBBlocksHi;
__u32 rdb_LoCylinder;
__u32 rdb_HiCylinder;
__u32 rdb_CylBlocks;
__u32 rdb_AutoParkSeconds;
__u32 rdb_HighRDSKBlock;
__u32 rdb_Reserved4;
__be32 rdb_FileSysHeaderList;
__be32 rdb_DriveInit;
__be32 rdb_Reserved1[6];
__be32 rdb_Cylinders;
__be32 rdb_Sectors;
__be32 rdb_Heads;
__be32 rdb_Interleave;
__be32 rdb_Park;
__be32 rdb_Reserved2[3];
__be32 rdb_WritePreComp;
__be32 rdb_ReducedWrite;
__be32 rdb_StepRate;
__be32 rdb_Reserved3[5];
__be32 rdb_RDBBlocksLo;
__be32 rdb_RDBBlocksHi;
__be32 rdb_LoCylinder;
__be32 rdb_HiCylinder;
__be32 rdb_CylBlocks;
__be32 rdb_AutoParkSeconds;
__be32 rdb_HighRDSKBlock;
__be32 rdb_Reserved4;
char rdb_DiskVendor[8];
char rdb_DiskProduct[16];
char rdb_DiskRevision[4];
char rdb_ControllerVendor[8];
char rdb_ControllerProduct[16];
char rdb_ControllerRevision[4];
__u32 rdb_Reserved5[10];
__be32 rdb_Reserved5[10];
};
#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
@ -50,16 +50,16 @@ struct RigidDiskBlock {
struct PartitionBlock {
__be32 pb_ID;
__be32 pb_SummedLongs;
__s32 pb_ChkSum;
__u32 pb_HostID;
__be32 pb_ChkSum;
__be32 pb_HostID;
__be32 pb_Next;
__u32 pb_Flags;
__u32 pb_Reserved1[2];
__u32 pb_DevFlags;
__be32 pb_Flags;
__be32 pb_Reserved1[2];
__be32 pb_DevFlags;
__u8 pb_DriveName[32];
__u32 pb_Reserved2[15];
__be32 pb_Reserved2[15];
__be32 pb_Environment[17];
__u32 pb_EReserved[15];
__be32 pb_EReserved[15];
};
#define IDNAME_PARTITION 0x50415254 /* "PART" */
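/*
 * Usage sketch (illustrative): the __be32 annotations above mean every
 * multi-byte field is stored big-endian on disk, so a little-endian reader
 * must byte-swap before comparing. Assumes the block was read into memory
 * verbatim; be32toh() comes from <endian.h> on glibc/musl.
 */
#include <endian.h>

static int is_rigid_disk_block(const struct RigidDiskBlock *blk)
{
	return be32toh(blk->rdb_ID) == IDNAME_RIGIDDISK;
}

static int is_partition_block(const struct PartitionBlock *pb)
{
	return be32toh(pb->pb_ID) == IDNAME_PARTITION;
}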

View File

@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
enum {
BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
BINDER_VERSION = _IOWR('b', 9, struct binder_version),
BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
};
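/*
 * Usage sketch (illustrative): the numeric command values are unchanged by
 * the move from #defines to an enum, so existing callers keep working. The
 * protocol_version member of struct binder_version (declared earlier in this
 * header) is assumed here.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int binder_protocol_version(void)
{
	struct binder_version ver = { 0 };
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BINDER_VERSION, &ver);
	close(fd);
	return ret < 0 ? -1 : ver.protocol_version;
}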
/*
* NOTE: Two special error codes you should check for when calling

View File

@ -101,10 +101,6 @@ struct atm_dev_stats {
/* use backend to make new if */
#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
/* add party to p2mp call */
#ifdef CONFIG_COMPAT
/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
#endif
#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int)
/* drop party from p2mp call */

View File

@ -109,7 +109,7 @@ struct autofs_dev_ioctl {
struct args_ismountpoint ismountpoint;
};
char path[0];
char path[];
};
static __inline__ void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)

View File

@ -32,6 +32,8 @@
#define AT_HWCAP2 26 /* extension of AT_HWCAP */
#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size */
#define AT_RSEQ_ALIGN 28 /* rseq allocation alignment */
#define AT_HWCAP3 29 /* extension of AT_HWCAP */
#define AT_HWCAP4 30 /* extension of AT_HWCAP */
#define AT_EXECFN 31 /* filename of program */
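/*
 * Usage sketch (illustrative): the new AT_HWCAP3/AT_HWCAP4 entries are read
 * the same way as AT_HWCAP and AT_HWCAP2. getauxval() (glibc/musl,
 * <sys/auxv.h>) returns 0 when the kernel does not supply an entry, so older
 * kernels are handled transparently.
 */
#include <sys/auxv.h>

static unsigned long hwcap3(void)
{
	return getauxval(AT_HWCAP3); /* 0 if the entry is absent */
}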

View File

@ -116,6 +116,9 @@ enum batadv_icmp_packettype {
* only need routable IPv4 multicast packets we signed up for explicitly
* @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore
* only need routable IPv6 multicast packets we signed up for explicitly
* @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward
* batman-adv multicast packets with a multicast tracker TVLV. And all our
* hard interfaces have an MTU of at least 1280 bytes.
*/
enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
@ -123,6 +126,7 @@ enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3,
BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4,
BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5,
};
/* tt data subtypes */
@ -174,14 +178,16 @@ enum batadv_bla_claimframe {
* @BATADV_TVLV_TT: translation table tvlv
* @BATADV_TVLV_ROAM: roaming advertisement tvlv
* @BATADV_TVLV_MCAST: multicast capability tvlv
* @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
*/
enum batadv_tvlv_type {
BATADV_TVLV_GW = 0x01,
BATADV_TVLV_DAT = 0x02,
BATADV_TVLV_NC = 0x03,
BATADV_TVLV_TT = 0x04,
BATADV_TVLV_ROAM = 0x05,
BATADV_TVLV_MCAST = 0x06,
BATADV_TVLV_GW = 0x01,
BATADV_TVLV_DAT = 0x02,
BATADV_TVLV_NC = 0x03,
BATADV_TVLV_TT = 0x04,
BATADV_TVLV_ROAM = 0x05,
BATADV_TVLV_MCAST = 0x06,
BATADV_TVLV_MCAST_TRACKER = 0x07,
};
#pragma pack(2)
@ -487,6 +493,25 @@ struct batadv_bcast_packet {
*/
};
/**
* struct batadv_mcast_packet - multicast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
* @ttl: time to live for this packet, part of the general header
* @reserved: reserved byte for alignment
* @tvlv_len: length of the appended tvlv buffer (in bytes)
*/
struct batadv_mcast_packet {
__u8 packet_type;
__u8 version;
__u8 ttl;
__u8 reserved;
__be16 tvlv_len;
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
};
/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
@ -628,6 +653,14 @@ struct batadv_tvlv_mcast_data {
__u8 reserved[3];
};
/**
* struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv
* @num_dests: number of subsequent destination originator MAC addresses
*/
struct batadv_tvlv_mcast_tracker {
__be16 num_dests;
};
#pragma pack()
#endif /* _LINUX_BATADV_PACKET_H_ */

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* bits.h: Macros for dealing with bitmasks. */
#ifndef _LINUX_BITS_H
#define _LINUX_BITS_H
#define __GENMASK(h, l) \
(((~_UL(0)) - (_UL(1) << (l)) + 1) & \
(~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))
#define __GENMASK_ULL(h, l) \
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
#endif /* _LINUX_BITS_H */
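/*
 * Usage sketch (illustrative): __GENMASK(h, l) yields a mask with bits l..h
 * set. _UL()/_ULL() and __BITS_PER_LONG are assumed to come from
 * <linux/const.h> and <asm/bitsperlong.h>, which must be included first.
 */
#include <linux/const.h>
#include <asm/bitsperlong.h>

_Static_assert(__GENMASK(7, 4) == 0xf0UL, "bits 4..7 set");
_Static_assert(__GENMASK(31, 0) == 0xffffffffUL, "low 32 bits set");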

View File

@ -51,13 +51,13 @@ enum blk_zone_type {
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
* definitions as:
* - ZC1: Empty | BLK_ZONE_EMPTY
* - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
* - ZC4: Closed | BLK_ZONE_CLOSED
* - ZC5: Full | BLK_ZONE_FULL
* - ZC6: Read Only | BLK_ZONE_READONLY
* - ZC7: Offline | BLK_ZONE_OFFLINE
* - ZC4: Closed | BLK_ZONE_COND_CLOSED
* - ZC5: Full | BLK_ZONE_COND_FULL
* - ZC6: Read Only | BLK_ZONE_COND_READONLY
* - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.

View File

@ -19,6 +19,7 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
@ -41,6 +42,7 @@
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_JCOND 0xe0 /* conditional pseudo jumps: may_goto, goto_or_nop */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@ -49,6 +51,10 @@
#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
enum bpf_cond_pseudo_jmp {
BPF_MAY_GOTO = 0,
};
/* Register numbers */
enum {
BPF_REG_0 = 0,
@ -76,12 +82,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
* byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
* the trailing flexible array member) instead.
*/
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
/* Header for bpf_lpm_trie_key structs */
struct bpf_lpm_trie_key_hdr {
__u32 prefixlen;
};
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
struct bpf_lpm_trie_key_u8 {
union {
struct bpf_lpm_trie_key_hdr hdr;
__u32 prefixlen;
};
__u8 data[]; /* Arbitrary size */
};
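/*
 * Usage sketch (illustrative): building an IPv4 key with the non-deprecated
 * layout. struct ipv4_lpm_key and the helper are local to this sketch; the
 * LPM trie map itself is created and updated elsewhere.
 */
#include <string.h>

struct ipv4_lpm_key {
	struct bpf_lpm_trie_key_hdr hdr;
	__u8 addr[4];
};

static void fill_ipv4_lpm_key(struct ipv4_lpm_key *key, const __u8 addr[4],
			      __u32 prefixlen)
{
	key->hdr.prefixlen = prefixlen; /* up to 32 for AF_INET */
	memcpy(key->addr, addr, sizeof(key->addr));
}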
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@ -616,7 +639,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
* continue iteration from the current point.
* continue iteration from the current point. Both *in_batch* and
* *out_batch* must point to memory large enough to hold a key,
* except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
* LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
* must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@ -846,6 +873,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
* BPF_TOKEN_CREATE
* Description
* Create BPF token with embedded information about what
* BPF-related functionality it allows:
* - a set of allowed bpf() syscall commands;
* - a set of allowed BPF map types to be created with
* BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
* - a set of allowed BPF program types and BPF program attach
* types to be loaded with BPF_PROG_LOAD command, if
* BPF_PROG_LOAD itself is allowed.
*
* BPF token is created (derived) from an instance of BPF FS,
* assuming it has necessary delegation mount options specified.
* This BPF token can be passed as an extra parameter to various
* bpf() syscall commands to grant BPF subsystem functionality to
* unprivileged processes.
*
* When created, BPF token is "associated" with the owning
* user namespace of BPF FS instance (super block) that it was
* derived from, and subsequent BPF operations performed with
* BPF token would be performing capabilities checks (i.e.,
* CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
* that user namespace. Without BPF token, such capabilities
* have to be granted in init user namespace, making bpf()
* syscall incompatible with user namespace, for the most part.
*
* Return
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@ -900,6 +957,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
__MAX_BPF_CMD,
};
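/*
 * Usage sketch (illustrative): deriving a BPF token from a BPF FS instance
 * mounted with delegation options. Assumes bpffs_fd refers to that mount and
 * that the raw bpf(2) syscall is invoked directly rather than through libbpf.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int bpf_token_create(int bpffs_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.token_create.bpffs_fd = bpffs_fd;

	return syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
}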
enum bpf_map_type {
@ -931,7 +990,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
* attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
* local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* deprecated.
*/
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
@ -943,6 +1009,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
BPF_MAP_TYPE_ARENA,
__MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@ -986,6 +1054,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
__MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@ -1033,6 +1103,19 @@ enum bpf_attach_type {
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
BPF_LSM_CGROUP,
BPF_STRUCT_OPS,
BPF_NETFILTER,
BPF_TCX_INGRESS,
BPF_TCX_EGRESS,
BPF_TRACE_UPROBE_MULTI,
BPF_CGROUP_UNIX_CONNECT,
BPF_CGROUP_UNIX_SENDMSG,
BPF_CGROUP_UNIX_RECVMSG,
BPF_CGROUP_UNIX_GETPEERNAME,
BPF_CGROUP_UNIX_GETSOCKNAME,
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
@ -1049,8 +1132,24 @@ enum bpf_link_type {
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
BPF_LINK_TYPE_STRUCT_OPS = 9,
BPF_LINK_TYPE_NETFILTER = 10,
BPF_LINK_TYPE_TCX = 11,
BPF_LINK_TYPE_UPROBE_MULTI = 12,
BPF_LINK_TYPE_NETKIT = 13,
BPF_LINK_TYPE_SOCKMAP = 14,
__MAX_BPF_LINK_TYPE,
};
MAX_BPF_LINK_TYPE,
#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
enum bpf_perf_event_type {
BPF_PERF_EVENT_UNSPEC = 0,
BPF_PERF_EVENT_UPROBE = 1,
BPF_PERF_EVENT_URETPROBE = 2,
BPF_PERF_EVENT_KPROBE = 3,
BPF_PERF_EVENT_KRETPROBE = 4,
BPF_PERF_EVENT_TRACEPOINT = 5,
BPF_PERF_EVENT_EVENT = 6,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
@ -1099,7 +1198,12 @@ enum bpf_link_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
/* Generic attachment flags. */
#define BPF_F_REPLACE (1U << 2)
#define BPF_F_BEFORE (1U << 3)
#define BPF_F_AFTER (1U << 4)
#define BPF_F_ID (1U << 5)
#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@ -1108,7 +1212,7 @@ enum bpf_link_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will allow any alignment whatsoever. On platforms
* with strict alignment requirements for loads and stores (such
* as sparc and mips) the verifier validates that all loads and
@ -1161,10 +1265,27 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
enum {
BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
};
/* link_create.uprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_UPROBE_MULTI attach type to create return probe.
*/
enum {
BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
};
/* link_create.netfilter.flags used in LINK_CREATE command for
* BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
*/
#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
@ -1220,6 +1341,10 @@ enum bpf_link_type {
*/
#define BPF_PSEUDO_KFUNC_CALL 2
enum bpf_addr_space_cast {
BPF_ADDR_SPACE_CAST = 1,
};
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
BPF_ANY = 0, /* create new element or update existing */
@ -1266,6 +1391,24 @@ enum {
/* Create a map that is suitable to be an inner map with dynamic max entries */
BPF_F_INNER_MAP = (1U << 12),
/* Create a map that will be registered/unregistered by the backing bpf_link */
BPF_F_LINK = (1U << 13),
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
/* Flag for value_type_btf_obj_fd, the fd is available */
BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
/* BPF token FD is passed in a corresponding command's token_fd field */
BPF_F_TOKEN_FD = (1U << 16),
/* When user space page faults in bpf_arena send SIGSEGV instead of inserting new page */
BPF_F_SEGV_ON_FAULT = (1U << 17),
/* Do not translate kernel bpf_arena pointers to user pointers */
BPF_F_NO_USER_CONV = (1U << 18),
};
/* Flags for BPF_PROG_QUERY. */
@ -1337,8 +1480,20 @@ union bpf_attr {
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
*
* BPF_MAP_TYPE_ARENA - contains the address where user space
* is going to mmap() the arena. It has to be page aligned.
*/
__u64 map_extra;
__s32 value_type_btf_obj_fd; /* fd pointing to a BTF
* type data for
* btf_vmlinux_value_type_id.
*/
/* BPF token FD to use with BPF_MAP_CREATE operation.
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@ -1403,23 +1558,44 @@ union bpf_attr {
__aligned_u64 fd_array; /* array of FDs */
__aligned_u64 core_relos;
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
/* BPF token FD to use with BPF_PROG_LOAD operation.
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
/* Same as dirfd in openat() syscall; see openat(2)
* manpage for details of path FD and pathname semantics;
* path_fd should accompanied by BPF_F_PATH_FD flag set in
* file_flags field, otherwise it should be set to zero;
* if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
*/
__s32 path_fd;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
union {
__u32 target_fd; /* target object to attach to or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
*/
__u32 replace_bpf_fd;
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@ -1465,21 +1641,33 @@ union bpf_attr {
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
__u32 target_fd; /* container object to query */
union {
__u32 target_fd; /* target object to query or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
union {
__u32 prog_cnt;
__u32 count;
};
__u32 :32;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
__aligned_u64 prog_attach_flags;
__aligned_u64 link_ids;
__aligned_u64 link_attach_flags;
__u64 revision;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__u64 name;
__u32 prog_fd;
__u64 name;
__u32 prog_fd;
__u32 :32;
__aligned_u64 cookie;
} raw_tracepoint;
struct { /* anonymous struct for BPF_BTF_LOAD */
@ -1488,6 +1676,16 @@ union bpf_attr {
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
/* output: actual total log contents size (including terminating zero).
* It could be both larger than original log_size (if log was
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
__u32 btf_flags;
/* BPF token FD to use with BPF_BTF_LOAD operation.
* If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 btf_token_fd;
};
struct {
@ -1507,15 +1705,18 @@ union bpf_attr {
} task_fd_query;
struct { /* struct used by BPF_LINK_CREATE command */
__u32 prog_fd; /* eBPF program to attach */
union {
__u32 target_fd; /* object to attach to */
__u32 target_ifindex; /* target ifindex */
__u32 prog_fd; /* eBPF program to attach */
__u32 map_fd; /* struct_ops to attach */
};
union {
__u32 target_fd; /* target object to attach to or ... */
__u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
__u32 target_btf_id; /* btf_id of target to attach to */
__u32 target_btf_id; /* btf_id of target to attach to */
struct {
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
@ -1543,17 +1744,57 @@ union bpf_attr {
*/
__u64 cookie;
} tracing;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
struct {
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
} tcx;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 cnt;
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
union {
__u32 relative_fd;
__u32 relative_id;
};
__u64 expected_revision;
} netkit;
};
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
/* new program fd to update link with */
__u32 new_prog_fd;
union {
/* new program fd to update link with */
__u32 new_prog_fd;
/* new struct_ops map fd to update link with */
__u32 new_map_fd;
};
__u32 flags; /* extra flags */
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags */
__u32 old_prog_fd;
union {
/* expected link's program fd; is specified only if
* BPF_F_REPLACE flag is set in flags.
*/
__u32 old_prog_fd;
/* expected link's map fd; is specified only
* if BPF_F_REPLACE flag is set.
*/
__u32 old_map_fd;
};
} link_update;
struct {
@ -1575,6 +1816,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
struct { /* struct used by BPF_TOKEN_CREATE command */
__u32 flags;
__u32 bpffs_fd;
} token_create;
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@ -1647,17 +1893,17 @@ union bpf_attr {
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
* to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
* to file *\/sys/kernel/tracing/trace* from TraceFS, if
* available. It can take up to three additional **u64**
* arguments (as with eBPF helpers, the total number of arguments is
* limited to five).
*
* Each time the helper is called, it appends a line to the trace.
* Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
* open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
* Lines are discarded while *\/sys/kernel/tracing/trace* is
* open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
* The format of the trace is customizable, and the exact output
* one will get depends on the options set in
* *\/sys/kernel/debug/tracing/trace_options* (see also the
* *\/sys/kernel/tracing/trace_options* (see also the
* *README* file under the same directory). However, it usually
* defaults to something like:
*
@ -1850,7 +2096,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
* 0 on success, or a negative error in case of failure.
* 0 on success, or a negative error in case of failure. Positive
* error indicates a potential drop or congestion in the target
* device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
@ -2583,8 +2831,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
@ -2822,8 +3070,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
* **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
* It supports the same set of *optname*\ s that is supported by
@ -3131,6 +3379,10 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
* **BPF_FIB_LOOKUP_TBID**
* Used with BPF_FIB_LOOKUP_DIRECT.
* Use the routing table ID present in *params*->tbid
* for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
@ -3139,6 +3391,15 @@ union bpf_attr {
* and *params*->smac will not be set as output. A common
* use case is to call **bpf_redirect_neigh**\ () after
* doing **bpf_fib_lookup**\ ().
* **BPF_FIB_LOOKUP_SRC**
* Derive and set source IP addr in *params*->ipv{4,6}_src
* for the nexthop. If the src addr cannot be derived,
* **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
* case, *params*->dmac and *params*->smac are not set either.
* **BPF_FIB_LOOKUP_MARK**
* Use the mark present in *params*->mark for the fib lookup.
* This option should not be used with BPF_FIB_LOOKUP_DIRECT,
* as it only has meaning for full lookups.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@ -4108,9 +4369,6 @@ union bpf_attr {
* **-EOPNOTSUPP** if the operation is not supported, for example
* a call from outside of TC ingress.
*
* **-ESOCKTNOSUPPORT** if the socket type is not supported
* (reuseport).
*
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
@ -4375,6 +4633,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* Note: the user stack will only be populated if the *task* is
* the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@ -4386,6 +4646,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@ -4689,9 +4950,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
* currently only supported for tc BPF program types at the ingress
* hook and for veth device types. The peer device must reside in a
* different network namespace.
* currently only supported for tc BPF program types at the
* ingress hook and for veth and netkit target device types. The
* peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@ -4767,7 +5028,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
*
* struct socket *bpf_sock_from_file(struct file *file)
@ -4969,6 +5230,14 @@ union bpf_attr {
* different maps if key/value layout matches across maps.
* Every bpf_timer_set_callback() can have different callback_fn.
*
* *flags* can be one of:
*
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
* **BPF_F_TIMER_CPU_PIN**
* Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
* **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
@ -4987,9 +5256,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
*
* When called for kprobe program attached as uprobe it returns
* probe address for both entry and return uprobe.
*
* Return
* Address of the traced function.
* Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
* Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
@ -5240,7 +5514,7 @@ union bpf_attr {
* bytes will be copied to *dst*
* Return
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
* void *bpf_kptr_xchg(void *map_value, void *ptr)
@ -5325,11 +5599,22 @@ union bpf_attr {
* Description
* Write *len* bytes from *src* into *dst*, starting from *offset*
* into *dst*.
* *flags* is currently unused.
*
* *flags* must be 0 except for skb-type dynptrs.
*
* For skb-type dynptrs:
* * All data slices of the dynptr are automatically
* invalidated after **bpf_dynptr_write**\ (). This is
* because writing may pull the skb and change the
* underlying packet buffer.
*
* * For *flags*, please see the flags accepted by
* **bpf_skb_store_bytes**\ ().
* Return
* 0 on success, -E2BIG if *offset* + *len* exceeds the length
* of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
* is a read-only dynptr or if *flags* is not 0.
* is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
* other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
*
* void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
* Description
@ -5337,6 +5622,9 @@ union bpf_attr {
*
* *len* must be a statically known value. The returned data slice
* is invalidated whenever the dynptr is invalidated.
*
* skb and xdp type dynptrs may not use bpf_dynptr_data. They should
* instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
* Return
* Pointer to the underlying dynptr data, NULL if the dynptr is
* read-only, if the dynptr is invalid, or if the offset and length
@ -6116,6 +6404,19 @@ struct bpf_sock_tuple {
};
};
/* (Simplified) user return codes for tcx prog type.
* A valid tcx program must return one of these defined values. All other
* return codes are reserved for future use. Must remain compatible with
* their TC_ACT_* counter-parts. For compatibility in behavior, unknown
* return codes are mapped to TCX_NEXT.
*/
enum tcx_action_base {
TCX_NEXT = -1,
TCX_PASS = 0,
TCX_DROP = 2,
TCX_REDIRECT = 7,
};
struct bpf_xdp_sock {
__u32 queue_id;
};
@ -6297,7 +6598,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 :32; /* alignment pad */
__u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@ -6359,6 +6660,76 @@ struct bpf_link_info {
struct {
__u32 ifindex;
} xdp;
struct {
__u32 map_id;
} struct_ops;
struct {
__u32 pf;
__u32 hooknum;
__s32 priority;
__u32 flags;
} netfilter;
struct {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
__aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 path_size; /* in/out: real path size on success, including zero byte */
__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
__u32 :32;
union {
struct {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
__u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
__u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
__u32 :32;
__u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
__u32 :32;
__u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
struct {
__u32 ifindex;
__u32 attach_type;
} tcx;
struct {
__u32 ifindex;
__u32 attach_type;
} netkit;
struct {
__u32 map_id;
__u32 attach_type;
} sockmap;
};
} __attribute__((aligned(8)));
@ -6577,6 +6948,8 @@ enum {
* socket transition to LISTEN state.
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
* Arg1: measured RTT input (mrtt)
* Arg2: updated srtt
*/
BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
* It will be called to handle
@ -6655,6 +7028,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@ -6756,6 +7130,9 @@ enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
BPF_FIB_LOOKUP_TBID = (1U << 3),
BPF_FIB_LOOKUP_SRC = (1U << 4),
BPF_FIB_LOOKUP_MARK = (1U << 5),
};
enum {
@ -6768,6 +7145,7 @@ enum {
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
};
struct bpf_fib_lookup {
@ -6787,7 +7165,7 @@ struct bpf_fib_lookup {
/* output: MTU value */
__u16 mtu_result;
};
} __attribute__((packed, aligned(2)));
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@ -6802,6 +7180,9 @@ struct bpf_fib_lookup {
__u32 rt_metric;
};
/* input: source address to consider for lookup
* output: source address result from lookup
*/
union {
__be32 ipv4_src;
__u32 ipv6_src[4]; /* in6_addr; network order */
@ -6816,11 +7197,32 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
/* output */
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
union {
struct {
/* output */
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
};
/* input: when accompanied with the
* 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
* specific routing table to use for the fib lookup.
*/
__u32 tbid;
};
union {
/* input */
struct {
__u32 mark; /* policy routing */
/* 2 4-byte holes for input */
};
/* output: source and dest mac */
struct {
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
};
};
struct bpf_redir_neigh {
@ -6904,36 +7306,37 @@ struct bpf_spin_lock {
};
struct bpf_timer {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
__u64 :64;
__u64 :64;
__u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
__u64 :64;
__u64 :64;
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
__u64 :64;
__u64 :64;
__u64 :64;
__u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
__u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@ -7083,4 +7486,23 @@ struct bpf_core_relo {
enum bpf_core_relo_kind kind;
};
/*
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
* - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
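
Both flags are passed in the last argument of bpf_timer_start(). A minimal BPF-side fragment, assuming the timer lives in a map value as the verifier requires (a hypothetical timer_map whose value type is struct elem { struct bpf_timer t; }, with cb defined elsewhere):

struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

if (e) {
        bpf_timer_init(&e->t, &timer_map, 1 /* CLOCK_MONOTONIC */);
        bpf_timer_set_callback(&e->t, cb);
        /* fire at an absolute CLOCK_MONOTONIC time, pinned to this CPU */
        bpf_timer_start(&e->t, deadline_ns,
                        BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN);
}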
/* BPF numbers iterator state */
struct bpf_iter_num {
 /* opaque iterator state; having __u64 here allows preserving correct
* alignment requirements in vmlinux.h, generated from BTF
*/
__u64 __opaque[1];
} __attribute__((aligned(8)));
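
struct bpf_iter_num is the state blob behind the open-coded numbers-iterator kfuncs. A hedged BPF-side fragment (kfunc prototypes as exposed through kernel BTF; assumes libbpf's __ksym annotation):

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

/* ...inside a BPF program body: */
        struct bpf_iter_num it;
        int sum = 0, *v;

        bpf_iter_num_new(&it, 0, 10);          /* yields 0..9 */
        while ((v = bpf_iter_num_next(&it)))
                sum += *v;
        bpf_iter_num_destroy(&it);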
#endif /* __LINUX_BPF_H__ */

View File

@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_BPFILTER_H
#define _LINUX_BPFILTER_H
#include <linux/if.h>
enum {
BPFILTER_IPT_SO_SET_REPLACE = 64,
BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
BPFILTER_IPT_SET_MAX,
};
enum {
BPFILTER_IPT_SO_GET_INFO = 64,
BPFILTER_IPT_SO_GET_ENTRIES = 65,
BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
BPFILTER_IPT_GET_MAX,
};
#endif /* _LINUX_BPFILTER_H */

View File

@ -90,6 +90,7 @@ struct btrfs_qgroup_limit {
* struct btrfs_qgroup_inherit.flags
*/
#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
#define BTRFS_QGROUP_INHERIT_FLAGS_SUPP (BTRFS_QGROUP_INHERIT_SET_LIMITS)
struct btrfs_qgroup_inherit {
__u64 flags;
@ -331,6 +332,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@ -610,6 +613,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
BTRFS_DEFRAG_RANGE_START_IO)
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
@ -751,6 +757,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;
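
The new BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA command is issued through the existing BTRFS_IOC_QUOTA_CTL ioctl. A hedged userspace sketch (fd is any descriptor opened on the mounted filesystem; error handling omitted):

#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int enable_simple_quota(int fd)
{
        struct btrfs_ioctl_quota_ctl_args args = {
                .cmd = BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA,
        };

        return ioctl(fd, BTRFS_IOC_QUOTA_CTL, &args);
}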

View File

@ -69,6 +69,9 @@
/* Holds the block group items for extent tree v2. */
#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@ -212,11 +215,31 @@
*/
#define BTRFS_METADATA_ITEM_KEY 169
/*
* Special __inline__ ref key which stores the id of the subvolume which originally
* created the extent. This subvolume owns the extent permanently from the
* perspective of simple quotas. Needed to know which subvolume to free quota
* usage from when the extent is deleted.
*
 * Stored as an __inline__ ref rather than a separate item to avoid wasting
 * space on top of the existing extent item. However, unlike the other
 * __inline__ refs, there is only one owner ref per extent rather than one
 * ref per referencing owner.
*
* Because of this, it goes at the front of the list of __inline__ refs, and thus
* must have a lower type value than any other __inline__ ref type (to satisfy the
* disk format rule that __inline__ refs have non-decreasing type).
*/
#define BTRFS_EXTENT_OWNER_REF_KEY 172
#define BTRFS_TREE_BLOCK_REF_KEY 176
#define BTRFS_EXTENT_DATA_REF_KEY 178
#define BTRFS_EXTENT_REF_V0_KEY 180
/*
 * Obsolete key. Definition removed in 6.6; the value may be reused in the future.
*
* #define BTRFS_EXTENT_REF_V0_KEY 180
*/
#define BTRFS_SHARED_BLOCK_REF_KEY 182
@ -253,6 +276,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
#define BTRFS_RAID_STRIPE_KEY 230
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@ -711,6 +736,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
struct btrfs_raid_stride {
/* The id of device this raid extent lives on. */
__le64 devid;
/* The physical location on disk. */
__le64 physical;
} __attribute__ ((__packed__));
/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
#define BTRFS_STRIPE_RAID0 1
#define BTRFS_STRIPE_RAID1 2
#define BTRFS_STRIPE_DUP 3
#define BTRFS_STRIPE_RAID10 4
#define BTRFS_STRIPE_RAID5 5
#define BTRFS_STRIPE_RAID6 6
#define BTRFS_STRIPE_RAID1C3 7
#define BTRFS_STRIPE_RAID1C4 8
struct btrfs_stripe_extent {
__u8 encoding;
__u8 reserved[7];
/* An array of raid strides this stripe is composed of. */
struct btrfs_raid_stride strides[];
} __attribute__ ((__packed__));
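
Since strides[] is a flexible array member, the stride count of a given stripe extent follows from the on-disk item size; a small hedged sketch (item_size being the btrfs item length):

size_t nr_strides = (item_size - sizeof(struct btrfs_stripe_extent)) /
                    sizeof(struct btrfs_raid_stride);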
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@ -779,6 +828,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
struct btrfs_extent_owner_ref {
__le64 root_id;
} __attribute__ ((__packed__));
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@ -1196,9 +1249,17 @@ static __inline__ __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
/*
* Whether or not this filesystem is using simple quotas. Not exactly the
* incompat bit, because we support using simple quotas, disabling it, then
* going back to full qgroup quotas.
*/
#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
#define BTRFS_QGROUP_STATUS_VERSION 1
@ -1220,6 +1281,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
/*
* The generation when quotas were last enabled. Used by simple quotas to
* avoid decrementing when freeing an extent that was written before
* enable.
*
* Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
*/
__le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {

View File

@ -193,9 +193,14 @@ struct canfd_frame {
#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
/* the 8-bit VCID is optionally placed in the canxl_frame.prio element */
#define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */
#define CANXL_VCID_VAL_MASK 0xFFUL /* VCID is an 8-bit value */
#define CANXL_VCID_MASK (CANXL_VCID_VAL_MASK << CANXL_VCID_OFFSET)
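
So the optional VCID shares canxl_frame.prio with the 11-bit priority. A hedged sketch of packing and unpacking it (CANXL_PRIO_MASK is assumed from the same header):

/* place an 8-bit VCID next to the 11-bit priority */
frame.prio = (prio & CANXL_PRIO_MASK) |
             (((canid_t)vcid << CANXL_VCID_OFFSET) & CANXL_VCID_MASK);

/* recover the VCID from a received frame */
__u8 rx_vcid = (frame.prio & CANXL_VCID_MASK) >> CANXL_VCID_OFFSET;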
/**
* struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
* @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
* @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags / VCID
* @flags: additional flags for CAN XL
* @sdt: SDU (service data unit) type
 * @len: frame payload length in bytes (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
@ -205,7 +210,7 @@ struct canfd_frame {
* @prio shares the same position as @can_id from struct can[fd]_frame.
*/
struct canxl_frame {
canid_t prio; /* 11 bit priority for arbitration (canid_t) */
canid_t prio; /* 11 bit priority for arbitration / 8 bit VCID */
__u8 flags; /* additional flags for CAN XL */
__u8 sdt; /* SDU (service data unit) type */
 __u16 len; /* frame payload length in bytes */
@ -285,6 +290,5 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */

View File

@ -137,6 +137,7 @@ struct can_isotp_ll_options {
#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
#define CAN_ISOTP_DYN_FC_PARMS 0x2000 /* dynamic FC parameters BS/STmin */
/* protocol machine default values */

View File

@ -49,6 +49,8 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
enum {
SCM_CAN_RAW_ERRQUEUE = 1,
};
@ -63,6 +65,22 @@ enum {
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
CAN_RAW_XL_VCID_OPTS, /* CAN XL VCID configuration options */
};
/* configuration for CAN XL virtual CAN identifier (VCID) handling */
struct can_raw_vcid_options {
__u8 flags; /* flags for vcid (filter) behaviour */
__u8 tx_vcid; /* VCID value set into canxl_frame.prio */
__u8 rx_vcid; /* VCID value for VCID filter */
__u8 rx_vcid_mask; /* VCID mask for VCID filter */
};
/* can_raw_vcid_options.flags for CAN XL virtual CAN identifier handling */
#define CAN_RAW_XL_VCID_TX_SET 0x01
#define CAN_RAW_XL_VCID_TX_PASS 0x02
#define CAN_RAW_XL_VCID_RX_FILTER 0x04
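
Put together, a hedged userspace sketch of enabling CAN XL frames on a CAN_RAW socket s and configuring VCID handling (the VCID values are illustrative):

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int on = 1;
struct can_raw_vcid_options vopts = {
        .flags        = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER,
        .tx_vcid      = 0x23,   /* VCID stamped into transmitted frames */
        .rx_vcid      = 0x45,   /* accept only this VCID on receive */
        .rx_vcid_mask = 0xff,
};

setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_FRAMES, &on, sizeof(on));
setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS, &vopts, sizeof(vopts));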
#endif /* !_UAPI_CAN_RAW_H */

View File

@ -41,11 +41,12 @@ typedef struct __user_cap_header_struct {
int pid;
} *cap_user_header_t;
typedef struct __user_cap_data_struct {
struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
} *cap_user_data_t;
};
typedef struct __user_cap_data_struct *cap_user_data_t;
#define VFS_CAP_REVISION_MASK 0xFF000000
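
The split into a plain struct plus a separate typedef does not change how capget(2)/capset(2) consume these types. A hedged sketch of reading the calling thread's capabilities (glibc has no wrapper, so syscall(2) is used directly):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
        struct __user_cap_header_struct hdr = {
                .version = _LINUX_CAPABILITY_VERSION_3,
                .pid     = 0,                   /* 0 = calling thread */
        };
        struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];

        if (syscall(SYS_capget, &hdr, data) == 0)
                printf("effective[0] = 0x%x\n", data[0].effective);
        return 0;
}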

View File

@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
* For the nr_io_wait state, a flag in the delay accounting structure
* indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/

View File

@ -1,64 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _CM4000_H_
#define _CM4000_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#define MAX_ATR 33
#define CM4000_MAX_DEV 4
/* those two structures are passed via ioctl() from/to userspace. They are
 * used by existing userspace programs, so I kept the awkward "bIFSD" naming
* not to break compilation of userspace apps. -HW */
typedef struct atreq {
__s32 atr_len;
unsigned char atr[64];
__s32 power_act;
unsigned char bIFSD;
unsigned char bIFSC;
} atreq_t;
/* what is particularly stupid in the original driver is the arch-dependent
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*
* I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
* On 32bit this will make no difference. With 64bit kernels, it will make
* 32bit apps work, too.
*/
typedef struct ptsreq {
__u32 protocol; /*T=0: 2^0, T=1: 2^1*/
unsigned char flags;
unsigned char pts1;
unsigned char pts2;
unsigned char pts3;
} ptsreq_t;
#define CM_IOC_MAGIC 'c'
#define CM_IOC_MAXNR 255
#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *)
#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *)
#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *)
#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3)
#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4)
#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*)
/* card and device states */
#define CM_CARD_INSERTED 0x01
#define CM_CARD_POWERED 0x02
#define CM_ATR_PRESENT 0x04
#define CM_ATR_VALID 0x08
#define CM_STATE_VALID 0x0f
/* extra info only from CM4000 */
#define CM_NO_READER 0x10
#define CM_BAD_CARD 0x20
#endif /* _CM4000_H_ */

View File

@ -30,6 +30,48 @@ enum proc_cn_mcast_op {
PROC_CN_MCAST_IGNORE = 2
};
#define PROC_EVENT_ALL (PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_UID | \
PROC_EVENT_GID | PROC_EVENT_SID | PROC_EVENT_PTRACE | \
PROC_EVENT_COMM | PROC_EVENT_NONZERO_EXIT | \
PROC_EVENT_COREDUMP | PROC_EVENT_EXIT)
/*
* If you add an entry in proc_cn_event, make sure you add it in
* PROC_EVENT_ALL above as well.
*/
enum proc_cn_event {
/* Use successive bits so the enums can be used to record
* sets of events as well
*/
PROC_EVENT_NONE = 0x00000000,
PROC_EVENT_FORK = 0x00000001,
PROC_EVENT_EXEC = 0x00000002,
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
PROC_EVENT_COMM = 0x00000200,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit,
* while "next to last" is coredumping event
* before that is report only if process dies
* with non-zero exit status
*/
PROC_EVENT_NONZERO_EXIT = 0x20000000,
PROC_EVENT_COREDUMP = 0x40000000,
PROC_EVENT_EXIT = 0x80000000
};
struct proc_input {
enum proc_cn_mcast_op mcast_op;
enum proc_cn_event event_type;
};
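
struct proc_input lets a listener pass an event filter when subscribing to the proc connector, instead of the bare enum proc_cn_mcast_op. A hedged sketch of the netlink message (nl_sock is assumed to be a NETLINK_CONNECTOR socket already bound to CN_IDX_PROC; error handling omitted):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static int subscribe_exec_exit(int nl_sock)
{
        struct {
                struct nlmsghdr nlh;
                struct cn_msg cn;
                struct proc_input input;
        } __attribute__((packed)) req;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len  = sizeof(req);
        req.nlh.nlmsg_type = NLMSG_DONE;
        req.cn.id.idx      = CN_IDX_PROC;
        req.cn.id.val      = CN_VAL_PROC;
        req.cn.len         = sizeof(struct proc_input);
        req.input.mcast_op   = PROC_CN_MCAST_LISTEN;
        req.input.event_type = PROC_EVENT_EXEC | PROC_EVENT_EXIT;

        return send(nl_sock, &req, sizeof(req), 0) == (ssize_t)sizeof(req) ? 0 : -1;
}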
static __inline__ enum proc_cn_event valid_event(enum proc_cn_event ev_type)
{
return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
}
/*
* From the user's point of view, the process
* ID is the thread group ID and thread ID is the internal
@ -44,24 +86,7 @@ enum proc_cn_mcast_op {
*/
struct proc_event {
enum what {
/* Use successive bits so the enums can be used to record
* sets of events as well
*/
PROC_EVENT_NONE = 0x00000000,
PROC_EVENT_FORK = 0x00000001,
PROC_EVENT_EXEC = 0x00000002,
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
PROC_EVENT_COMM = 0x00000200,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit,
* while "next to last" is coredumping event */
PROC_EVENT_COREDUMP = 0x40000000,
PROC_EVENT_EXIT = 0x80000000
} what;
enum proc_cn_event what;
__u32 cpu;
__u64 __attribute__((aligned(8))) timestamp_ns;
/* Number of nano seconds since system boot */

Some files were not shown because too many files have changed in this diff.