Merge tag 'v4.14-rc2' into patchwork

Linux 4.14-rc2

* tag 'v4.14-rc2': (12066 commits)
  Linux 4.14-rc2
  tpm: ibmvtpm: simplify crq initialization and document crq format
  tpm: replace msleep() with usleep_range() in TPM 1.2/2.0 generic drivers
  Documentation: tpm: add powered-while-suspended binding documentation
  tpm: tpm_crb: constify acpi_device_id.
  tpm: vtpm: constify vio_device_id
  security: fix description of values returned by cap_inode_need_killpriv
  x86/asm: Fix inline asm call constraints for Clang
  objtool: Handle another GCC stack pointer adjustment bug
  inet: fix improper empty comparison
  net: use inet6_rcv_saddr to compare sockets
  net: set tb->fast_sk_family
  net: orphan frags on stand-alone ptype in dev_queue_xmit_nit
  MAINTAINERS: update git tree locations for ieee802154 subsystem
  SMB3: Don't ignore O_SYNC/O_DSYNC and O_DIRECT flags
  SMB3: handle new statx fields
  arch: remove unused *_segments() macros/functions
  parisc: Unbreak bootloader due to gcc-7 optimizations
  parisc: Reintroduce option to gzip-compress the kernel
  apparmor: fix apparmorfs DAC access permissions
  ...
Mauro Carvalho Chehab, 2017-09-29 05:24:10 -04:00
11173 changed files with 591630 additions and 365843 deletions


@@ -0,0 +1,34 @@
#ifndef _ASM_GENERIC_HUGETLB_ENCODE_H_
#define _ASM_GENERIC_HUGETLB_ENCODE_H_
/*
* Several system calls take a flag to request "hugetlb" huge pages.
* Without further specification, these system calls will use the
* system's default huge page size. If a system supports multiple
* huge page sizes, the desired huge page size can be specified in
* bits [26:31] of the flag arguments. The value in these 6 bits
* will encode the log2 of the huge page size.
*
* The following definitions are associated with this huge page size
* encoding in flag arguments. System call specific header files
* that use this encoding should include this file. They can then
* provide definitions based on these with their own specific prefix.
* For example:
* #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
*/
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
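
To make the encoding above concrete: a 2 MB huge page is 2^21 bytes, so its flag value is 21 << 26, which is exactly HUGETLB_FLAG_ENCODE_2MB. A minimal userspace sketch (illustrative only, not part of the header) that derives such a value from any power-of-two huge page size:

#include <stdint.h>
#include <asm-generic/hugetlb_encode.h>

/* For a power-of-two size, __builtin_ctzll() returns log2(size), which is
 * the value carried in bits [26:31] of the flag argument. For example,
 * hugetlb_encode(2UL << 20) == 21 << 26 == HUGETLB_FLAG_ENCODE_2MB. */
static inline uint64_t hugetlb_encode(uint64_t huge_page_size)
{
        return (uint64_t)__builtin_ctzll(huge_page_size)
                << HUGETLB_FLAG_ENCODE_SHIFT;
}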


@@ -58,20 +58,12 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
/* compatibility flags */
#define MAP_FILE 0
/*
* When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
* This gives us 6 bits, which is enough until someone invents 128 bit address
* spaces.
*
* Assume these are all power of twos.
* When 0 use the default page size.
*/
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\


@@ -151,29 +151,6 @@ typedef struct siginfo {
#define si_arch _sifields._sigsys._arch
#endif
#ifdef __KERNEL__
#define __SI_MASK 0xffff0000u
#define __SI_KILL (0 << 16)
#define __SI_TIMER (1 << 16)
#define __SI_POLL (2 << 16)
#define __SI_FAULT (3 << 16)
#define __SI_CHLD (4 << 16)
#define __SI_RT (5 << 16)
#define __SI_MESGQ (6 << 16)
#define __SI_SYS (7 << 16)
#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
#else /* __KERNEL__ */
#define __SI_KILL 0
#define __SI_TIMER 0
#define __SI_POLL 0
#define __SI_FAULT 0
#define __SI_CHLD 0
#define __SI_RT 0
#define __SI_MESGQ 0
#define __SI_SYS 0
#define __SI_CODE(T,N) (N)
#endif /* __KERNEL__ */
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
@@ -181,8 +158,8 @@ typedef struct siginfo {
#define SI_USER 0 /* sent by kill, sigsend, raise */
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
#define SI_QUEUE -1 /* sent by sigqueue */
#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
#define SI_TIMER -2 /* sent by timer expiration */
#define SI_MESGQ -3 /* sent by real time mesq state change */
#define SI_ASYNCIO -4 /* sent by AIO completion */
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
@@ -194,86 +171,86 @@ typedef struct siginfo {
/*
* SIGILL si_codes
*/
#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
#define ILL_ILLOPC 1 /* illegal opcode */
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
#define NSIGILL 8
/*
* SIGFPE si_codes
*/
#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
#define FPE_INTDIV 1 /* integer divide by zero */
#define FPE_INTOVF 2 /* integer overflow */
#define FPE_FLTDIV 3 /* floating point divide by zero */
#define FPE_FLTOVF 4 /* floating point overflow */
#define FPE_FLTUND 5 /* floating point underflow */
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
#define NSIGFPE 8
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */
#define SEGV_PKUERR (__SI_FAULT|4) /* failed protection key checks */
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
#define SEGV_BNDERR 3 /* failed address bound checks */
#define SEGV_PKUERR 4 /* failed protection key checks */
#define NSIGSEGV 4
/*
* SIGBUS si_codes
*/
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existent physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
/* hardware memory error consumed on a machine check: action required */
#define BUS_MCEERR_AR (__SI_FAULT|4)
#define BUS_MCEERR_AR 4
/* hardware memory error detected in process but not consumed: action optional*/
#define BUS_MCEERR_AO (__SI_FAULT|5)
#define BUS_MCEERR_AO 5
#define NSIGBUS 5
/*
* SIGTRAP si_codes
*/
#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint/watchpoint */
#define TRAP_BRKPT 1 /* process breakpoint */
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
#define NSIGTRAP 4
/*
* SIGCHLD si_codes
*/
#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
#define CLD_EXITED 1 /* child has exited */
#define CLD_KILLED 2 /* child was killed */
#define CLD_DUMPED 3 /* child terminated abnormally */
#define CLD_TRAPPED 4 /* traced child has trapped */
#define CLD_STOPPED 5 /* child has stopped */
#define CLD_CONTINUED 6 /* stopped child has continued */
#define NSIGCHLD 6
/*
* SIGPOLL si_codes
* SIGPOLL (or any other signal without signal specific si_codes) si_codes
*/
#define POLL_IN (__SI_POLL|1) /* data input available */
#define POLL_OUT (__SI_POLL|2) /* output buffers available */
#define POLL_MSG (__SI_POLL|3) /* input message available */
#define POLL_ERR (__SI_POLL|4) /* i/o error */
#define POLL_PRI (__SI_POLL|5) /* high priority input available */
#define POLL_HUP (__SI_POLL|6) /* device disconnected */
#define POLL_IN 1 /* data input available */
#define POLL_OUT 2 /* output buffers available */
#define POLL_MSG 3 /* input message available */
#define POLL_ERR 4 /* i/o error */
#define POLL_PRI 5 /* high priority input available */
#define POLL_HUP 6 /* device disconnected */
#define NSIGPOLL 6
/*
* SIGSYS si_codes
*/
#define SYS_SECCOMP (__SI_SYS|1) /* seccomp triggered */
#define NSIGSYS 1
#define SYS_SECCOMP 1 /* seccomp triggered */
#define NSIGSYS 1
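
These plain si_code values are exactly what userspace consumes; as a hedged sketch (plain POSIX signal API, not taken from the patch), a handler installed with SA_SIGINFO can test them directly:

#include <signal.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
        /* si_code carries the plain SEGV_* values defined above */
        if (info->si_code == SEGV_MAPERR)
                write(2, "SEGV_MAPERR: address not mapped\n", 32);
        else if (info->si_code == SEGV_ACCERR)
                write(2, "SEGV_ACCERR: bad permissions\n", 29);
        _exit(1);
}

int main(void)
{
        struct sigaction sa = {
                .sa_sigaction = segv_handler,
                .sa_flags     = SA_SIGINFO,
        };

        sigaction(SIGSEGV, &sa, NULL);
        return 0;
}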
/*
* sigevent definitions


@@ -104,4 +104,6 @@
#define SO_PEERGROUPS 59
#define SO_ZEROCOPY 60
#endif /* __ASM_GENERIC_SOCKET_H */


@@ -23,27 +23,27 @@ extern "C" {
DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
struct drm_armada_gem_create {
uint32_t handle;
uint32_t size;
__u32 handle;
__u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_CREATE \
ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
struct drm_armada_gem_mmap {
uint32_t handle;
uint32_t pad;
uint64_t offset;
uint64_t size;
uint64_t addr;
__u32 handle;
__u32 pad;
__u64 offset;
__u64 size;
__u64 addr;
};
#define DRM_IOCTL_ARMADA_GEM_MMAP \
ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
struct drm_armada_gem_pwrite {
uint64_t ptr;
uint32_t handle;
uint32_t offset;
uint32_t size;
__u64 ptr;
__u32 handle;
__u32 offset;
__u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_PWRITE \
ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)


@@ -700,6 +700,7 @@ struct drm_prime_handle {
struct drm_syncobj_create {
__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
__u32 flags;
};
@@ -718,6 +719,24 @@ struct drm_syncobj_handle {
__u32 pad;
};
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
__s64 timeout_nsec;
__u32 count_handles;
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
};
struct drm_syncobj_array {
__u64 handles;
__u32 count_handles;
__u32 pad;
};
#if defined(__cplusplus)
}
#endif
@@ -840,6 +859,9 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
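
A hedged sketch of how userspace might drive the new wait ioctl (the DRM fd and syncobj handles are assumed to exist already; the timeout is absolute, in nanoseconds):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Wait until every syncobj in 'handles' is signalled or the absolute
 * deadline passes; returns the ioctl result (0 on success). */
static int wait_all_syncobjs(int drm_fd, const uint32_t *handles,
                             uint32_t count, int64_t deadline_nsec)
{
        struct drm_syncobj_wait wait = {
                .handles       = (uintptr_t)handles,
                .timeout_nsec  = deadline_nsec,
                .count_handles = count,
                .flags         = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
        };

        return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}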
/**
* Device specific ioctls should only be in their respective headers


@@ -185,6 +185,8 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
@@ -196,6 +198,15 @@ extern "C" {
* authoritative source for all of these.
*/
/*
* Invalid Modifier
*
* This modifier can be used as a sentinel to terminate the format modifiers
* list, or to initialize a variable with an invalid modifier. It might also be
* used to report an error back to userspace for certain APIs.
*/
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
/*
* Linear Layout
*
@@ -252,6 +263,26 @@ extern "C" {
*/
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
/*
* Intel color control surface (CCS) for render compression
*
* The framebuffer format must be one of the 8:8:8:8 RGB formats.
* The main surface will be plane index 0 and must be Y/Yf-tiled,
* the CCS will be plane index 1.
*
* Each CCS tile matches a 1024x512 pixel area of the main surface.
* To match certain aspects of the 3D hardware the CCS is
* considered to be made up of normal 128Bx32 Y tiles. Thus
* the CCS pitch must be specified in multiples of 128 bytes.
*
* In reality the CCS tile appears to be a 64Bx64 Y tile, composed
* of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
* But that fact is not relevant unless the memory is accessed
* directly.
*/
#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*


@@ -712,6 +712,56 @@ struct drm_mode_atomic {
__u64 user_data;
};
struct drm_format_modifier_blob {
#define FORMAT_BLOB_CURRENT 1
/* Version of this blob format */
__u32 version;
/* Flags */
__u32 flags;
/* Number of fourcc formats supported */
__u32 count_formats;
/* Where in this blob the formats exist (in bytes) */
__u32 formats_offset;
/* Number of drm_format_modifiers */
__u32 count_modifiers;
/* Where in this blob the modifiers exist (in bytes) */
__u32 modifiers_offset;
/* __u32 formats[] */
/* struct drm_format_modifier modifiers[] */
};
struct drm_format_modifier {
/* Bitmask of formats in get_plane format list this info applies to. The
* offset allows a sliding window of which 64 formats (bits).
*
* Some examples:
* In today's world with < 65 formats, where formats 0 and 2 are
* supported:
* 0x0000000000000005
* ^-offset = 0, formats = 5
*
* If the number of formats grew to 128, and formats 98-101 are
* supported with the modifier:
*
* 0x0000003c00000000 0000000000000000
* ^
* |__offset = 64, formats = 0x3c00000000
*
*/
__u64 formats;
__u32 offset;
__u32 pad;
/* The modifier that applies to the >get_plane format list bitmask. */
__u64 modifier;
};
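
To make the sliding-window bitmask concrete, here is a hedged sketch of walking a blob built from these two structs (the blob pointer is assumed to come from an IN_FORMATS-style plane property; bounds checking elided):

#include <stdint.h>
#include <stdio.h>
#include <drm/drm_mode.h>

/* Format index 'fmt_idx' is covered by modifier entry 'mod' when the
 * corresponding bit inside its 64-format window is set. */
static int modifier_covers_format(const struct drm_format_modifier *mod,
                                  uint32_t fmt_idx)
{
        if (fmt_idx < mod->offset || fmt_idx >= mod->offset + 64)
                return 0;
        return (mod->formats >> (fmt_idx - mod->offset)) & 1;
}

static void dump_blob(const struct drm_format_modifier_blob *blob)
{
        const uint32_t *formats =
                (const uint32_t *)((const char *)blob + blob->formats_offset);
        const struct drm_format_modifier *mods =
                (const struct drm_format_modifier *)
                ((const char *)blob + blob->modifiers_offset);

        for (uint32_t m = 0; m < blob->count_modifiers; m++)
                for (uint32_t f = 0; f < blob->count_formats; f++)
                        if (modifier_covers_format(&mods[m], f))
                                printf("fourcc 0x%08x + modifier 0x%llx\n",
                                       formats[f],
                                       (unsigned long long)mods[m].modifier);
}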
/**
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.


@@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_I915_PERF_OPEN 0x36
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
* drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
*/
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
struct drm_i915_gem_exec_fence {
/**
* User's handle for a drm_syncobj to wait on or signal.
*/
__u32 handle;
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
__u32 flags;
};
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
@@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 {
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
/**
* This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
* is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
* struct drm_i915_gem_exec_fence *fences.
*/
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
@@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 {
* element).
*/
#define I915_EXEC_BATCH_FIRST (1<<18)
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1))
/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
* define an array of i915_gem_exec_fence structures which specify a set of
* dma fences to wait upon or signal.
*/
#define I915_EXEC_FENCE_ARRAY (1<<19)
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
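
A hedged sketch of submitting with the new fence array (execbuffer objects, context setup and error handling are assumed to exist elsewhere; this only shows where the fence array plugs in):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask the kernel to signal 'syncobj' once this submission completes. */
static int submit_with_out_fence(int drm_fd,
                                 struct drm_i915_gem_exec_object2 *objs,
                                 uint32_t num_objs, uint32_t syncobj)
{
        struct drm_i915_gem_exec_fence fence = {
                .handle = syncobj,
                .flags  = I915_EXEC_FENCE_SIGNAL,
        };
        struct drm_i915_gem_execbuffer2 eb = {
                .buffers_ptr   = (uintptr_t)objs,
                .buffer_count  = num_objs,
                /* with I915_EXEC_FENCE_ARRAY, cliprects_ptr carries the
                 * fence array and num_cliprects its length */
                .cliprects_ptr = (uintptr_t)&fence,
                .num_cliprects = 1,
                .flags         = I915_EXEC_RENDER | I915_EXEC_FENCE_ARRAY,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
}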
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
/**
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
char uuid[36];
__u32 n_mux_regs;
__u32 n_boolean_regs;
__u32 n_flex_regs;
__u64 __user mux_regs_ptr;
__u64 __user boolean_regs_ptr;
__u64 __user flex_regs_ptr;
};
#if defined(__cplusplus)
}
#endif


@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
__u64 __user relocs; /* in, ptr to array of submit_reloc's */
__u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
__u64 __user bos; /* in, ptr to array of submit_bo's */
__u64 __user cmds; /* in, ptr to array of submit_cmd's */
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};


@@ -80,8 +80,8 @@ struct drm_qxl_reloc {
};
struct drm_qxl_command {
__u64 __user command; /* void* */
__u64 __user relocs; /* struct drm_qxl_reloc* */
__u64 command; /* void* */
__u64 relocs; /* struct drm_qxl_reloc* */
__u32 type;
__u32 command_size;
__u32 relocs_num;
@@ -91,7 +91,7 @@ struct drm_qxl_command {
struct drm_qxl_execbuffer {
__u32 flags; /* for future use */
__u32 commands_num;
__u64 __user commands; /* struct drm_qxl_command* */
__u64 commands; /* struct drm_qxl_command* */
};
struct drm_qxl_update_area {


@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VC4_GET_PARAM 0x07
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -51,6 +52,7 @@ extern "C" {
#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -153,6 +155,16 @@ struct drm_vc4_submit_cl {
__u32 pad:24;
#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
/* By default, the kernel gets to choose the order that the tiles are
* rendered in. If this is set, then the tiles will be rendered in a
* raster order, with the right-to-left vs left-to-right and
* top-to-bottom vs bottom-to-top dictated by
* VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
* blits to be implemented using the 3D engine.
*/
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
__u32 flags;
/* Returned value of the seqno of this render job (for the
@@ -292,6 +304,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
struct drm_vc4_get_param {
__u32 param;
@@ -311,6 +324,15 @@ struct drm_vc4_set_tiling {
__u64 modifier;
};
/**
* struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
*/
struct drm_vc4_label_bo {
__u32 handle;
__u32 len;
__u64 name;
};
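
A hedged example of attaching a debug name to a BO with the new ioctl (the handle is assumed to be a valid GEM handle on an open vc4 fd):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>

static int vc4_label_bo(int drm_fd, uint32_t handle, const char *name)
{
        struct drm_vc4_label_bo arg = {
                .handle = handle,
                .len    = strlen(name),
                .name   = (uintptr_t)name,
        };

        return ioctl(drm_fd, DRM_IOCTL_VC4_LABEL_BO, &arg);
}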
#if defined(__cplusplus)
}
#endif


@@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg {
* @version: Allows expanding the execbuf ioctl parameters without breaking
* backwards compatibility, since user-space will always tell the kernel
* which version it uses.
* @flags: Execbuf flags. None currently.
* @flags: Execbuf flags.
* @imported_fence_fd: FD for a fence imported from another device
*
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
#define DRM_VMW_EXECBUF_VERSION 2
#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
struct drm_vmw_execbuf_arg {
__u64 commands;
__u32 command_size;
@@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg {
__u32 version;
__u32 flags;
__u32 context_handle;
__u32 pad64;
__s32 imported_fence_fd;
};
/**
@@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg {
* @passed_seqno: The highest seqno number processed by the hardware
* so far. This can be used to mark user-space fence objects as signaled, and
* to determine whether a fence seqno might be stale.
* @fd: FD associated with the fence, -1 if not exported
* @error: This member should've been set to -EFAULT on submission.
* The following actions should be take on completion:
* error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -345,7 +350,7 @@ struct drm_vmw_fence_rep {
__u32 mask;
__u32 seqno;
__u32 passed_seqno;
__u32 pad64;
__s32 fd;
__s32 error;
};


@@ -28,6 +28,7 @@
#define __LINUX__AIO_ABI_H
#include <linux/types.h>
#include <linux/fs.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -62,14 +63,6 @@ struct io_event {
__s64 res2; /* secondary result */
};
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
#define PADDED(x,y) x, y
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define PADDED(x,y) y, x
#else
#error edit for your odd byteorder.
#endif
/*
* we always use a 64bit off_t when communicating
* with userland. It's up to libraries to do the
@@ -79,8 +72,16 @@ struct io_event {
struct iocb {
/* these are internal to the kernel/libc. */
__u64 aio_data; /* data to be returned in event's data */
__u32 PADDED(aio_key, aio_rw_flags);
/* the kernel sets aio_key to the req # */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
__u32 aio_key; /* the kernel sets aio_key to the req # */
__kernel_rwf_t aio_rw_flags; /* RWF_* flags */
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
__kernel_rwf_t aio_rw_flags; /* RWF_* flags */
__u32 aio_key; /* the kernel sets aio_key to the req # */
#else
#error edit for your odd byteorder.
#endif
/* common fields */
__u16 aio_lio_opcode; /* see IOCB_CMD_ above */


@@ -132,6 +132,7 @@ enum {
/* struct binder_fd_array_object - object describing an array of fds in a buffer
* @hdr: common header structure
* @pad: padding to ensure correct alignment
* @num_fds: number of file descriptors in the buffer
* @parent: index in offset array to buffer holding the fd array
* @parent_offset: start offset of fd array in the buffer
@@ -152,6 +153,7 @@ enum {
*/
struct binder_fd_array_object {
struct binder_object_header hdr;
__u32 pad;
binder_size_t num_fds;
binder_size_t parent;
binder_size_t parent_offset;
@@ -184,6 +186,19 @@ struct binder_version {
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
/*
* Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
* Set ptr to NULL for the first call to get the info for the first node, and
* then repeat the call passing the previously returned value to get the next
* nodes. ptr will be 0 when there are no more nodes.
*/
struct binder_node_debug_info {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
__u32 has_strong_ref;
__u32 has_weak_ref;
};
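
Following the comment above, a hedged sketch of iterating every node through the new ioctl (the fd is assumed to be an open binder device; error handling kept minimal):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void dump_binder_nodes(int binder_fd)
{
        struct binder_node_debug_info info;

        memset(&info, 0, sizeof(info));   /* ptr == 0 asks for the first node */
        do {
                if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
                        break;
                if (info.ptr)
                        printf("node %llx strong=%u weak=%u\n",
                               (unsigned long long)info.ptr,
                               info.has_strong_ref, info.has_weak_ref);
        } while (info.ptr);               /* ptr == 0 means no more nodes */
}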
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -191,6 +206,7 @@ struct binder_version {
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
/*
* NOTE: Two special error codes you should check for when calling


@@ -16,7 +16,7 @@
#define AUTOFS_DEVICE_NAME "autofs"
#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
#define AUTOFS_DEV_IOCTL_VERSION_MINOR 1
#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)


@@ -155,8 +155,6 @@ enum {
};
#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int)
#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI
#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI
#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int)
#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int)


@@ -52,6 +52,7 @@ enum blktrace_act {
__BLK_TA_REMAP, /* bio was remapped */
__BLK_TA_ABORT, /* request aborted */
__BLK_TA_DRV_DATA, /* driver-specific binary data */
__BLK_TA_CGROUP = 1 << 8, /* from a cgroup*/
};
/*
@@ -61,6 +62,7 @@ enum blktrace_notify {
__BLK_TN_PROCESS = 0, /* establish pid/name mapping */
__BLK_TN_TIMESTAMP, /* include system clock */
__BLK_TN_MESSAGE, /* Character string message */
__BLK_TN_CGROUP = __BLK_TA_CGROUP, /* from a cgroup */
};
@@ -107,6 +109,7 @@ struct blk_io_trace {
__u32 cpu; /* on what cpu did it happen */
__u16 error; /* completion error */
__u16 pdu_len; /* length of data after this trace */
/* cgroup id will be stored here if exists */
};
/*


@@ -30,9 +30,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@@ -104,6 +109,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
BPF_MAP_TYPE_DEVMAP,
BPF_MAP_TYPE_SOCKMAP,
};
enum bpf_prog_type {
@@ -121,6 +128,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
};
enum bpf_attach_type {
@@ -128,6 +136,8 @@ enum bpf_attach_type {
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
__MAX_BPF_ATTACH_TYPE
};
@@ -153,6 +163,7 @@ enum bpf_attach_type {
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
@@ -161,6 +172,8 @@ enum bpf_attach_type {
* across different LRU lists.
*/
#define BPF_F_NO_COMMON_LRU (1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
@@ -168,8 +181,13 @@ union bpf_attr {
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
__u32 map_flags; /* prealloc or not */
__u32 map_flags; /* BPF_MAP_CREATE related
* flags defined above.
*/
__u32 inner_map_fd; /* fd pointing to the inner map */
__u32 numa_node; /* numa node (effective only if
* BPF_F_NUMA_NODE is set).
*/
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -344,9 +362,20 @@ union bpf_attr {
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
* @ifindex: ifindex of the net device
* @flags: bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* Return: TC_ACT_REDIRECT
* @flags:
* cls_bpf:
* bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* xdp_bpf:
* all bits - reserved
* Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
* xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
* int bpf_redirect_map(map, key, flags)
* redirect to endpoint in map
* @map: pointer to dev map
* @key: index in map to lookup
* @flags: --
* Return: XDP_REDIRECT on success or XDP_ABORTED on error
*
* u32 bpf_get_route_realm(skb)
* retrieve a dst's tclassid
@@ -539,6 +568,20 @@ union bpf_attr {
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*
* int bpf_sk_redirect_map(map, key, flags)
* Redirect skb to a sock in map using key as a lookup key for the
* sock in map.
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
* Return: SK_REDIRECT
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
* @map: pointer to sockmap to update
* @key: key to insert/update sock in map
* @flags: same flags as map update elem
*/
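
For the new redirect helpers, a hedged eBPF program sketch in the style of this tree's samples/bpf (the SEC() macro, struct bpf_map_def and the bpf_redirect_map() declaration are assumed to come from a bpf_helpers.h-style header on the userspace side):

#include <linux/bpf.h>
#include "bpf_helpers.h"   /* assumed helper declarations, samples/bpf style */

struct bpf_map_def SEC("maps") tx_port = {
        .type        = BPF_MAP_TYPE_DEVMAP,
        .key_size    = sizeof(int),
        .value_size  = sizeof(int),
        .max_entries = 64,
};

/* Redirect every packet to the net device stored at index 0 of the
 * devmap; bpf_redirect_map() returns XDP_REDIRECT on success and
 * XDP_ABORTED on error, both valid XDP actions. */
SEC("xdp_redirect_map")
int xdp_redirect_prog(struct xdp_md *ctx)
{
        return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";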
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -591,7 +634,10 @@ union bpf_attr {
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
FN(skb_adjust_room),
FN(skb_adjust_room), \
FN(redirect_map), \
FN(sk_redirect_map), \
FN(sock_map_update), \
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -668,6 +714,15 @@ struct __sk_buff {
__u32 data;
__u32 data_end;
__u32 napi_id;
/* accessed by BPF_PROG_TYPE_sk_skb types */
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
__u32 local_ip4; /* Stored in network byte order */
__u32 remote_ip6[4]; /* Stored in network byte order */
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
};
struct bpf_tunnel_key {
@@ -703,20 +758,23 @@ struct bpf_sock {
__u32 family;
__u32 type;
__u32 protocol;
__u32 mark;
__u32 priority;
};
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
* A valid XDP program must return one of these defined values. All other
* return codes are reserved for future use. Unknown return codes will result
* in packet drop.
* return codes are reserved for future use. Unknown return codes will
* result in packet drops and a warning via bpf_warn_invalid_xdp_action().
*/
enum xdp_action {
XDP_ABORTED = 0,
XDP_DROP,
XDP_PASS,
XDP_TX,
XDP_REDIRECT,
};
/* user accessible metadata for XDP packet hook
@@ -727,6 +785,12 @@ struct xdp_md {
__u32 data_end;
};
enum sk_action {
SK_ABORTED = 0,
SK_DROP,
SK_REDIRECT,
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {


@@ -255,13 +255,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
/*
* some patches floated around with a second compression method
* lets save that incompat here for when they do get in
* Note we don't actually support it, we're just reserving the
* number
*/
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD (1ULL << 4)
/*
* older kernels tried to do bigger metadata blocks, but the


@@ -60,9 +60,13 @@ typedef struct __user_cap_data_struct {
#define VFS_CAP_U32_2 2
#define XATTR_CAPS_SZ_2 (sizeof(__le32)*(1 + 2*VFS_CAP_U32_2))
#define XATTR_CAPS_SZ XATTR_CAPS_SZ_2
#define VFS_CAP_U32 VFS_CAP_U32_2
#define VFS_CAP_REVISION VFS_CAP_REVISION_2
#define VFS_CAP_REVISION_3 0x03000000
#define VFS_CAP_U32_3 2
#define XATTR_CAPS_SZ_3 (sizeof(__le32)*(2 + 2*VFS_CAP_U32_3))
#define XATTR_CAPS_SZ XATTR_CAPS_SZ_3
#define VFS_CAP_U32 VFS_CAP_U32_3
#define VFS_CAP_REVISION VFS_CAP_REVISION_3
struct vfs_cap_data {
__le32 magic_etc; /* Little endian */
@@ -72,6 +76,18 @@ struct vfs_cap_data {
} data[VFS_CAP_U32];
};
/*
* same as vfs_cap_data but with a rootid at the end
*/
struct vfs_ns_cap_data {
__le32 magic_etc;
struct {
__le32 permitted; /* Little endian */
__le32 inheritable; /* Little endian */
} data[VFS_CAP_U32];
__le32 rootid;
};
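
A hedged userspace sketch of reading the capability xattr raw and telling the v2 and namespaced v3 layouts apart by size (error handling elided):

#include <endian.h>
#include <stdio.h>
#include <sys/xattr.h>
#include <linux/capability.h>

static void show_cap_revision(const char *path)
{
        struct vfs_ns_cap_data caps;
        ssize_t len = getxattr(path, "security.capability",
                               &caps, sizeof(caps));

        if (len < 0)
                return;
        /* magic_etc is little endian; the revision lives in the top byte */
        printf("%s: revision 0x%08x, %zd bytes%s\n", path,
               le32toh(caps.magic_etc) & VFS_CAP_REVISION_MASK, len,
               len == XATTR_CAPS_SZ_3 ? " (namespaced, rootid present)" : "");
}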
#ifndef __KERNEL__
/*


@@ -226,4 +226,22 @@ enum devlink_dpipe_action_type {
DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY,
};
enum devlink_dpipe_field_ethernet_id {
DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
};
enum devlink_dpipe_field_ipv4_id {
DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
};
enum devlink_dpipe_field_ipv6_id {
DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
};
enum devlink_dpipe_header_id {
DEVLINK_DPIPE_HEADER_ETHERNET,
DEVLINK_DPIPE_HEADER_IPV4,
DEVLINK_DPIPE_HEADER_IPV6,
};
#endif /* _UAPI_LINUX_DEVLINK_H_ */


@@ -10,6 +10,7 @@
#define _DLM_NETLINK_H
#include <linux/types.h>
#include <linux/dlmconstants.h>
enum {
DLM_STATUS_WAITING = 1,


@@ -18,10 +18,13 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_ICMP 2
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TXSTATUS 4
#define SO_EE_ORIGIN_ZEROCOPY 5
#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
#define SO_EE_CODE_ZEROCOPY_COPIED 1
/**
* struct scm_timestamping - timestamps exposed through cmsg
*


@@ -1238,6 +1238,47 @@ struct ethtool_per_queue_op {
char data[];
};
/**
* struct ethtool_fecparam - Ethernet forward error correction(fec) parameters
* @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
* @active_fec: FEC mode which is active on the port
* @fec: Bitmask of supported/configured FEC modes
* @reserved: Reserved for future extensions, e.g. the FEC bypass feature.
*
* Drivers should reject a non-zero setting of @autoneg when
* autonegotiation is disabled (or not supported) for the link.
*
*/
struct ethtool_fecparam {
__u32 cmd;
/* bitmask of FEC modes */
__u32 active_fec;
__u32 fec;
__u32 reserved;
};
/**
* enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
* @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
* @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
* @ETHTOOL_FEC_OFF: No FEC Mode
* @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode
* @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode
*/
enum ethtool_fec_config_bits {
ETHTOOL_FEC_NONE_BIT,
ETHTOOL_FEC_AUTO_BIT,
ETHTOOL_FEC_OFF_BIT,
ETHTOOL_FEC_RS_BIT,
ETHTOOL_FEC_BASER_BIT,
};
#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
#define ETHTOOL_FEC_AUTO (1 << ETHTOOL_FEC_AUTO_BIT)
#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
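
A hedged sketch of querying the new FEC parameters through the classic SIOCETHTOOL path (the caller is assumed to provide an AF_INET datagram socket and an interface name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static void show_fec(int sock, const char *ifname)
{
        struct ethtool_fecparam fec = { .cmd = ETHTOOL_GFECPARAM };
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&fec;

        if (ioctl(sock, SIOCETHTOOL, &ifr) == 0)
                printf("%s: FEC supported/configured 0x%x, active 0x%x\n",
                       ifname, fec.fec, fec.active_fec);
}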
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
* Please use ETHTOOL_GLINKSETTINGS
@@ -1330,6 +1371,8 @@ struct ethtool_per_queue_op {
#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */
#define ETHTOOL_PHY_GTUNABLE 0x0000004e /* Get PHY tunable configuration */
#define ETHTOOL_PHY_STUNABLE 0x0000004f /* Set PHY tunable configuration */
#define ETHTOOL_GFECPARAM 0x00000050 /* Get FEC settings */
#define ETHTOOL_SFECPARAM 0x00000051 /* Set FEC settings */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -1387,6 +1430,9 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1395,7 +1441,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
= ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
= ETHTOOL_LINK_MODE_FEC_BASER_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
@@ -1707,6 +1753,8 @@ enum ethtool_reset_flags {
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1758,7 +1806,9 @@ struct ethtool_link_settings {
__u8 eth_tp_mdix;
__u8 eth_tp_mdix_ctrl;
__s8 link_mode_masks_nwords;
__u32 reserved[8];
__u8 transceiver;
__u8 reserved1[3];
__u32 reserved[7];
__u32 link_mode_masks[0];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];


@@ -358,13 +358,25 @@ struct fscrypt_key {
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
/* flags for preadv2/pwritev2: */
#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
#define RWF_DSYNC 0x00000002 /* per-IO O_DSYNC */
#define RWF_SYNC 0x00000004 /* per-IO O_SYNC */
#define RWF_NOWAIT 0x00000008 /* per-IO, return -EAGAIN if operation would block */
/*
* Flags for preadv2/pwritev2:
*/
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC |\
RWF_NOWAIT)
typedef int __bitwise __kernel_rwf_t;
/* high priority request, poll if possible */
#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001)
/* per-IO O_DSYNC */
#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002)
/* per-IO O_SYNC */
#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004)
/* per-IO, return -EAGAIN if operation would block */
#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
#endif /* _UAPI_LINUX_FS_H */
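
A hedged example of the per-IO flags with preadv2() (the wrapper and the RWF_* macros are exposed by recent glibc with _GNU_SOURCE; on older libcs the raw syscall would be needed):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>

/* With RWF_NOWAIT the read returns -1/EAGAIN instead of blocking when
 * the data is not already available. */
static ssize_t read_nowait(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

        if (n < 0 && errno == EAGAIN)
                fprintf(stderr, "data not ready, would block\n");
        return n;
}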


@@ -96,7 +96,7 @@ fsmap_advance(
#define FMR_OF_EXTENT_MAP 0x4 /* segment = extent map */
#define FMR_OF_SHARED 0x8 /* segment = shared with another file */
#define FMR_OF_SPECIAL_OWNER 0x10 /* owner is a special value */
#define FMR_OF_LAST 0x20 /* segment is the last in the FS */
#define FMR_OF_LAST 0x20 /* segment is the last in the dataset */
/* Each FS gets to define its own special owner codes. */
#define FMR_OWNER(type, code) (((__u64)type << 32) | \


@@ -59,6 +59,7 @@
#define ARPHRD_LAPB 516 /* LAPB */
#define ARPHRD_DDCMP 517 /* Digital's DDCMP protocol */
#define ARPHRD_RAWHDLC 518 /* Raw HDLC */
#define ARPHRD_RAWIP 519 /* Raw IP */
#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
#define ARPHRD_TUNNEL6 769 /* IP6IP6 tunnel */


@@ -66,6 +66,7 @@
#define ETH_P_ATALK 0x809B /* Appletalk DDP */
#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
#define ETH_P_ERSPAN 0x88BE /* ERSPAN type II */
#define ETH_P_IPX 0x8137 /* IPX over DIX */
#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */
@@ -98,11 +99,13 @@
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */
#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */
#define ETH_P_NSH 0x894F /* Network Service Header */
#define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
@@ -137,6 +140,9 @@
#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */
#define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and
* aggregation protocol
*/
/*
* This is an Ethernet frame header.


@@ -134,6 +134,7 @@ enum {
IFLA_GRE_COLLECT_METADATA,
IFLA_GRE_IGNORE_DF,
IFLA_GRE_FWMARK,
IFLA_GRE_ERSPAN_INDEX,
__IFLA_GRE_MAX,
};


@@ -142,6 +142,8 @@ enum {
INET_DIAG_PAD,
INET_DIAG_MARK,
INET_DIAG_BBRINFO,
INET_DIAG_CLASS_ID,
INET_DIAG_MD5SIG,
__INET_DIAG_MAX,
};


@@ -23,15 +23,15 @@
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <linux/types.h>
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 1
struct kfd_ioctl_get_version_args {
uint32_t major_version; /* from KFD */
uint32_t minor_version; /* from KFD */
__u32 major_version; /* from KFD */
__u32 minor_version; /* from KFD */
};
/* For kfd_ioctl_create_queue_args.queue_type. */
@@ -43,36 +43,36 @@ struct kfd_ioctl_get_version_args {
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
uint64_t ring_base_address; /* to KFD */
uint64_t write_pointer_address; /* from KFD */
uint64_t read_pointer_address; /* from KFD */
uint64_t doorbell_offset; /* from KFD */
__u64 ring_base_address; /* to KFD */
__u64 write_pointer_address; /* from KFD */
__u64 read_pointer_address; /* from KFD */
__u64 doorbell_offset; /* from KFD */
uint32_t ring_size; /* to KFD */
uint32_t gpu_id; /* to KFD */
uint32_t queue_type; /* to KFD */
uint32_t queue_percentage; /* to KFD */
uint32_t queue_priority; /* to KFD */
uint32_t queue_id; /* from KFD */
__u32 ring_size; /* to KFD */
__u32 gpu_id; /* to KFD */
__u32 queue_type; /* to KFD */
__u32 queue_percentage; /* to KFD */
__u32 queue_priority; /* to KFD */
__u32 queue_id; /* from KFD */
uint64_t eop_buffer_address; /* to KFD */
uint64_t eop_buffer_size; /* to KFD */
uint64_t ctx_save_restore_address; /* to KFD */
uint64_t ctx_save_restore_size; /* to KFD */
__u64 eop_buffer_address; /* to KFD */
__u64 eop_buffer_size; /* to KFD */
__u64 ctx_save_restore_address; /* to KFD */
__u64 ctx_save_restore_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {
uint32_t queue_id; /* to KFD */
uint32_t pad;
__u32 queue_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_update_queue_args {
uint64_t ring_base_address; /* to KFD */
__u64 ring_base_address; /* to KFD */
uint32_t queue_id; /* to KFD */
uint32_t ring_size; /* to KFD */
uint32_t queue_percentage; /* to KFD */
uint32_t queue_priority; /* to KFD */
__u32 queue_id; /* to KFD */
__u32 ring_size; /* to KFD */
__u32 queue_percentage; /* to KFD */
__u32 queue_priority; /* to KFD */
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -80,13 +80,13 @@ struct kfd_ioctl_update_queue_args {
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
uint64_t alternate_aperture_base; /* to KFD */
uint64_t alternate_aperture_size; /* to KFD */
__u64 alternate_aperture_base; /* to KFD */
__u64 alternate_aperture_size; /* to KFD */
uint32_t gpu_id; /* to KFD */
uint32_t default_policy; /* to KFD */
uint32_t alternate_policy; /* to KFD */
uint32_t pad;
__u32 gpu_id; /* to KFD */
__u32 default_policy; /* to KFD */
__u32 alternate_policy; /* to KFD */
__u32 pad;
};
/*
@@ -97,26 +97,26 @@ struct kfd_ioctl_set_memory_policy_args {
*/
struct kfd_ioctl_get_clock_counters_args {
uint64_t gpu_clock_counter; /* from KFD */
uint64_t cpu_clock_counter; /* from KFD */
uint64_t system_clock_counter; /* from KFD */
uint64_t system_clock_freq; /* from KFD */
__u64 gpu_clock_counter; /* from KFD */
__u64 cpu_clock_counter; /* from KFD */
__u64 system_clock_counter; /* from KFD */
__u64 system_clock_freq; /* from KFD */
uint32_t gpu_id; /* to KFD */
uint32_t pad;
__u32 gpu_id; /* to KFD */
__u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_process_device_apertures {
uint64_t lds_base; /* from KFD */
uint64_t lds_limit; /* from KFD */
uint64_t scratch_base; /* from KFD */
uint64_t scratch_limit; /* from KFD */
uint64_t gpuvm_base; /* from KFD */
uint64_t gpuvm_limit; /* from KFD */
uint32_t gpu_id; /* from KFD */
uint32_t pad;
__u64 lds_base; /* from KFD */
__u64 lds_limit; /* from KFD */
__u64 scratch_base; /* from KFD */
__u64 scratch_limit; /* from KFD */
__u64 gpuvm_base; /* from KFD */
__u64 gpuvm_limit; /* from KFD */
__u32 gpu_id; /* from KFD */
__u32 pad;
};
struct kfd_ioctl_get_process_apertures_args {
@@ -124,8 +124,8 @@ struct kfd_ioctl_get_process_apertures_args {
process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
uint32_t num_of_nodes;
uint32_t pad;
__u32 num_of_nodes;
__u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
@@ -133,25 +133,25 @@ struct kfd_ioctl_get_process_apertures_args {
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
uint32_t gpu_id; /* to KFD */
uint32_t pad;
__u32 gpu_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
uint32_t gpu_id; /* to KFD */
uint32_t pad;
__u32 gpu_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
uint64_t content_ptr; /* a pointer to the actual content */
uint32_t gpu_id; /* to KFD */
uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */
__u64 content_ptr; /* a pointer to the actual content */
__u32 gpu_id; /* to KFD */
__u32 buf_size_in_bytes; /*including gpu_id and buf_size */
};
struct kfd_ioctl_dbg_wave_control_args {
uint64_t content_ptr; /* a pointer to the actual content */
uint32_t gpu_id; /* to KFD */
uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */
__u64 content_ptr; /* a pointer to the actual content */
__u32 gpu_id; /* to KFD */
__u32 buf_size_in_bytes; /*including gpu_id and buf_size */
};
/* Matching HSA_EVENTTYPE */
@@ -172,44 +172,44 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_SIGNAL_EVENT_LIMIT 256
struct kfd_ioctl_create_event_args {
uint64_t event_page_offset; /* from KFD */
uint32_t event_trigger_data; /* from KFD - signal events only */
uint32_t event_type; /* to KFD */
uint32_t auto_reset; /* to KFD */
uint32_t node_id; /* to KFD - only valid for certain
__u64 event_page_offset; /* from KFD */
__u32 event_trigger_data; /* from KFD - signal events only */
__u32 event_type; /* to KFD */
__u32 auto_reset; /* to KFD */
__u32 node_id; /* to KFD - only valid for certain
event types */
uint32_t event_id; /* from KFD */
uint32_t event_slot_index; /* from KFD */
__u32 event_id; /* from KFD */
__u32 event_slot_index; /* from KFD */
};
struct kfd_ioctl_destroy_event_args {
uint32_t event_id; /* to KFD */
uint32_t pad;
__u32 event_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_set_event_args {
uint32_t event_id; /* to KFD */
uint32_t pad;
__u32 event_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_reset_event_args {
uint32_t event_id; /* to KFD */
uint32_t pad;
__u32 event_id; /* to KFD */
__u32 pad;
};
struct kfd_memory_exception_failure {
uint32_t NotPresent; /* Page not present or supervisor privilege */
uint32_t ReadOnly; /* Write access to a read-only page */
uint32_t NoExecute; /* Execute access to a page marked NX */
uint32_t pad;
__u32 NotPresent; /* Page not present or supervisor privilege */
__u32 ReadOnly; /* Write access to a read-only page */
__u32 NoExecute; /* Execute access to a page marked NX */
__u32 pad;
};
/* memory exception data*/
struct kfd_hsa_memory_exception_data {
struct kfd_memory_exception_failure failure;
uint64_t va;
uint32_t gpu_id;
uint32_t pad;
__u64 va;
__u32 gpu_id;
__u32 pad;
};
/* Event data*/
@@ -217,19 +217,48 @@ struct kfd_event_data {
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
}; /* From KFD */
uint64_t kfd_event_data_ext; /* pointer to an extension structure
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
uint32_t event_id; /* to KFD */
uint32_t pad;
__u32 event_id; /* to KFD */
__u32 pad;
};
struct kfd_ioctl_wait_events_args {
uint64_t events_ptr; /* pointed to struct
__u64 events_ptr; /* pointed to struct
kfd_event_data array, to KFD */
uint32_t num_events; /* to KFD */
uint32_t wait_for_all; /* to KFD */
uint32_t timeout; /* to KFD */
uint32_t wait_result; /* from KFD */
__u32 num_events; /* to KFD */
__u32 wait_for_all; /* to KFD */
__u32 timeout; /* to KFD */
__u32 wait_result; /* from KFD */
};
struct kfd_ioctl_set_scratch_backing_va_args {
uint64_t va_addr; /* to KFD */
uint32_t gpu_id; /* to KFD */
uint32_t pad;
};
struct kfd_ioctl_get_tile_config_args {
/* to KFD: pointer to tile array */
uint64_t tile_config_ptr;
/* to KFD: pointer to macro tile array */
uint64_t macro_tile_config_ptr;
/* to KFD: array size allocated by user mode
* from KFD: array size filled by kernel
*/
uint32_t num_tile_configs;
/* to KFD: array size allocated by user mode
* from KFD: array size filled by kernel
*/
uint32_t num_macro_tile_configs;
uint32_t gpu_id; /* to KFD */
uint32_t gb_addr_config; /* from KFD */
uint32_t num_banks; /* from KFD */
uint32_t num_ranks; /* from KFD */
/* struct size can be extended later if needed
* without breaking ABI compatibility
*/
};
#define AMDKFD_IOCTL_BASE 'K'
@@ -286,7 +315,13 @@ struct kfd_ioctl_wait_events_args {
#define AMDKFD_IOC_DBG_WAVE_CONTROL \
AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG \
AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x11
#define AMDKFD_COMMAND_END 0x13
#endif


@@ -711,7 +711,8 @@ struct kvm_ppc_one_seg_page_size {
struct kvm_ppc_smmu_info {
__u64 flags;
__u32 slb_size;
__u32 pad;
__u16 data_keys; /* # storage keys supported for data */
__u16 instr_keys; /* # storage keys supported for instructions */
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};


@@ -22,7 +22,6 @@ enum {
LO_FLAGS_AUTOCLEAR = 4,
LO_FLAGS_PARTSCAN = 8,
LO_FLAGS_DIRECT_IO = 16,
LO_FLAGS_BLOCKSIZE = 32,
};
#include <asm/posix_types.h> /* for __kernel_old_dev_t */
@@ -60,8 +59,6 @@ struct loop_info64 {
__u64 lo_init[2];
};
#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
/*
* Loop filter types
*/
@@ -91,6 +88,7 @@ struct loop_info64 {
#define LOOP_CHANGE_FD 0x4C06
#define LOOP_SET_CAPACITY 0x4C07
#define LOOP_SET_DIRECT_IO 0x4C08
#define LOOP_SET_BLOCK_SIZE 0x4C09
/* /dev/loop-control interface */
#define LOOP_CTL_ADD 0x4C80


@@ -11,6 +11,7 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_IP6,
LWTUNNEL_ENCAP_SEG6,
LWTUNNEL_ENCAP_BPF,
LWTUNNEL_ENCAP_SEG6_LOCAL,
__LWTUNNEL_ENCAP_MAX,
};


@@ -40,14 +40,33 @@
* (non-running threads are de facto in such a
* state). This covers threads from all processes
* running on the system. This command returns 0.
* @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
* Execute a memory barrier on each running
* thread belonging to the same process as the current
* thread. Upon return from system call, the
* caller thread is ensured that all of its running
* thread siblings have passed through a state
* where all memory accesses to user-space
* addresses match program order between entry
* to and return from the system call
* (non-running threads are de facto in such a
* state). This only covers threads from the
* same processes as the caller thread. This
* command returns 0. The "expedited" commands
* complete faster than the non-expedited ones,
* they never block, but have the downside of
* causing extra overhead.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
* the value 0.
*/
enum membarrier_cmd {
MEMBARRIER_CMD_QUERY = 0,
MEMBARRIER_CMD_SHARED = (1 << 0),
MEMBARRIER_CMD_QUERY = 0,
MEMBARRIER_CMD_SHARED = (1 << 0),
/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
};
#endif /* _UAPI_LINUX_MEMBARRIER_H */
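
A hedged usage sketch (there is no libc wrapper for membarrier(2) at this point, so the raw syscall is used; the query result is a bitmask of the commands above):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/membarrier.h>

static int sys_membarrier(int cmd, int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
        int supported = sys_membarrier(MEMBARRIER_CMD_QUERY, 0);

        if (supported > 0 && (supported & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
                /* memory barrier only across threads of this process */
                sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
        return 0;
}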


@@ -1,8 +1,32 @@
#ifndef _UAPI_LINUX_MEMFD_H
#define _UAPI_LINUX_MEMFD_H
#include <asm-generic/hugetlb_encode.h>
/* flags for memfd_create(2) (unsigned int) */
#define MFD_CLOEXEC 0x0001U
#define MFD_ALLOW_SEALING 0x0002U
#define MFD_HUGETLB 0x0004U
/*
* Huge page size encoding when MFD_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h.
* All known huge page size encodings are provided here. It is the
* responsibility of the application to know which sizes are supported on
* the running system. See mmap(2) man page for details.
*/
#define MFD_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MFD_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
#define MFD_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MFD_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MFD_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
#endif /* _UAPI_LINUX_MEMFD_H */
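
A hedged sketch combining the new flag with an explicit size encoding (memfd_create(2) is invoked via the raw syscall since older libcs lack a wrapper; 2 MB huge pages must be configured on the system for this to succeed):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/memfd.h>

int main(void)
{
        /* anonymous file backed by 2 MB huge pages */
        int fd = syscall(__NR_memfd_create, "hugebuf",
                         MFD_CLOEXEC | MFD_HUGETLB | MFD_HUGE_2MB);

        if (fd < 0)
                perror("memfd_create");
        else
                printf("hugetlb memfd: fd=%d\n", fd);
        return 0;
}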


@@ -2,6 +2,7 @@
#define _UAPI_LINUX_MMAN_H
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
@@ -10,4 +11,25 @@
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER 2
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h.
* All known huge page size encodings are provided here. It is the
* responsibility of the application to know which sizes are supported on
* the running system. See mmap(2) man page for details.
*/
#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
#endif /* _UAPI_LINUX_MMAN_H */
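
And the mmap(2) counterpart, a hedged sketch requesting an explicit 1 GB huge page (1 GB pages must be reserved on the system; MAP_HUGETLB itself comes from the libc's <sys/mman.h>):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <linux/mman.h>

int main(void)
{
        size_t len = 1UL << 30;         /* one 1 GB huge page */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
                       -1, 0);

        if (p == MAP_FAILED)
                perror("mmap");
        else
                munmap(p, len);
        return 0;
}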


@@ -145,43 +145,6 @@ struct nd_cmd_clear_error {
__u64 cleared;
} __packed;
struct nd_cmd_trans_spa {
__u64 spa;
__u32 status;
__u8 flags;
__u8 _reserved[3];
__u64 trans_length;
__u32 num_nvdimms;
struct nd_nvdimm_device {
__u32 nfit_device_handle;
__u32 _reserved;
__u64 dpa;
} __packed devices[0];
} __packed;
struct nd_cmd_ars_err_inj {
__u64 err_inj_spa_range_base;
__u64 err_inj_spa_range_length;
__u8 err_inj_options;
__u32 status;
} __packed;
struct nd_cmd_ars_err_inj_clr {
__u64 err_inj_clr_spa_range_base;
__u64 err_inj_clr_spa_range_length;
__u32 status;
} __packed;
struct nd_cmd_ars_err_inj_stat {
__u32 status;
__u32 inj_err_rec_count;
struct nd_error_stat_query_record {
__u64 err_inj_stat_spa_range_base;
__u64 err_inj_stat_spa_range_length;
} __packed record[0];
} __packed;
enum {
ND_CMD_IMPLEMENTED = 0,

View File

@@ -1,10 +1,11 @@
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_SET_MAXNAMELEN 32
#define NFT_OBJ_MAXNAMELEN 32
#define NFT_NAME_MAXLEN 256
#define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_CHAIN_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_USERDATA_MAXLEN 256
/**
@@ -731,7 +732,8 @@ enum nft_exthdr_op {
* @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
* @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
* @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32)
* @NFTA_EXTHDR_OP: option match type (NLA_U8)
* @NFTA_EXTHDR_OP: option match type (NLA_U32)
* @NFTA_EXTHDR_SREG: source register (NLA_U32)
*/
enum nft_exthdr_attributes {
NFTA_EXTHDR_UNSPEC,
@@ -741,6 +743,7 @@ enum nft_exthdr_attributes {
NFTA_EXTHDR_LEN,
NFTA_EXTHDR_FLAGS,
NFTA_EXTHDR_OP,
NFTA_EXTHDR_SREG,
__NFTA_EXTHDR_MAX
};
#define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1)
@@ -808,11 +811,13 @@ enum nft_meta_keys {
* @NFT_RT_CLASSID: realm value of packet's route (skb->dst->tclassid)
* @NFT_RT_NEXTHOP4: routing nexthop for IPv4
* @NFT_RT_NEXTHOP6: routing nexthop for IPv6
* @NFT_RT_TCPMSS: fetch current path tcp mss
*/
enum nft_rt_keys {
NFT_RT_CLASSID,
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
NFT_RT_TCPMSS,
};
/**
@@ -1221,6 +1226,8 @@ enum nft_objref_attributes {
enum nft_gen_attributes {
NFTA_GEN_UNSPEC,
NFTA_GEN_ID,
NFTA_GEN_PROC_PID,
NFTA_GEN_PROC_NAME,
__NFTA_GEN_MAX
};
#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
@@ -1275,7 +1282,8 @@ enum nft_ct_helper_attributes {
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
#define __NFT_OBJECT_MAX 4
#define NFT_OBJECT_LIMIT 4
#define __NFT_OBJECT_MAX 5
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**

View File

@@ -19,12 +19,13 @@
struct xt_hashlimit_htable;
enum {
XT_HASHLIMIT_HASH_DIP = 1 << 0,
XT_HASHLIMIT_HASH_DPT = 1 << 1,
XT_HASHLIMIT_HASH_SIP = 1 << 2,
XT_HASHLIMIT_HASH_SPT = 1 << 3,
XT_HASHLIMIT_INVERT = 1 << 4,
XT_HASHLIMIT_BYTES = 1 << 5,
XT_HASHLIMIT_RATE_MATCH = 1 << 6,
};
struct hashlimit_cfg {
@@ -79,6 +80,21 @@ struct hashlimit_cfg2 {
__u8 srcmask, dstmask;
};
struct hashlimit_cfg3 {
__u64 avg; /* Average secs between packets * scale */
__u64 burst; /* Period multiplier for upper limit. */
__u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
/* user specified */
__u32 size; /* how many buckets */
__u32 max; /* max number of entries */
__u32 gc_interval; /* gc interval */
__u32 expire; /* when do entries expire? */
__u32 interval;
__u8 srcmask, dstmask;
};
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -95,4 +111,12 @@ struct xt_hashlimit_mtinfo2 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
struct xt_hashlimit_mtinfo3 {
char name[NAME_MAX];
struct hashlimit_cfg3 cfg;
/* Used internally by the kernel */
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
#endif /* _UAPI_XT_HASHLIMIT_H */

View File

@@ -69,6 +69,9 @@ struct nlmsghdr {
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/* Modifiers to DELETE request */
#define NLM_F_NONREC 0x100 /* Do not delete recursively */
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
#define NLM_F_ACK_TLVS 0x200 /* extended ACK TVLs were included */
@@ -226,5 +229,22 @@ struct nlattr {
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
/* Generic 32 bitflags attribute content sent to the kernel.
*
* The value is a bitmap of the flag bits being set.
* The selector is a bitmask that defines which of those bits are valid.
*
* Examples:
* value = 0x0, and selector = 0x1
* implies we are selecting bit 1 and we want to set its value to 0.
*
* value = 0x2, and selector = 0x2
* implies we are selecting bit 2 and we want to set its value to 1.
*
*/
struct nla_bitfield32 {
__u32 value;
__u32 selector;
};
#endif /* _UAPI__LINUX_NETLINK_H */
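A small illustrative helper (not part of the patch) showing how a user-space encoder might fill the new structure to set some flag bits and clear others, following the value/selector semantics described in the comment:

#include <linux/netlink.h>
#include <linux/types.h>

/* Build a bitfield32 payload: bits in 'set' are set to 1, bits in 'clear'
 * are set to 0, and everything outside the selector is left untouched by
 * the kernel.
 */
static struct nla_bitfield32 nla_bitfield32_build(__u32 set, __u32 clear)
{
	struct nla_bitfield32 bf = {
		.value    = set,		/* desired bit values */
		.selector = set | clear,	/* which bits are valid */
	};
	return bf;
}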

View File

@@ -513,6 +513,7 @@
#define PCI_EXP_DEVSTA_URD 0x0008 /* Unsupported Request Detected */
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
@@ -556,7 +557,7 @@
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
@@ -639,7 +640,7 @@
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
@@ -647,6 +648,7 @@
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
@@ -733,23 +735,17 @@
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
/* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001
/* Non-fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
/* Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_STATUS 48
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
/* Multi ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
/* ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
/* Multi ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
@@ -967,6 +963,7 @@
#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0xF00 /* RP PIO log size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
@@ -980,6 +977,15 @@
#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO MASK */
#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */
#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */
#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */
#define PCI_EXP_DPC_RP_PIO_HEADER_LOG 0x20 /* RP PIO Header Log */
#define PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG 0x30 /* RP PIO ImpSpec Log */
#define PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG 0x34 /* RP PIO TLP Prefix Log */
/* Precision Time Measurement */
#define PCI_PTM_CAP 0x04 /* PTM Capability */
#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */

View File

@@ -139,8 +139,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_IDENTIFIER = 1U << 16,
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
};
/*
@@ -174,6 +175,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -198,9 +201,30 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
/*
* Common flow change classification
*/
enum {
PERF_BR_UNKNOWN = 0, /* unknown */
PERF_BR_COND = 1, /* conditional */
PERF_BR_UNCOND = 2, /* unconditional */
PERF_BR_IND = 3, /* indirect */
PERF_BR_CALL = 4, /* function call */
PERF_BR_IND_CALL = 5, /* indirect function call */
PERF_BR_RET = 6, /* function return */
PERF_BR_SYSCALL = 7, /* syscall */
PERF_BR_SYSRET = 8, /* syscall return */
PERF_BR_COND_CALL = 9, /* conditional function call */
PERF_BR_COND_RET = 10, /* conditional function return */
PERF_BR_MAX,
};
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -791,6 +815,7 @@ enum perf_event_type {
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -931,14 +956,20 @@ union perf_mem_data_src {
mem_snoop:5, /* snoop mode */
mem_lock:2, /* lock instr */
mem_dtlb:7, /* tlb access */
mem_rsvd:31;
mem_lvl_num:4, /* memory hierarchy level number */
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
mem_rsvd:24;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
__u64 mem_rsvd:31,
__u64 mem_rsvd:24,
mem_snoopx:2, /* snoop mode, ext */
mem_remote:1, /* remote */
mem_lvl_num:4, /* memory hierarchy level number */
mem_dtlb:7, /* tlb access */
mem_lock:2, /* lock instr */
mem_snoop:5, /* snoop mode */
@@ -975,6 +1006,22 @@ union perf_mem_data_src {
#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT 5
#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */
#define PERF_MEM_REMOTE_SHIFT 37
#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
/* 5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
#define PERF_MEM_LVLNUM_SHIFT 33
/* snoop mode */
#define PERF_MEM_SNOOP_NA 0x01 /* not available */
#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
@@ -983,6 +1030,10 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
/* 1 free */
#define PERF_MEM_SNOOPX_SHIFT 37
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
@@ -1015,6 +1066,7 @@ union perf_mem_data_src {
* in_tx: running in a hardware transaction
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
*/
struct perf_branch_entry {
__u64 from;
@@ -1024,7 +1076,8 @@ struct perf_branch_entry {
in_tx:1, /* in transaction */
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
reserved:44;
type:4, /* branch type */
reserved:40;
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
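For orientation, a helper a profiler might use to pick apart the new memory-hierarchy fields from a raw PERF_SAMPLE_DATA_SRC value; this is only a sketch built on the union above:

#include <linux/perf_event.h>
#include <stdio.h>

/* Decode the fields added above (level number, remote, extended snoop mode). */
void print_mem_data_src(__u64 data_src)
{
	union perf_mem_data_src src = { .val = data_src };

	printf("lvl_num=%u remote=%u snoopx=%#x\n",
	       (unsigned int)src.mem_lvl_num,
	       (unsigned int)src.mem_remote,
	       (unsigned int)src.mem_snoopx);
}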

View File

@@ -95,8 +95,8 @@ struct pps_kparams {
#define PPS_CAPTURECLEAR 0x02 /* capture clear events */
#define PPS_CAPTUREBOTH 0x03 /* capture assert and clear events */
#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert ev. */
#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear ev. */
#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert event */
#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear event */
#define PPS_CANWAIT 0x100 /* can we wait for an event? */
#define PPS_CANPOLL 0x200 /* bit reserved for future use */

View File

@@ -33,7 +33,6 @@
#ifndef _UAPI_LINUX_QUOTA_
#define _UAPI_LINUX_QUOTA_
#include <linux/errno.h>
#include <linux/types.h>
#define __DQUOT_VERSION__ "dquot_6.6.0"

View File

@@ -324,9 +324,10 @@ struct mdp_superblock_1 {
#define MD_FEATURE_RECOVERY_BITMAP 128 /* recovery that is happening
* is guided by bitmap.
*/
#define MD_FEATURE_CLUSTERED 256 /* clustered MD */
#define MD_FEATURE_JOURNAL 512 /* support write cache */
#define MD_FEATURE_PPL 1024 /* support PPL */
#define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
@@ -338,6 +339,7 @@ struct mdp_superblock_1 {
|MD_FEATURE_CLUSTERED \
|MD_FEATURE_JOURNAL \
|MD_FEATURE_PPL \
|MD_FEATURE_MULTIPLE_PPLS \
)
struct r5l_payload_header {

View File

@@ -683,10 +683,29 @@ struct tcamsg {
unsigned char tca__pad1;
unsigned short tca__pad2;
};
enum {
TCA_ROOT_UNSPEC,
TCA_ROOT_TAB,
#define TCA_ACT_TAB TCA_ROOT_TAB
#define TCAA_MAX TCA_ROOT_TAB
TCA_ROOT_FLAGS,
TCA_ROOT_COUNT,
TCA_ROOT_TIME_DELTA, /* in msecs */
__TCA_ROOT_MAX,
#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
};
#define TA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcamsg))))
#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg))
#define TCA_ACT_TAB 1 /* attr type must be >=1 */
#define TCAA_MAX 1
/* tcamsg flags stored in attribute TCA_ROOT_FLAGS
*
* TCA_FLAG_LARGE_DUMP_ON: user->kernel request to dump more than TCA_ACT_MAX_PRIO
* actions in one batch. Every dump response carries the number of actions being
* dumped in TCA_ROOT_COUNT for the user application's consumption.
*
*/
#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)

include/uapi/linux/rxrpc.h Normal file
View File

@@ -0,0 +1,124 @@
/* Types and definitions for AF_RXRPC.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _UAPI_LINUX_RXRPC_H
#define _UAPI_LINUX_RXRPC_H
#include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>
/*
* RxRPC socket address
*/
struct sockaddr_rxrpc {
sa_family_t srx_family; /* address family */
u16 srx_service; /* service desired */
u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
u16 transport_len; /* length of transport address */
union {
sa_family_t family; /* transport address family */
struct sockaddr_in sin; /* IPv4 transport address */
struct sockaddr_in6 sin6; /* IPv6 transport address */
} transport;
};
/*
* RxRPC socket options
*/
#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */
#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */
/*
* RxRPC control messages
* - If neither abort nor accept is specified, the message is a data message.
* - terminal messages mean that a user call ID tag can be recycled
* - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
*/
enum rxrpc_cmsg_type {
RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */
RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */
RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */
RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */
RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
RXRPC__SUPPORTED
};
/*
* RxRPC security levels
*/
#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */
#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */
#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */
/*
* RxRPC security indices
*/
#define RXRPC_SECURITY_NONE 0 /* no security protocol */
#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */
#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
/*
* RxRPC-level abort codes
*/
#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */
#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */
#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */
#define RX_EOF -4 /* unexpected end of data on read op */
#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */
#define RX_USER_ABORT -6 /* generic user abort */
#define RX_ADDRINUSE -7 /* UDP port in use */
#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */
/*
* (un)marshalling abort codes (rxgen)
*/
#define RXGEN_CC_MARSHAL -450
#define RXGEN_CC_UNMARSHAL -451
#define RXGEN_SS_MARSHAL -452
#define RXGEN_SS_UNMARSHAL -453
#define RXGEN_DECODE -454
#define RXGEN_OPCODE -455
#define RXGEN_SS_XDRFREE -456
#define RXGEN_CC_XDRFREE -457
/*
* Rx kerberos security abort codes
* - unfortunately we have no generalised security abort codes to say things
* like "unsupported security", so we have to use these instead and hope the
* other side understands
*/
#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */
#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */
#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */
#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */
#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */
#define RXKADNOAUTH 19270405 /* caller not authorised */
#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */
#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */
#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */
#define RXKADEXPIRED 19270409 /* authentication expired */
#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */
#define RXKADDATALEN 19270411 /* user data too long */
#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */
#endif /* _UAPI_LINUX_RXRPC_H */

View File

@@ -11,27 +11,34 @@
#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
/* Valid operations for seccomp syscall. */
#define SECCOMP_SET_MODE_STRICT 0
#define SECCOMP_SET_MODE_FILTER 1
#define SECCOMP_GET_ACTION_AVAIL 2
/* Valid flags for SECCOMP_SET_MODE_FILTER */
#define SECCOMP_FILTER_FLAG_TSYNC 1
#define SECCOMP_FILTER_FLAG_LOG 2
/*
* All BPF programs must return a 32-bit value.
* The bottom 16-bits are for optional return data.
* The upper 16-bits are ordered from least permissive values to most.
* The upper 16-bits are ordered from least permissive values to most,
* as a signed value (so 0x80000000 is negative).
*
* The ordering ensures that a min_t() over composed return values always
* selects the least permissive choice.
*/
#define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
/* Masks for the return value sections. */
#define SECCOMP_RET_ACTION_FULL 0xffff0000U
#define SECCOMP_RET_ACTION 0x7fff0000U
#define SECCOMP_RET_DATA 0x0000ffffU
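To make the layout above concrete, a hedged sketch (not part of the patch) that first asks the kernel whether the new SECCOMP_RET_LOG action exists, then installs a filter returning SECCOMP_RET_ERRNO with the errno carried in the low data bits; a production filter would also validate seccomp_data.arch:

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* SECCOMP_GET_ACTION_AVAIL: does this kernel know SECCOMP_RET_LOG? */
	__u32 action = SECCOMP_RET_LOG;

	if (syscall(__NR_seccomp, SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0)
		printf("SECCOMP_RET_LOG is available\n");

	/* Deny chroot(2) with EPERM; the errno lives in SECCOMP_RET_DATA. */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chroot, 0, 1),
		BPF_STMT(BPF_RET | BPF_K,
			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog))
		perror("seccomp");
	return 0;
}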

View File

@@ -33,16 +33,26 @@ struct seg6_iptunnel_encap {
enum {
SEG6_IPTUN_MODE_INLINE,
SEG6_IPTUN_MODE_ENCAP,
SEG6_IPTUN_MODE_L2ENCAP,
};
#ifdef __KERNEL__
static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
{
int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP);
int head = 0;
return ((tuninfo->srh->hdrlen + 1) << 3) +
(encap * sizeof(struct ipv6hdr));
switch (tuninfo->mode) {
case SEG6_IPTUN_MODE_INLINE:
break;
case SEG6_IPTUN_MODE_ENCAP:
head = sizeof(struct ipv6hdr);
break;
case SEG6_IPTUN_MODE_L2ENCAP:
return 0;
}
return ((tuninfo->srh->hdrlen + 1) << 3) + head;
}
#endif

View File

@@ -0,0 +1,68 @@
/*
* SR-IPv6 implementation
*
* Author:
* David Lebrun <david.lebrun@uclouvain.be>
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _UAPI_LINUX_SEG6_LOCAL_H
#define _UAPI_LINUX_SEG6_LOCAL_H
#include <linux/seg6.h>
enum {
SEG6_LOCAL_UNSPEC,
SEG6_LOCAL_ACTION,
SEG6_LOCAL_SRH,
SEG6_LOCAL_TABLE,
SEG6_LOCAL_NH4,
SEG6_LOCAL_NH6,
SEG6_LOCAL_IIF,
SEG6_LOCAL_OIF,
__SEG6_LOCAL_MAX,
};
#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
enum {
SEG6_LOCAL_ACTION_UNSPEC = 0,
/* node segment */
SEG6_LOCAL_ACTION_END = 1,
/* adjacency segment (IPv6 cross-connect) */
SEG6_LOCAL_ACTION_END_X = 2,
/* lookup of next seg NH in table */
SEG6_LOCAL_ACTION_END_T = 3,
/* decap and L2 cross-connect */
SEG6_LOCAL_ACTION_END_DX2 = 4,
/* decap and IPv6 cross-connect */
SEG6_LOCAL_ACTION_END_DX6 = 5,
/* decap and IPv4 cross-connect */
SEG6_LOCAL_ACTION_END_DX4 = 6,
/* decap and lookup of DA in v6 table */
SEG6_LOCAL_ACTION_END_DT6 = 7,
/* decap and lookup of DA in v4 table */
SEG6_LOCAL_ACTION_END_DT4 = 8,
/* binding segment with insertion */
SEG6_LOCAL_ACTION_END_B6 = 9,
/* binding segment with encapsulation */
SEG6_LOCAL_ACTION_END_B6_ENCAP = 10,
/* binding segment with MPLS encap */
SEG6_LOCAL_ACTION_END_BM = 11,
/* lookup last seg in table */
SEG6_LOCAL_ACTION_END_S = 12,
/* forward to SR-unaware VNF with static proxy */
SEG6_LOCAL_ACTION_END_AS = 13,
/* forward to SR-unaware VNF with masquerading */
SEG6_LOCAL_ACTION_END_AM = 14,
__SEG6_LOCAL_ACTION_MAX,
};
#define SEG6_LOCAL_ACTION_MAX (__SEG6_LOCAL_ACTION_MAX - 1)
#endif

View File

@@ -56,8 +56,6 @@
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
#define PORT_16550A_FSL64 30 /* Freescale 16550 UART with 64 FIFOs */
#define PORT_DA830 31 /* TI DA8xx/66AK2x */
#define PORT_MAX_8250 31 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -70,12 +68,17 @@
#define PORT_CLPS711X 33
#define PORT_SA1100 34
#define PORT_UART00 35
#define PORT_OWL 36
#define PORT_21285 37
/* Sparc type numbers. */
#define PORT_SUNZILOG 38
#define PORT_SUNSAB 39
/* Intel EG20 */
#define PORT_PCH_8LINE 44
#define PORT_PCH_2LINE 45
/* DEC */
#define PORT_DZ 46
#define PORT_ZS 47
@@ -205,8 +208,8 @@
/* MAX310X */
#define PORT_MAX310X 94
/* High Speed UART for Medfield */
#define PORT_MFD 95
/* TI DA8xx/66AK2x */
#define PORT_DA830 95
/* TI OMAP-UART */
#define PORT_OMAP 96
@@ -271,4 +274,7 @@
/* MPS2 UART */
#define PORT_MPS2UART 116
/* MediaTek BTIF */
#define PORT_MTK_BTIF 117
#endif /* _UAPILINUX_SERIAL_CORE_H */

View File

@@ -3,6 +3,7 @@
#include <linux/ipc.h>
#include <linux/errno.h>
#include <asm-generic/hugetlb_encode.h>
#ifndef __KERNEL__
#include <unistd.h>
#endif
@@ -40,11 +41,37 @@ struct shmid_ds {
/* Include the definition of shmid64_ds and shminfo64 */
#include <asm/shmbuf.h>
/* permission flag for shmget */
/*
* shmget() shmflg values.
*/
/* The bottom nine bits are the same as open(2) mode flags */
#define SHM_R 0400 /* or S_IRUGO from <linux/stat.h> */
#define SHM_W 0200 /* or S_IWUGO from <linux/stat.h> */
/* Bits 9 & 10 are IPC_CREAT and IPC_EXCL */
#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
#define SHM_NORESERVE 010000 /* don't check for reservations */
/* mode for attach */
/*
* Huge page size encoding when SHM_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h
*/
#define SHM_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define SHM_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
#define SHM_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define SHM_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define SHM_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
/*
* shmat() shmflg values
*/
#define SHM_RDONLY 010000 /* read-only access */
#define SHM_RND 020000 /* round attach address to SHMLBA boundary */
#define SHM_REMAP 040000 /* take-over region on attach */
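And a comparable sketch for SysV shared memory (again illustrative only; the fallback defines mirror the values above for libcs that do not expose them):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB	04000
#endif
#ifndef SHM_HUGE_SHIFT
#define SHM_HUGE_SHIFT	26
#endif
#ifndef SHM_HUGE_2MB
#define SHM_HUGE_2MB	(21 << SHM_HUGE_SHIFT)	/* log2(2 MB) == 21 */
#endif

int main(void)
{
	/* One 2 MB huge page backing a private SysV segment. */
	int id = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
			IPC_CREAT | SHM_HUGETLB | SHM_HUGE_2MB | 0600);

	if (id < 0) {
		perror("shmget");		/* e.g. no huge pages reserved */
		return 1;
	}
	shmctl(id, IPC_RMID, NULL);		/* mark for removal right away */
	return 0;
}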

View File

@@ -184,12 +184,7 @@ enum
LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */
LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */
LINUX_MIB_LISTENDROPS, /* ListenDrops */
LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */
LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */
LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */
LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */
LINUX_MIB_TCPHPHITS, /* TCPHPHits */
LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */
LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */
LINUX_MIB_TCPHPACKS, /* TCPHPAcks */
LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */
@@ -208,14 +203,12 @@ enum
LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */
LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */
LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */
LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */
LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */
LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */
LINUX_MIB_TCPLOSSPROBERECOVERY, /* TCPLossProbeRecovery */
LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */
LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */
LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */
LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */
LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */
LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */

View File

@@ -231,6 +231,14 @@ enum {
TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */
TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */
TCP_NLA_PACING_RATE, /* Pacing rate in bytes per second */
TCP_NLA_DELIVERY_RATE, /* Delivery rate in bytes per second */
TCP_NLA_SND_CWND, /* Sending congestion window */
TCP_NLA_REORDERING, /* Reordering metric */
TCP_NLA_MIN_RTT, /* minimum RTT */
TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */
TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
};
/* for TCP_MD5SIG socket option */
@@ -248,4 +256,13 @@ struct tcp_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
};
/* INET_DIAG_MD5SIG */
struct tcp_diag_md5sig {
__u8 tcpm_family;
__u8 tcpm_prefixlen;
__u16 tcpm_keylen;
__be32 tcpm_addr[4];
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
};
#endif /* _UAPI_LINUX_TCP_H */

View File

@@ -49,6 +49,7 @@
#define TEE_MAX_ARG_SIZE 1024
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
/*
* TEE Implementation ID

View File

@@ -0,0 +1,31 @@
/*
* This file defines the USB charger type and state that are needed for
* USB device APIs.
*/
#ifndef _UAPI__LINUX_USB_CHARGER_H
#define _UAPI__LINUX_USB_CHARGER_H
/*
* USB charger type:
* SDP (Standard Downstream Port)
* DCP (Dedicated Charging Port)
* CDP (Charging Downstream Port)
* ACA (Accessory Charger Adapters)
*/
enum usb_charger_type {
UNKNOWN_TYPE,
SDP_TYPE,
DCP_TYPE,
CDP_TYPE,
ACA_TYPE,
};
/* USB charger state */
enum usb_charger_state {
USB_CHARGER_DEFAULT,
USB_CHARGER_PRESENT,
USB_CHARGER_ABSENT,
};
#endif /* _UAPI__LINUX_USB_CHARGER_H */

View File

@@ -23,7 +23,9 @@
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
UFFD_FEATURE_MISSING_SHMEM)
UFFD_FEATURE_MISSING_SHMEM | \
UFFD_FEATURE_SIGBUS | \
UFFD_FEATURE_THREAD_ID)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -78,6 +80,9 @@ struct uffd_msg {
struct {
__u64 flags;
__u64 address;
union {
__u32 ptid;
} feat;
} pagefault;
struct {
@@ -153,6 +158,13 @@ struct uffdio_api {
* UFFD_FEATURE_MISSING_SHMEM works the same as
* UFFD_FEATURE_MISSING_HUGETLBFS, but it applies to shmem
* (i.e. tmpfs and other shmem based APIs).
*
* UFFD_FEATURE_SIGBUS means no page-fault (UFFD_EVENT_PAGEFAULT)
* event will be delivered; instead a SIGBUS signal will be sent
* to the faulting process.
*
* UFFD_FEATURE_THREAD_ID means the pid of the faulting task_struct
* will be returned; if the feature is not requested, 0 will be
* returned.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -161,6 +173,8 @@ struct uffdio_api {
#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
#define UFFD_FEATURE_SIGBUS (1<<7)
#define UFFD_FEATURE_THREAD_ID (1<<8)
__u64 features;
__u64 ioctls;
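For orientation, a minimal sketch of negotiating the two new feature bits at UFFD_API time (error handling trimmed; __NR_userfaultfd is assumed to be available from <sys/syscall.h>):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api;

	if (ufd < 0) {
		perror("userfaultfd");
		return 1;
	}
	memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	/* Ask for SIGBUS delivery instead of fault events, plus faulting thread ids. */
	api.features = UFFD_FEATURE_SIGBUS | UFFD_FEATURE_THREAD_ID;
	if (ioctl(ufd, UFFDIO_API, &api) < 0)
		perror("UFFDIO_API");	/* older kernels reject unknown feature bits */
	else
		printf("negotiated features: 0x%llx\n",
		       (unsigned long long)api.features);
	close(ufd);
	return 0;
}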

View File

@@ -1,7 +1,7 @@
#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
* and lguest, but hopefully others soon. Do NOT change this since it will
/* An interface for efficient virtio implementation, currently for use by KVM,
* but hopefully others soon. Do NOT change this since it will
* break existing servers and clients.
*
* This header is BSD licensed so anyone can use the definitions to implement

View File

@@ -304,6 +304,7 @@ enum xfrm_attr_type_t {
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
XFRMA_OUTPUT_MARK, /* __u32 */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)

View File

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef IB_USER_IOCTL_VERBS_H
#define IB_USER_IOCTL_VERBS_H
#include <rdma/rdma_user_ioctl.h>
#define UVERBS_UDATA_DRIVER_DATA_NS 1
#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
enum uverbs_default_objects {
UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
UVERBS_OBJECT_PD,
UVERBS_OBJECT_COMP_CHANNEL,
UVERBS_OBJECT_CQ,
UVERBS_OBJECT_QP,
UVERBS_OBJECT_SRQ,
UVERBS_OBJECT_AH,
UVERBS_OBJECT_MR,
UVERBS_OBJECT_MW,
UVERBS_OBJECT_FLOW,
UVERBS_OBJECT_XRCD,
UVERBS_OBJECT_RWQ_IND_TBL,
UVERBS_OBJECT_WQ,
UVERBS_OBJECT_LAST,
};
enum {
UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
UVERBS_UHW_OUT,
};
enum uverbs_create_cq_cmd_attr_ids {
CREATE_CQ_HANDLE,
CREATE_CQ_CQE,
CREATE_CQ_USER_HANDLE,
CREATE_CQ_COMP_CHANNEL,
CREATE_CQ_COMP_VECTOR,
CREATE_CQ_FLAGS,
CREATE_CQ_RESP_CQE,
};
enum uverbs_destroy_cq_cmd_attr_ids {
DESTROY_CQ_HANDLE,
DESTROY_CQ_RESP,
};
enum uverbs_actions_cq_ops {
UVERBS_CQ_CREATE,
UVERBS_CQ_DESTROY,
};
#endif

View File

@@ -236,6 +236,20 @@ struct ib_uverbs_rss_caps {
__u32 reserved;
};
struct ib_uverbs_tm_caps {
/* Max size of rendezvous request message */
__u32 max_rndv_hdr_size;
/* Max number of entries in tag matching list */
__u32 max_num_tags;
/* TM flags */
__u32 flags;
/* Max number of outstanding list operations */
__u32 max_ops;
/* Max number of SGE in tag matching entry */
__u32 max_sge;
__u32 reserved;
};
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
@@ -247,6 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps xrq_caps;
};
struct ib_uverbs_query_port {
@@ -578,7 +593,7 @@ struct ib_uverbs_ex_create_qp {
__u32 comp_mask;
__u32 create_flags;
__u32 rwq_ind_tbl_handle;
__u32 reserved1;
__u32 source_qpn;
};
struct ib_uverbs_open_qp {
@@ -1024,7 +1039,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
__u32 reserved;
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
__u64 driver_data[0];

View File

@@ -95,13 +95,63 @@ struct mlx4_ib_create_srq_resp {
__u32 reserved;
};
struct mlx4_ib_create_qp_rss {
__u64 rx_hash_fields_mask;
__u8 rx_hash_function;
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
__u32 reserved1;
};
struct mlx4_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
__u8 reserved;
__u32 inl_recv_sz;
};
struct mlx4_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
};
struct mlx4_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
struct mlx4_ib_create_rwq_ind_tbl_resp {
__u32 response_length;
__u32 reserved;
};
/* RX Hash function flags */
enum mlx4_ib_rx_hash_function_flags {
MLX4_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
/*
* RX Hash flags. These flags allow selecting which incoming packet fields
* should participate in RX Hash: each flag represents a certain packet field,
* and when the flag is set that field is included in the RX Hash
* calculation.
*/
enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_IPV4 = 1 << 0,
MLX4_IB_RX_HASH_DST_IPV4 = 1 << 1,
MLX4_IB_RX_HASH_SRC_IPV6 = 1 << 2,
MLX4_IB_RX_HASH_DST_IPV6 = 1 << 3,
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
};
#endif /* MLX4_ABI_USER_H */

View File

@@ -168,6 +168,28 @@ struct mlx5_packet_pacing_caps {
__u32 reserved;
};
enum mlx5_ib_mpw_caps {
MPW_RESERVED = 1 << 0,
MLX5_IB_ALLOW_MPW = 1 << 1,
MLX5_IB_SUPPORT_EMPW = 1 << 2,
};
enum mlx5_ib_sw_parsing_offloads {
MLX5_IB_SW_PARSING = 1 << 0,
MLX5_IB_SW_PARSING_CSUM = 1 << 1,
MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
struct mlx5_ib_sw_parsing_caps {
__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
/* Corresponding bit will be set if qp type from
* 'enum ib_qp_type' is supported, e.g.
* supported_qpts |= 1 << IB_QPT_RAW_PACKET
*/
__u32 supported_qpts;
};
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
@@ -177,6 +199,7 @@ struct mlx5_ib_query_device_resp {
struct mlx5_packet_pacing_caps packet_pacing_caps;
__u32 mlx5_ib_support_multi_pkt_send_wqes;
__u32 reserved;
struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
};
struct mlx5_ib_create_cq {

View File

@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr;
__u32 sges_per_srq_wr;
__u32 max_cqes;
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
};
struct qedr_alloc_pd_ureq {

View File

@@ -8,7 +8,7 @@ enum {
RDMA_NL_IWCM,
RDMA_NL_RSVD,
RDMA_NL_LS, /* RDMA Local Services */
RDMA_NL_I40IW,
RDMA_NL_NLDEV, /* RDMA device interface */
RDMA_NL_NUM_CLIENTS
};
@@ -222,4 +222,86 @@ struct rdma_nla_ls_gid {
__u8 gid[16];
};
enum rdma_nldev_command {
RDMA_NLDEV_CMD_UNSPEC,
RDMA_NLDEV_CMD_GET, /* can dump */
RDMA_NLDEV_CMD_SET,
RDMA_NLDEV_CMD_NEW,
RDMA_NLDEV_CMD_DEL,
RDMA_NLDEV_CMD_PORT_GET, /* can dump */
RDMA_NLDEV_CMD_PORT_SET,
RDMA_NLDEV_CMD_PORT_NEW,
RDMA_NLDEV_CMD_PORT_DEL,
RDMA_NLDEV_NUM_OPS
};
enum rdma_nldev_attr {
/* don't change the order or add anything between, this is ABI! */
RDMA_NLDEV_ATTR_UNSPEC,
/* Identifier for ib_device */
RDMA_NLDEV_ATTR_DEV_INDEX, /* u32 */
RDMA_NLDEV_ATTR_DEV_NAME, /* string */
/*
* Device index together with port index are identifiers
* for port/link properties.
*
* For the RDMA_NLDEV_CMD_GET command, the port index will return the number
* of available ports in the ib_device, while for port specific operations
* it will be the real port index as it appears in sysfs. The port index
* follows sysfs notation and starts from 1 for the first port.
*/
RDMA_NLDEV_ATTR_PORT_INDEX, /* u32 */
/*
* Device and port capabilities
*/
RDMA_NLDEV_ATTR_CAP_FLAGS, /* u64 */
/*
* FW version
*/
RDMA_NLDEV_ATTR_FW_VERSION, /* string */
/*
* Node GUID (in host byte order) associated with the RDMA device.
*/
RDMA_NLDEV_ATTR_NODE_GUID, /* u64 */
/*
* System image GUID (in host byte order) associated with
* this RDMA device and other devices which are part of a
* single system.
*/
RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, /* u64 */
/*
* Subnet prefix (in host byte order)
*/
RDMA_NLDEV_ATTR_SUBNET_PREFIX, /* u64 */
/*
* Local Identifier (LID).
* According to the IB specification, it is a 16-bit address assigned
* by the Subnet Manager; extended to 32 bits for OmniPath users.
*/
RDMA_NLDEV_ATTR_LID, /* u32 */
RDMA_NLDEV_ATTR_SM_LID, /* u32 */
/*
* LID mask control (LMC)
*/
RDMA_NLDEV_ATTR_LMC, /* u8 */
RDMA_NLDEV_ATTR_PORT_STATE, /* u8 */
RDMA_NLDEV_ATTR_PORT_PHYS_STATE, /* u8 */
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */

View File

@@ -43,6 +43,39 @@
/* Legacy name, for user space application which already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
#define RDMA_VERBS_IOCTL \
_IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
#define UVERBS_ID_NS_MASK 0xF000
#define UVERBS_ID_NS_SHIFT 12
enum {
/* User input */
UVERBS_ATTR_F_MANDATORY = 1U << 0,
/*
* Valid output bit should be ignored and considered set in
* mandatory fields. This bit is kernel output.
*/
UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
};
struct ib_uverbs_attr {
__u16 attr_id; /* command specific type attribute */
__u16 len; /* only for pointers */
__u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
__u16 reserved;
__u64 data; /* ptr to command, inline data or idr/fd */
};
struct ib_uverbs_ioctl_hdr {
__u16 length;
__u16 object_id;
__u16 method_id;
__u16 num_attrs;
__u64 reserved;
struct ib_uverbs_attr attrs[0];
};
/*
* General block assignments
* It is closed on purpose - do not expose it to user space

View File

@@ -125,7 +125,8 @@ enum pvrdma_wc_flags {
PVRDMA_WC_IP_CSUM_OK = 1 << 3,
PVRDMA_WC_WITH_SMAC = 1 << 4,
PVRDMA_WC_WITH_VLAN = 1 << 5,
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_VLAN,
PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
struct pvrdma_alloc_ucontext_resp {
@@ -283,7 +284,8 @@ struct pvrdma_cqe {
__u8 dlid_path_bits;
__u8 port_num;
__u8 smac[6];
__u8 reserved2[7]; /* Pad to next power of 2 (64). */
__u8 network_hdr_type;
__u8 reserved2[6]; /* Pad to next power of 2 (64). */
};
#endif /* __VMW_PVRDMA_ABI_H__ */

View File

@@ -163,8 +163,71 @@
*
* %SKL_TKN_U32_DMA_BUF_SIZE: DMA buffer size in millisec
*
* %SKL_TKN_U32_PIPE_DIR: Specifies pipe direction. Can be
* playback/capture.
*
* %SKL_TKN_U32_NUM_CONFIGS: Number of pipe configs
*
* %SKL_TKN_U32_PATH_MEM_PGS: Size of memory (in pages) required for pipeline
* and its data
*
* %SKL_TKN_U32_PIPE_CONFIG_ID: Config id for the modules in the pipe
* and PCM params supported by that pipe
* config. This is used as index to fill
* up the pipe config and module config
* structure.
*
* %SKL_TKN_U32_CFG_FREQ:
* %SKL_TKN_U8_CFG_CHAN:
* %SKL_TKN_U8_CFG_BPS: PCM params (freq, channels, bits per sample)
* supported for each of the pipe configs.
*
* %SKL_TKN_CFG_MOD_RES_ID: Module's resource index for each of the
* pipe config
*
* %SKL_TKN_CFG_MOD_FMT_ID: Module's interface index for each of the
* pipe config
*
* %SKL_TKN_U8_NUM_MOD: Number of modules in the manifest
*
* %SKL_TKN_MM_U8_MOD_IDX: Current index of the module in the manifest
*
* %SKL_TKN_MM_U8_NUM_RES: Number of resources for the module
*
* %SKL_TKN_MM_U8_NUM_INTF: Number of interfaces for the module
*
* %SKL_TKN_MM_U32_RES_ID: Resource index for the resource info to
* be filled into.
* A module can support multiple resource
* configuration and is represnted as a
* resource table. This index is used to
* fill information into appropriate index.
*
* %SKL_TKN_MM_U32_CPS: DSP cycles per second
*
* %SKL_TKN_MM_U32_DMA_SIZE: Allocated buffer size for gateway DMA
*
* %SKL_TKN_MM_U32_CPC: DSP cycles allocated per frame
*
* %SKL_TKN_MM_U32_RES_PIN_ID: Resource pin index in the module
*
* %SKL_TKN_MM_U32_INTF_PIN_ID: Interface index in the module
*
* %SKL_TKN_MM_U32_PIN_BUF: Buffer size of the module pin
*
* %SKL_TKN_MM_U32_FMT_ID: Format index for each of the interface/
* format information to be filled into.
*
* %SKL_TKN_MM_U32_NUM_IN_FMT: Number of input formats
* %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
*
* module_id and loadable flags don't have tokens, as these values will be
* read from the DSP FW manifest
*
* Tokens defined can be used either in the manifest or widget private data.
*
* SKL_TKN_MM is used as a suffix for all tokens that represent
* module data in the manifest.
*/
enum SKL_TKNS {
SKL_TKN_UUID = 1,
@@ -218,7 +281,34 @@ enum SKL_TKNS {
SKL_TKL_U32_D0I3_CAPS, /* Typo added at v4.10 */
SKL_TKN_U32_D0I3_CAPS = SKL_TKL_U32_D0I3_CAPS,
SKL_TKN_U32_DMA_BUF_SIZE,
SKL_TKN_MAX = SKL_TKN_U32_DMA_BUF_SIZE,
SKL_TKN_U32_PIPE_DIRECTION,
SKL_TKN_U32_PIPE_CONFIG_ID,
SKL_TKN_U32_NUM_CONFIGS,
SKL_TKN_U32_PATH_MEM_PGS,
SKL_TKN_U32_CFG_FREQ,
SKL_TKN_U8_CFG_CHAN,
SKL_TKN_U8_CFG_BPS,
SKL_TKN_CFG_MOD_RES_ID,
SKL_TKN_CFG_MOD_FMT_ID,
SKL_TKN_U8_NUM_MOD,
SKL_TKN_MM_U8_MOD_IDX,
SKL_TKN_MM_U8_NUM_RES,
SKL_TKN_MM_U8_NUM_INTF,
SKL_TKN_MM_U32_RES_ID,
SKL_TKN_MM_U32_CPS,
SKL_TKN_MM_U32_DMA_SIZE,
SKL_TKN_MM_U32_CPC,
SKL_TKN_MM_U32_RES_PIN_ID,
SKL_TKN_MM_U32_INTF_PIN_ID,
SKL_TKN_MM_U32_PIN_BUF,
SKL_TKN_MM_U32_FMT_ID,
SKL_TKN_MM_U32_NUM_IN_FMT,
SKL_TKN_MM_U32_NUM_OUT_FMT,
SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
};
#endif