Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts!

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski
2021-09-16 13:58:38 -07:00
1129 changed files with 36760 additions and 13185 deletions


@@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240


@@ -50,7 +50,7 @@ enum { CXL_CMDS };
#define ___C(a, b) { b }
static const struct {
const char *name;
} cxl_command_names[] = { CXL_CMDS };
} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
/*
* Here's how this actually breaks out:


@@ -9,6 +9,30 @@
#include <stdint.h>
#endif
/* Driver command error status */
enum idxd_scmd_stat {
IDXD_SCMD_DEV_ENABLED = 0x80000010,
IDXD_SCMD_DEV_NOT_ENABLED = 0x80000020,
IDXD_SCMD_WQ_ENABLED = 0x80000021,
IDXD_SCMD_DEV_DMA_ERR = 0x80020000,
IDXD_SCMD_WQ_NO_GRP = 0x80030000,
IDXD_SCMD_WQ_NO_NAME = 0x80040000,
IDXD_SCMD_WQ_NO_SVM = 0x80050000,
IDXD_SCMD_WQ_NO_THRESH = 0x80060000,
IDXD_SCMD_WQ_PORTAL_ERR = 0x80070000,
IDXD_SCMD_WQ_RES_ALLOC_ERR = 0x80080000,
IDXD_SCMD_PERCPU_ERR = 0x80090000,
IDXD_SCMD_DMA_CHAN_ERR = 0x800a0000,
IDXD_SCMD_CDEV_ERR = 0x800b0000,
IDXD_SCMD_WQ_NO_SWQ_SUPPORT = 0x800c0000,
IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
#define IDXD_SCMD_SOFTERR_SHIFT 16
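/*
 * Not part of this header: a hedged helper showing how the mask above can be
 * consumed. Bit 31 marks a driver-detected (soft) error; reading the shift as
 * separating an error class in the upper 16 bits from extra detail in the
 * lower 16 bits is an assumption, not something the header states.
 */
static inline int idxd_scmd_is_softerr(unsigned int scmd_stat)
{
	return (scmd_stat & IDXD_SCMD_SOFTERR_MASK) != 0;
}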
/* Descriptor flags */
#define IDXD_OP_FLAG_FENCE 0x0001
#define IDXD_OP_FLAG_BOF 0x0002

include/uapi/linux/vduse.h (new file, 306 lines)

@@ -0,0 +1,306 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_VDUSE_H_
#define _UAPI_VDUSE_H_
#include <linux/types.h>
#define VDUSE_BASE 0x81
/* The ioctls for control device (/dev/vduse/control) */
#define VDUSE_API_VERSION 0
/*
* Get the version of the VDUSE API that the kernel supports (VDUSE_API_VERSION).
* This is used for future extension.
*/
#define VDUSE_GET_API_VERSION _IOR(VDUSE_BASE, 0x00, __u64)
/* Set the version of the VDUSE API that userspace supports. */
#define VDUSE_SET_API_VERSION _IOW(VDUSE_BASE, 0x01, __u64)
/**
* struct vduse_dev_config - basic configuration of a VDUSE device
* @name: VDUSE device name, needs to be NUL terminated
* @vendor_id: virtio vendor id
* @device_id: virtio device id
* @features: virtio features
* @vq_num: the number of virtqueues
* @vq_align: the allocation alignment of virtqueue's metadata
* @reserved: for future use, needs to be initialized to zero
* @config_size: the size of the configuration space
* @config: the buffer of the configuration space
*
* Structure used by VDUSE_CREATE_DEV ioctl to create VDUSE device.
*/
struct vduse_dev_config {
#define VDUSE_NAME_MAX 256
char name[VDUSE_NAME_MAX];
__u32 vendor_id;
__u32 device_id;
__u64 features;
__u32 vq_num;
__u32 vq_align;
__u32 reserved[13];
__u32 config_size;
__u8 config[];
};
/* Create a VDUSE device which is represented by a char device (/dev/vduse/$NAME) */
#define VDUSE_CREATE_DEV _IOW(VDUSE_BASE, 0x02, struct vduse_dev_config)
/*
* Destroy a VDUSE device. Make sure there are no more references
* to the char device (/dev/vduse/$NAME).
*/
#define VDUSE_DESTROY_DEV _IOW(VDUSE_BASE, 0x03, char[VDUSE_NAME_MAX])
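/*
 * Example (not part of this header): a minimal userspace sketch of the
 * control-device flow above: negotiate the API version, then create a
 * device. The name, device id, queue count and config size below are
 * illustrative assumptions.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vduse.h>

int vduse_create_example(void)
{
	__u64 version;
	struct vduse_dev_config *cfg;
	__u32 config_size = 64;               /* assumed size of the device config space */
	int ctrl = open("/dev/vduse/control", O_RDWR);

	if (ctrl < 0)
		return -1;

	ioctl(ctrl, VDUSE_GET_API_VERSION, &version);  /* version the kernel supports */
	version = VDUSE_API_VERSION;
	ioctl(ctrl, VDUSE_SET_API_VERSION, &version);  /* version userspace speaks */

	cfg = calloc(1, sizeof(*cfg) + config_size);
	if (!cfg) {
		close(ctrl);
		return -1;
	}
	strncpy(cfg->name, "vduse-test0", VDUSE_NAME_MAX - 1);
	cfg->device_id = 2;                   /* e.g. virtio-blk; illustrative */
	cfg->vendor_id = 0;
	cfg->features = 0;                    /* must be filled with the virtio feature bits the device offers */
	cfg->vq_num = 1;
	cfg->vq_align = 4096;
	cfg->config_size = config_size;

	if (ioctl(ctrl, VDUSE_CREATE_DEV, cfg) < 0) {  /* /dev/vduse/vduse-test0 appears on success */
		free(cfg);
		close(ctrl);
		return -1;
	}
	free(cfg);
	return ctrl;                          /* keep the control fd for a later VDUSE_DESTROY_DEV */
}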
/* The ioctls for VDUSE device (/dev/vduse/$NAME) */
/**
* struct vduse_iotlb_entry - entry of IOTLB to describe one IOVA region [start, last]
* @offset: the mmap offset on returned file descriptor
* @start: start of the IOVA region
* @last: last of the IOVA region
* @perm: access permission of the IOVA region
*
* Structure used by VDUSE_IOTLB_GET_FD ioctl to find an overlapped IOVA region.
*/
struct vduse_iotlb_entry {
__u64 offset;
__u64 start;
__u64 last;
#define VDUSE_ACCESS_RO 0x1
#define VDUSE_ACCESS_WO 0x2
#define VDUSE_ACCESS_RW 0x3
__u8 perm;
};
/*
* Find the first IOVA region that overlaps with the range [start, last]
* and return the corresponding file descriptor. -EINVAL is returned if no
* such IOVA region exists. The caller should set the start and last fields.
*/
#define VDUSE_IOTLB_GET_FD _IOWR(VDUSE_BASE, 0x10, struct vduse_iotlb_entry)
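/*
 * Example (not part of this header): a hedged sketch of resolving an IOVA
 * to a local mapping via VDUSE_IOTLB_GET_FD. dev_fd is an open
 * /dev/vduse/$NAME descriptor; the helper name is made up for this sketch.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vduse.h>

static void *vduse_map_iova(int dev_fd, __u64 iova, struct vduse_iotlb_entry *entry)
{
	int fd, prot = 0;
	void *addr;

	entry->start = iova;                  /* caller-set lookup range */
	entry->last = iova;

	fd = ioctl(dev_fd, VDUSE_IOTLB_GET_FD, entry);
	if (fd < 0)
		return NULL;                  /* no IOVA region overlaps [start, last] */

	if (entry->perm & VDUSE_ACCESS_RO)
		prot |= PROT_READ;
	if (entry->perm & VDUSE_ACCESS_WO)
		prot |= PROT_WRITE;

	/* the whole region [entry->start, entry->last] is backed by fd at entry->offset */
	addr = mmap(NULL, entry->last - entry->start + 1, prot, MAP_SHARED,
		    fd, entry->offset);
	close(fd);                            /* the mapping keeps its own reference */
	return addr == MAP_FAILED ? NULL : addr;
}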
/*
* Get the negotiated virtio features. It's the subset of the features in
* struct vduse_dev_config that was accepted by the virtio driver. It's
* only valid after the FEATURES_OK status bit is set.
*/
#define VDUSE_DEV_GET_FEATURES _IOR(VDUSE_BASE, 0x11, __u64)
/**
* struct vduse_config_data - data used to update configuration space
* @offset: the offset from the beginning of configuration space
* @length: the length to write to configuration space
* @buffer: the buffer used to write from
*
* Structure used by VDUSE_DEV_SET_CONFIG ioctl to update device
* configuration space.
*/
struct vduse_config_data {
__u32 offset;
__u32 length;
__u8 buffer[];
};
/* Set device configuration space */
#define VDUSE_DEV_SET_CONFIG _IOW(VDUSE_BASE, 0x12, struct vduse_config_data)
/*
* Inject a config interrupt. It's usually used to notify the virtio driver
* that the device configuration space has changed.
*/
#define VDUSE_DEV_INJECT_CONFIG_IRQ _IO(VDUSE_BASE, 0x13)
/**
* struct vduse_vq_config - basic configuration of a virtqueue
* @index: virtqueue index
* @max_size: the max size of virtqueue
* @reserved: for future use, needs to be initialized to zero
*
* Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue.
*/
struct vduse_vq_config {
__u32 index;
__u16 max_size;
__u16 reserved[13];
};
/*
* Setup the specified virtqueue. Make sure all virtqueues have been
* configured before the device is attached to the vDPA bus.
*/
#define VDUSE_VQ_SETUP _IOW(VDUSE_BASE, 0x14, struct vduse_vq_config)
/**
* struct vduse_vq_state_split - split virtqueue state
* @avail_index: available index
*/
struct vduse_vq_state_split {
__u16 avail_index;
};
/**
* struct vduse_vq_state_packed - packed virtqueue state
* @last_avail_counter: last driver ring wrap counter observed by device
* @last_avail_idx: device available index
* @last_used_counter: device ring wrap counter
* @last_used_idx: used index
*/
struct vduse_vq_state_packed {
__u16 last_avail_counter;
__u16 last_avail_idx;
__u16 last_used_counter;
__u16 last_used_idx;
};
/**
* struct vduse_vq_info - information of a virtqueue
* @index: virtqueue index
* @num: the size of virtqueue
* @desc_addr: address of desc area
* @driver_addr: address of driver area
* @device_addr: address of device area
* @split: split virtqueue state
* @packed: packed virtqueue state
* @ready: ready status of virtqueue
*
* Structure used by VDUSE_VQ_GET_INFO ioctl to get virtqueue's information.
*/
struct vduse_vq_info {
__u32 index;
__u32 num;
__u64 desc_addr;
__u64 driver_addr;
__u64 device_addr;
union {
struct vduse_vq_state_split split;
struct vduse_vq_state_packed packed;
};
__u8 ready;
};
/* Get the specified virtqueue's information. Caller should set index field. */
#define VDUSE_VQ_GET_INFO _IOWR(VDUSE_BASE, 0x15, struct vduse_vq_info)
/**
* struct vduse_vq_eventfd - eventfd configuration for a virtqueue
* @index: virtqueue index
* @fd: eventfd, -1 means de-assigning the eventfd
*
* Structure used by VDUSE_VQ_SETUP_KICKFD ioctl to setup kick eventfd.
*/
struct vduse_vq_eventfd {
__u32 index;
#define VDUSE_EVENTFD_DEASSIGN -1
int fd;
};
/*
* Setup the kick eventfd for the specified virtqueue. The kick eventfd is used
* by the VDUSE kernel module to notify userspace to consume the avail vring.
*/
#define VDUSE_VQ_SETUP_KICKFD _IOW(VDUSE_BASE, 0x16, struct vduse_vq_eventfd)
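/*
 * Example (not part of this header): assigning a kick eventfd to a virtqueue.
 * The fd returned by eventfd(2) becomes readable whenever the driver kicks
 * the queue; passing VDUSE_EVENTFD_DEASSIGN as the fd removes it again.
 */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

static int vduse_setup_kickfd(int dev_fd, __u32 vq_index)
{
	struct vduse_vq_eventfd cfg = {
		.index = vq_index,
		.fd = eventfd(0, EFD_CLOEXEC),
	};

	if (cfg.fd < 0 || ioctl(dev_fd, VDUSE_VQ_SETUP_KICKFD, &cfg) < 0)
		return -1;

	return cfg.fd;        /* poll()/read() this fd, then consume the avail vring */
}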
/*
* Inject an interrupt for a specific virtqueue. It's used to notify the virtio
* driver to consume the used vring.
*/
#define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32)
/* The control message definitions for read(2)/write(2) on /dev/vduse/$NAME */
/**
* enum vduse_req_type - request type
* @VDUSE_GET_VQ_STATE: get the state for specified virtqueue from userspace
* @VDUSE_SET_STATUS: set the device status
* @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for
* specified IOVA range via VDUSE_IOTLB_GET_FD ioctl
*/
enum vduse_req_type {
VDUSE_GET_VQ_STATE,
VDUSE_SET_STATUS,
VDUSE_UPDATE_IOTLB,
};
/**
* struct vduse_vq_state - virtqueue state
* @index: virtqueue index
* @split: split virtqueue state
* @packed: packed virtqueue state
*/
struct vduse_vq_state {
__u32 index;
union {
struct vduse_vq_state_split split;
struct vduse_vq_state_packed packed;
};
};
/**
* struct vduse_dev_status - device status
* @status: device status
*/
struct vduse_dev_status {
__u8 status;
};
/**
* struct vduse_iova_range - IOVA range [start, last]
* @start: start of the IOVA range
* @last: last of the IOVA range
*/
struct vduse_iova_range {
__u64 start;
__u64 last;
};
/**
* struct vduse_dev_request - control request
* @type: request type
* @request_id: request id
* @reserved: for future use
* @vq_state: virtqueue state, only index field is available
* @s: device status
* @iova: IOVA range for updating
* @padding: padding
*
* Structure used by read(2) on /dev/vduse/$NAME.
*/
struct vduse_dev_request {
__u32 type;
__u32 request_id;
__u32 reserved[4];
union {
struct vduse_vq_state vq_state;
struct vduse_dev_status s;
struct vduse_iova_range iova;
__u32 padding[32];
};
};
/**
* struct vduse_dev_response - response to control request
* @request_id: corresponding request id
* @result: the result of request
* @reserved: for future use, needs to be initialized to zero
* @vq_state: virtqueue state
* @padding: padding
*
* Structure used by write(2) on /dev/vduse/$NAME.
*/
struct vduse_dev_response {
__u32 request_id;
#define VDUSE_REQ_RESULT_OK 0x00
#define VDUSE_REQ_RESULT_FAILED 0x01
__u32 result;
__u32 reserved[4];
union {
struct vduse_vq_state vq_state;
__u32 padding[32];
};
};
#endif /* _UAPI_VDUSE_H_ */
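The request/response pair above drives a plain read(2)/write(2) loop on the per-device char node. A hedged userspace sketch of that loop follows; the handler bodies are placeholders, not something this header defines.

#include <string.h>
#include <unistd.h>
#include <linux/vduse.h>

static void vduse_serve_requests(int dev_fd)
{
	struct vduse_dev_request req;
	struct vduse_dev_response resp;

	while (read(dev_fd, &req, sizeof(req)) == sizeof(req)) {
		memset(&resp, 0, sizeof(resp));
		resp.request_id = req.request_id;      /* echo the request id back */
		resp.result = VDUSE_REQ_RESULT_OK;

		switch (req.type) {
		case VDUSE_GET_VQ_STATE:
			/* report the state of the queried virtqueue */
			resp.vq_state.index = req.vq_state.index;
			resp.vq_state.split.avail_index = 0;   /* placeholder */
			break;
		case VDUSE_SET_STATUS:
			/* apply req.s.status, e.g. start I/O once DRIVER_OK is set */
			break;
		case VDUSE_UPDATE_IOTLB:
			/* refresh mappings for [req.iova.start, req.iova.last]
			 * via VDUSE_IOTLB_GET_FD
			 */
			break;
		default:
			resp.result = VDUSE_REQ_RESULT_FAILED;
			break;
		}

		write(dev_fd, &resp, sizeof(resp));
	}
}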


@@ -54,9 +54,18 @@
#define VIRTIO_ID_SOUND 25 /* virtio sound */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#define VIRTIO_ID_RPMB 28 /* virtio rpmb */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
#define VIRTIO_ID_VIDEO_ENCODER 30 /* virtio video encoder */
#define VIRTIO_ID_VIDEO_DECODER 31 /* virtio video decoder */
#define VIRTIO_ID_SCMI 32 /* virtio SCMI */
#define VIRTIO_ID_NITRO_SEC_MOD 33 /* virtio nitro secure module */
#define VIRTIO_ID_I2C_ADAPTER 34 /* virtio i2c adapter */
#define VIRTIO_ID_WATCHDOG 35 /* virtio watchdog */
#define VIRTIO_ID_CAN 36 /* virtio can */
#define VIRTIO_ID_DMABUF 37 /* virtio dmabuf */
#define VIRTIO_ID_PARAM_SERV 38 /* virtio parameter server */
#define VIRTIO_ID_AUDIO_POLICY 39 /* virtio audio policy */
#define VIRTIO_ID_BT 40 /* virtio bluetooth */
#define VIRTIO_ID_GPIO 41 /* virtio gpio */


@@ -9,13 +9,14 @@
/**
* enum virtio_pcidev_ops - virtual PCI device operations
* @VIRTIO_PCIDEV_OP_RESERVED: reserved to catch errors
* @VIRTIO_PCIDEV_OP_CFG_READ: read config space, size is 1, 2, 4 or 8;
* the @data field should be filled in by the device (in little endian).
* @VIRTIO_PCIDEV_OP_CFG_WRITE: write config space, size is 1, 2, 4 or 8;
* the @data field contains the data to write (in little endian).
* @VIRTIO_PCIDEV_OP_BAR_READ: read BAR mem/pio, size can be variable;
* @VIRTIO_PCIDEV_OP_MMIO_READ: read BAR mem/pio, size can be variable;
* the @data field should be filled in by the device (in little endian).
* @VIRTIO_PCIDEV_OP_BAR_WRITE: write BAR mem/pio, size can be variable;
* @VIRTIO_PCIDEV_OP_MMIO_WRITE: write BAR mem/pio, size can be variable;
* the @data field contains the data to write (in little endian).
* @VIRTIO_PCIDEV_OP_MMIO_MEMSET: memset MMIO, size is variable but
* the @data field only has one byte (unlike @VIRTIO_PCIDEV_OP_MMIO_WRITE)


@@ -97,7 +97,8 @@ enum virtio_vsock_shutdown {
/* VIRTIO_VSOCK_OP_RW flags values */
enum virtio_vsock_rw {
VIRTIO_VSOCK_SEQ_EOR = 1,
VIRTIO_VSOCK_SEQ_EOM = 1,
VIRTIO_VSOCK_SEQ_EOR = 2,
};
#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */


@@ -276,7 +276,17 @@ enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
HL_DEVICE_STATUS_MALFUNCTION,
HL_DEVICE_STATUS_NEEDS_RESET
HL_DEVICE_STATUS_NEEDS_RESET,
HL_DEVICE_STATUS_IN_DEVICE_CREATION,
HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION
};
enum hl_server_type {
HL_SERVER_TYPE_UNKNOWN = 0,
HL_SERVER_GAUDI_HLS1 = 1,
HL_SERVER_GAUDI_HLS1H = 2,
HL_SERVER_GAUDI_TYPE1 = 3,
HL_SERVER_GAUDI_TYPE2 = 4
};
/* Opcode for management ioctl
@@ -337,17 +347,49 @@ enum hl_device_status {
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
/**
* struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
* @sram_base_address: The first SRAM physical base address that is free to be
* used by the user.
* @dram_base_address: The first DRAM virtual or physical base address that is
* free to be used by the user.
* @dram_size: The DRAM size that is available to the user.
* @sram_size: The SRAM size that is available to the user.
* @num_of_events: The number of events that can be received from the f/w. This
* is needed so the user knows the size of the h/w events array
* to pass to the kernel when fetching the event counters.
* @device_id: PCI device ID of the ASIC.
* @module_id: Module ID of the ASIC for mezzanine cards in servers
* (From OCP spec).
* @first_available_interrupt_id: The first available interrupt ID for the user
* to use when working with user interrupts.
* @server_type: Server type that the Gaudi ASIC is currently installed in.
* The value is according to enum hl_server_type
* @cpld_version: CPLD version on the board.
* @psoc_pci_pll_nr: PCI PLL NR value. Needed by the profiler in some ASICs.
* @psoc_pci_pll_nf: PCI PLL NF value. Needed by the profiler in some ASICs.
* @psoc_pci_pll_od: PCI PLL OD value. Needed by the profiler in some ASICs.
* @psoc_pci_pll_div_factor: PCI PLL DIV factor value. Needed by the profiler
* in some ASICs.
* @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant
* for Goya/Gaudi only.
* @dram_enabled: Whether the DRAM is enabled.
* @cpucp_version: The CPUCP f/w version.
* @card_name: The card name as passed by the f/w.
* @dram_page_size: The DRAM physical page size.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
__u64 dram_base_address;
__u64 dram_size;
__u32 sram_size;
__u32 num_of_events;
__u32 device_id; /* PCI Device ID */
__u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
__u32 device_id;
__u32 module_id;
__u32 reserved;
__u16 first_available_interrupt_id;
__u16 reserved2;
__u16 server_type;
__u32 cpld_version;
__u32 psoc_pci_pll_nr;
__u32 psoc_pci_pll_nf;
@@ -358,7 +400,7 @@ struct hl_info_hw_ip_info {
__u8 pad[2];
__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
__u64 reserved3;
__u64 reserved2;
__u64 dram_page_size;
};
@@ -628,12 +670,21 @@ struct hl_cs_chunk {
__u64 cb_handle;
/* Relevant only when HL_CS_FLAGS_WAIT or
* HL_CS_FLAGS_COLLECTIVE_WAIT is set.
* HL_CS_FLAGS_COLLECTIVE_WAIT is set
* This holds address of array of u64 values that contain
* signal CS sequence numbers. The wait described by this job
* will listen on all those signals (wait event per signal)
* signal CS sequence numbers. The wait described by
* this job will listen on all those signals
* (wait event per signal)
*/
__u64 signal_seq_arr;
/*
* Relevant only when HL_CS_FLAGS_WAIT or
* HL_CS_FLAGS_COLLECTIVE_WAIT is set
* along with HL_CS_FLAGS_ENCAP_SIGNALS.
* This is the CS sequence which has the encapsulated signals.
*/
__u64 encaps_signal_seq;
};
/* Index of queue to put the CB on */
@@ -651,6 +702,17 @@ struct hl_cs_chunk {
* Number of entries in signal_seq_arr
*/
__u32 num_signal_seq_arr;
/* Relevant only when HL_CS_FLAGS_WAIT or
* HL_CS_FLAGS_COLLECTIVE_WAIT is set along
* with HL_CS_FLAGS_ENCAP_SIGNALS
* This sets the signals range that the user wants to wait for
* out of the whole reserved signals range.
* E.g. if the signals range is 20 and the user doesn't want
* to wait for signal 8, this offset is set to 7, and the API
* is called again with 9 and so on up to 20.
*/
__u32 encaps_signal_offset;
};
/* HL_CS_CHUNK_FLAGS_* */
@@ -678,6 +740,28 @@ struct hl_cs_chunk {
#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200
#define HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT 0x400
/*
* The encapsulated signals CS is merged into the existing CS ioctls.
* In order to use this feature, follow the procedure below:
* 1. Reserve signals: set the CS type to HL_CS_FLAGS_RESERVE_SIGNALS_ONLY.
* The output of this API will be the SOB offset from CFG_BASE.
* This address will be used to patch CB cmds to do the signaling for this
* SOB by incrementing its value.
* For reverting the reservation, use the HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY
* CS type; note that this might fail if the SOB value went out of sync,
* in case another signaling request to the same SOB occurred between the
* reserve and unreserve calls.
* 2. Use the staged CS to do the encapsulated signaling jobs.
* use HL_CS_FLAGS_STAGED_SUBMISSION and HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
* along with HL_CS_FLAGS_ENCAP_SIGNALS flag, and set encaps_signal_offset
* field. This offset allows the app to wait on part of the reserved signals.
* 3. Use WAIT/COLLECTIVE WAIT CS along with HL_CS_FLAGS_ENCAP_SIGNALS flag
* to wait for the encapsulated signals.
*/
#define HL_CS_FLAGS_ENCAP_SIGNALS 0x800
#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000
#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
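/*
 * Not part of this header: a hedged sketch of step 1 above (reserving
 * signals). HL_IOCTL_CS, the cs_flags field and the <misc/habanalabs.h>
 * include path are assumptions taken from the existing habanalabs uAPI;
 * they are not shown in this diff.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int hl_reserve_signals(int fd, __u32 count, __u32 q_idx,
			      __u32 *handle_id, __u32 *sob_offset)
{
	union hl_cs_args args;

	memset(&args, 0, sizeof(args));
	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;  /* assumed field name */
	args.in.encaps_signals_count = count;   /* number of signals to reserve */
	args.in.encaps_signals_q_idx = q_idx;   /* queue (stream) index */

	if (ioctl(fd, HL_IOCTL_CS, &args) < 0)  /* assumed ioctl request */
		return -1;

	*handle_id = args.out.handle_id;                /* later used to submit or unreserve */
	*sob_offset = args.out.sob_base_addr_offset;    /* SOB offset from CFG_BASE */
	return 0;
}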
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
@@ -690,10 +774,35 @@ struct hl_cs_in {
/* holds address of array of hl_cs_chunk for execution phase */
__u64 chunks_execute;
/* Sequence number of a staged submission CS
* valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set
*/
__u64 seq;
union {
/*
* Sequence number of a staged submission CS
* valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set and
* HL_CS_FLAGS_STAGED_SUBMISSION_FIRST is unset.
*/
__u64 seq;
/*
* Encapsulated signals handle id
* Valid for two flows:
* 1. CS with encapsulated signals:
* when HL_CS_FLAGS_STAGED_SUBMISSION and
* HL_CS_FLAGS_STAGED_SUBMISSION_FIRST
* and HL_CS_FLAGS_ENCAP_SIGNALS are set.
* 2. unreserve signals:
* valid when HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY is set.
*/
__u32 encaps_sig_handle_id;
/* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
struct {
/* Encapsulated signals number */
__u32 encaps_signals_count;
/* Encapsulated signals queue index (stream) */
__u32 encaps_signals_q_idx;
};
};
/* Number of chunks in restore phase array. Maximum number is
* HL_MAX_JOBS_PER_CS
@@ -718,14 +827,31 @@ struct hl_cs_in {
};
struct hl_cs_out {
/*
* seq holds the sequence number of the CS to pass to wait ioctl. All
* values are valid except for 0 and ULLONG_MAX
*/
__u64 seq;
/* HL_CS_STATUS_* */
union {
/*
* seq holds the sequence number of the CS to pass to wait
* ioctl. All values are valid except for 0 and ULLONG_MAX
*/
__u64 seq;
/* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
struct {
/* This is the reserved signal handle id */
__u32 handle_id;
/* This is the signals count */
__u32 count;
};
};
/* HL_CS_STATUS */
__u32 status;
__u32 pad;
/*
* SOB base address offset
* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set
*/
__u32 sob_base_addr_offset;
};
union hl_cs_args {
@@ -735,11 +861,18 @@ union hl_cs_args {
#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32
struct hl_wait_cs_in {
union {
struct {
/* Command submission sequence number */
/*
* In case of wait_cs, this holds the CS sequence number.
* In case of wait for multi CS, this holds a user pointer to
* an array of CS sequence numbers
*/
__u64 seq;
/* Absolute timeout to wait for command submission
* in microseconds
@@ -767,12 +900,17 @@ struct hl_wait_cs_in {
/* Context ID - Currently not in use */
__u32 ctx_id;
/* HL_WAIT_CS_FLAGS_*
* If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include the
* interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK; in order
* not to specify an interrupt id, set the mask to all 1s.
*/
__u32 flags;
/* Multi CS API info - valid entries in multi-CS array */
__u8 seq_arr_len;
__u8 pad[7];
};
#define HL_WAIT_CS_STATUS_COMPLETED 0
@@ -789,8 +927,15 @@ struct hl_wait_cs_out {
__u32 status;
/* HL_WAIT_CS_STATUS_FLAG* */
__u32 flags;
/* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set */
/*
* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set
* for wait_cs: timestamp of CS completion
* for wait_multi_cs: timestamp of FIRST CS completion
*/
__s64 timestamp_nsec;
/* multi CS completion bitmap */
__u32 cs_completion_map;
__u32 pad;
};
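/*
 * Not part of this header: a hedged sketch of a multi-CS wait built from the
 * fields above. HL_IOCTL_WAIT_CS, the timeout_us field and the include path
 * are assumptions from the existing habanalabs uAPI, not shown in this diff.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int hl_wait_multi_cs(int fd, const __u64 *seq_arr, __u8 len,
			    __u32 *completion_map)
{
	union hl_wait_cs_args args;

	memset(&args, 0, sizeof(args));
	args.in.seq = (__u64)(uintptr_t)seq_arr;   /* user pointer to CS sequence numbers */
	args.in.seq_arr_len = len;                 /* valid entries in the array */
	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
	args.in.timeout_us = 1000000;              /* assumed field name; 1 second */

	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args) < 0)  /* assumed ioctl request */
		return -1;

	*completion_map = args.out.cs_completion_map;  /* bit i set: seq_arr[i] completed */
	return args.out.status;                        /* HL_WAIT_CS_STATUS_* */
}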
union hl_wait_cs_args {
@@ -813,6 +958,7 @@ union hl_wait_cs_args {
#define HL_MEM_CONTIGUOUS 0x1
#define HL_MEM_SHARED 0x2
#define HL_MEM_USERPTR 0x4
#define HL_MEM_FORCE_HINT 0x8
struct hl_mem_in {
union {