Merge branches 'cma', 'cxgb4', 'flowsteer', 'ipoib', 'misc', 'mlx4', 'mlx5', 'nes', 'ocrdma', 'qib' and 'srp' into for-next
@@ -230,6 +230,15 @@ enum {
        MLX5_MAX_PAGE_SHIFT = 31
};

enum {
        MLX5_ADAPTER_PAGE_SHIFT = 12
};

enum {
        MLX5_CAP_OFF_DCT = 41,
        MLX5_CAP_OFF_CMDIF_CSUM = 46,
};

struct mlx5_inbox_hdr {
        __be16 opcode;
        u8 rsvd[4];
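
MLX5_ADAPTER_PAGE_SHIFT pins the unit the device uses for page accounting to 4 KB, independent of the host PAGE_SHIFT. A minimal sketch of the conversion this implies (the demo_* names are illustrative, not part of the patch):

/* Illustrative only: the device counts pages in fixed 4 KB units, so byte
 * sizes handed to firmware are rounded up to whole adapter pages. */
#define DEMO_ADAPTER_PAGE_SHIFT 12
#define DEMO_ADAPTER_PAGE_SIZE  (1UL << DEMO_ADAPTER_PAGE_SHIFT)

static inline unsigned long demo_bytes_to_fw_pages(unsigned long bytes)
{
        /* round up to a whole number of 4 KB adapter pages */
        return (bytes + DEMO_ADAPTER_PAGE_SIZE - 1) >> DEMO_ADAPTER_PAGE_SHIFT;
}
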
@@ -319,9 +328,9 @@ struct mlx5_hca_cap {
        u8 rsvd25[42];
        __be16 log_uar_page_sz;
        u8 rsvd26[28];
        u8 log_msx_atomic_size_qp;
        u8 log_max_atomic_size_qp;
        u8 rsvd27[2];
        u8 log_msx_atomic_size_dc;
        u8 log_max_atomic_size_dc;
        u8 rsvd28[76];
};

@@ -483,6 +483,7 @@ struct mlx5_priv {
        struct rb_root page_root;
        int fw_pages;
        int reg_pages;
        struct list_head free_list;

        struct mlx5_core_health health;

@@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
        struct mlx5_cmd_msg *in;
        struct mlx5_cmd_msg *out;
        void *uout;
        int uout_size;
        mlx5_cmd_cbk_t callback;
        void *context;
        int idx;
        struct completion done;
        struct mlx5_cmd *cmd;
        struct work_struct work;
@@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent {
        u8 token;
        struct timespec ts1;
        struct timespec ts2;
        u16 op;
};

struct mlx5_pas {
@@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                          struct mlx5_create_mkey_mbox_in *in, int inlen);
                          struct mlx5_create_mkey_mbox_in *in, int inlen,
                          mlx5_cmd_cbk_t callback, void *context,
                          struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                         struct mlx5_query_mkey_mbox_out *out, int outlen);
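
The widened mlx5_core_create_mkey() prototype adds callback, context and out parameters for asynchronous completion. A hedged sketch of the two calling styles this suggests; the NULL-callback-means-synchronous convention and the demo_* names are assumptions, not taken from this hunk:

#include <linux/mlx5/driver.h>

static void demo_mkey_done(int status, void *context)
{
        /* async completion: check status and the mailbox out buffer saved via context */
}

static int demo_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                            struct mlx5_create_mkey_mbox_in *in, int inlen,
                            struct mlx5_create_mkey_mbox_out *out, bool async)
{
        if (!async)     /* assumed: a NULL callback selects the synchronous path */
                return mlx5_core_create_mkey(dev, mr, in, inlen,
                                             NULL, NULL, NULL);

        return mlx5_core_create_mkey(dev, mr, in, inlen,
                                     demo_mkey_done, mr, out);
}
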
@@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
        return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
        return mkey & 0xff;
}

enum {
        MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
        MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
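
mlx5_idx_to_mkey() and mlx5_mkey_variant() above imply a key layout with the 8-bit variant in the low byte and the index in the bits above it. A small sketch of composing and splitting a key under that assumption (the demo_* helpers are illustrative only):

#include <linux/types.h>

static inline u32 demo_make_mkey(u32 idx, u8 variant)
{
        return (idx << 8) | variant;    /* pairs with mlx5_idx_to_mkey() */
}

static inline u8 demo_mkey_to_variant(u32 mkey)
{
        return mkey & 0xff;             /* same mask as mlx5_mkey_variant() */
}

static inline u32 demo_mkey_to_idx(u32 mkey)
{
        return mkey >> 8;               /* inverse of mlx5_idx_to_mkey() */
}
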
@@ -67,12 +67,14 @@ enum rdma_node_type {
        RDMA_NODE_IB_CA = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC
};

enum rdma_transport_type
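
RDMA_NODE_USNIC and RDMA_TRANSPORT_USNIC simply extend the existing enums, so consumers that switch on the transport type only need one more case. An illustrative sketch (demo_transport_name() is not a real kernel function):

#include <rdma/ib_verbs.h>

static const char *demo_transport_name(enum rdma_transport_type t)
{
        switch (t) {
        case RDMA_TRANSPORT_IB:    return "InfiniBand";
        case RDMA_TRANSPORT_IWARP: return "iWARP";
        case RDMA_TRANSPORT_USNIC: return "usNIC";
        default:                   return "unknown";
        }
}
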
@@ -1436,6 +1438,7 @@ struct ib_device {

        int uverbs_abi_ver;
        u64 uverbs_cmd_mask;
        u64 uverbs_ex_cmd_mask;

        char node_desc[64];
        __be64 node_guid;
@@ -2384,4 +2387,17 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        return 0;
}

#endif /* IB_VERBS_H */
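
ib_check_mr_access() gives drivers a shared sanity check for memory-region access flags. A sketch of how a registration path might use it; demo_reg_mr() is hypothetical, not a real verbs entry point:

#include <rdma/ib_verbs.h>

static int demo_reg_mr(int access_flags)
{
        int ret = ib_check_mr_access(access_flags);

        if (ret)        /* remote write/atomic requested without local write */
                return ret;

        /* ... go on to pin pages and create the memory region ... */
        return 0;
}
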
@@ -13,6 +13,27 @@ struct srp_rport_identifiers {
        u8 roles;
};

/**
 * enum srp_rport_state - SRP transport layer state
 * @SRP_RPORT_RUNNING:   Transport layer operational.
 * @SRP_RPORT_BLOCKED:   Transport layer not operational; fast I/O fail timer
 *                       is running and I/O has been blocked.
 * @SRP_RPORT_FAIL_FAST: Fast I/O fail timer has expired; fail I/O fast.
 * @SRP_RPORT_LOST:      Device loss timer has expired; port is being removed.
 */
enum srp_rport_state {
        SRP_RPORT_RUNNING,
        SRP_RPORT_BLOCKED,
        SRP_RPORT_FAIL_FAST,
        SRP_RPORT_LOST,
};

/**
 * struct srp_rport
 * @lld_data: LLD private data.
 * @mutex:    Protects against concurrent rport reconnect / fast_io_fail /
 *            dev_loss_tmo activity.
 */
struct srp_rport {
        /* for initiator and target drivers */

@@ -23,11 +44,43 @@ struct srp_rport {

        /* for initiator drivers */

        void *lld_data; /* LLD private data */
        void *lld_data;

        struct mutex mutex;
        enum srp_rport_state state;
        bool deleted;
        int reconnect_delay;
        int failed_reconnects;
        struct delayed_work reconnect_work;
        int fast_io_fail_tmo;
        int dev_loss_tmo;
        struct delayed_work fast_io_fail_work;
        struct delayed_work dev_loss_work;
};

/**
 * struct srp_function_template
 * @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and
 *     dev_loss_tmo sysfs attributes for an rport.
 * @reset_timer_if_blocked: Whether or not srp_timed_out() should reset the
 *     command timer if the device on which it has been queued is blocked.
 * @reconnect_delay: If not NULL, points to the default reconnect_delay value.
 * @fast_io_fail_tmo: If not NULL, points to the default fast_io_fail_tmo value.
 * @dev_loss_tmo: If not NULL, points to the default dev_loss_tmo value.
 * @reconnect: Callback function for reconnecting to the target. See also
 *     srp_reconnect_rport().
 * @terminate_rport_io: Callback function for terminating all outstanding I/O
 *     requests for an rport.
 */
struct srp_function_template {
        /* for initiator drivers */
        bool has_rport_state;
        bool reset_timer_if_blocked;
        int *reconnect_delay;
        int *fast_io_fail_tmo;
        int *dev_loss_tmo;
        int (*reconnect)(struct srp_rport *rport);
        void (*terminate_rport_io)(struct srp_rport *rport);
        void (*rport_delete)(struct srp_rport *rport);
        /* for target drivers */
        int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
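
An initiator driver opts into the new transport-layer error handling by filling in these hooks and passing the template to srp_attach_transport(). An illustrative sketch only; the demo_* callbacks and the timeout values are placeholders, not taken from any real driver:

#include <scsi/scsi_transport_srp.h>

static int demo_reconnect(struct srp_rport *rport)
{
        /* re-establish the RDMA connection backing this rport */
        return 0;
}

static void demo_terminate_rport_io(struct srp_rport *rport)
{
        /* complete or fail every outstanding SCSI command for this rport */
}

static int demo_fast_io_fail_tmo = 15;          /* seconds, placeholder */
static int demo_dev_loss_tmo = 600;             /* seconds, placeholder */

static struct srp_function_template demo_srp_ft = {
        .has_rport_state        = true,
        .reset_timer_if_blocked = true,
        .fast_io_fail_tmo       = &demo_fast_io_fail_tmo,
        .dev_loss_tmo           = &demo_dev_loss_tmo,
        .reconnect              = demo_reconnect,
        .terminate_rport_io     = demo_terminate_rport_io,
};
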
@@ -38,10 +91,36 @@ extern struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *);
extern void srp_release_transport(struct scsi_transport_template *);

extern void srp_rport_get(struct srp_rport *rport);
extern void srp_rport_put(struct srp_rport *rport);
extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
                                       struct srp_rport_identifiers *);
extern void srp_rport_del(struct srp_rport *);

extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
                         int dev_loss_tmo);
extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);

/**
 * srp_chkready() - evaluate the transport layer state before I/O
 *
 * Returns a SCSI result code that can be returned by the LLD queuecommand()
 * implementation. The role of this function is similar to that of
 * fc_remote_port_chkready().
 */
static inline int srp_chkready(struct srp_rport *rport)
{
        switch (rport->state) {
        case SRP_RPORT_RUNNING:
        case SRP_RPORT_BLOCKED:
        default:
                return 0;
        case SRP_RPORT_FAIL_FAST:
                return DID_TRANSPORT_FAILFAST << 16;
        case SRP_RPORT_LOST:
                return DID_NO_CONNECT << 16;
        }
}

#endif
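
A hedged sketch of how an initiator's queuecommand() could gate I/O on the rport state with srp_chkready(); demo_queuecommand() and demo_shost_to_rport() are hypothetical, real drivers keep the rport pointer in their host private data:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

static struct srp_rport *demo_shost_to_rport(struct Scsi_Host *shost);

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        struct srp_rport *rport = demo_shost_to_rport(shost);
        int result = srp_chkready(rport);

        if (result) {
                /* FAIL_FAST or LOST: complete the command without sending it */
                cmd->result = result;
                cmd->scsi_done(cmd);
                return 0;
        }

        /* ... map the data buffer and post the SRP_CMD to the target ... */
        return 0;
}
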
@@ -87,10 +87,11 @@ enum {
        IB_USER_VERBS_CMD_CLOSE_XRCD,
        IB_USER_VERBS_CMD_CREATE_XSRQ,
        IB_USER_VERBS_CMD_OPEN_QP,
#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_CMD_DESTROY_FLOW
#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
};

enum {
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW
};

/*
@@ -122,22 +123,24 @@ struct ib_uverbs_comp_event_desc {
 * the rest of the command struct based on these values.
 */

#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24

#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80

struct ib_uverbs_cmd_hdr {
        __u32 command;
        __u16 in_words;
        __u16 out_words;
};

#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
struct ib_uverbs_cmd_hdr_ex {
        __u32 command;
        __u16 in_words;
        __u16 out_words;
struct ib_uverbs_ex_cmd_hdr {
        __u64 response;
        __u16 provider_in_words;
        __u16 provider_out_words;
        __u32 cmd_hdr_reserved;
};
#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
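
With the IB_USER_VERBS_EX_CMD_* values and struct ib_uverbs_ex_cmd_hdr, an extended command is tagged by setting the extended flag in the high flags byte of the command word and prepending the extra header before the provider payload. A userspace-side sketch under those assumptions; demo_fill_ex_hdrs() and the exact word-count conventions are not defined by this hunk:

#include <rdma/ib_user_verbs.h>
#include <string.h>

static void demo_fill_ex_hdrs(struct ib_uverbs_cmd_hdr *hdr,
                              struct ib_uverbs_ex_cmd_hdr *ex_hdr,
                              __u32 cmd, __u16 in_words, __u16 out_words,
                              __u64 response)
{
        memset(hdr, 0, sizeof(*hdr));
        hdr->command = cmd |
                (IB_USER_VERBS_CMD_FLAG_EXTENDED << IB_USER_VERBS_CMD_FLAGS_SHIFT);
        hdr->in_words = in_words;       /* size of the core request (unit per the uverbs ABI) */
        hdr->out_words = out_words;     /* size of the core response */

        memset(ex_hdr, 0, sizeof(*ex_hdr));
        ex_hdr->response = response;    /* user response buffer passed as a __u64 */
}
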

struct ib_uverbs_get_context {
        __u64 response;
@@ -700,62 +703,71 @@ struct ib_uverbs_detach_mcast {
        __u64 driver_data[0];
};

#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
struct ib_kern_eth_filter {
struct ib_uverbs_flow_spec_hdr {
        __u32 type;
        __u16 size;
        __u16 reserved;
        /* followed by flow_spec */
        __u64 flow_spec_data[0];
};

struct ib_uverbs_flow_eth_filter {
        __u8 dst_mac[6];
        __u8 src_mac[6];
        __be16 ether_type;
        __be16 vlan_tag;
};

struct ib_kern_spec_eth {
        __u32 type;
        __u16 size;
        __u16 reserved;
        struct ib_kern_eth_filter val;
        struct ib_kern_eth_filter mask;
};

struct ib_kern_ipv4_filter {
        __be32 src_ip;
        __be32 dst_ip;
};

struct ib_kern_spec_ipv4 {
        __u32 type;
        __u16 size;
        __u16 reserved;
        struct ib_kern_ipv4_filter val;
        struct ib_kern_ipv4_filter mask;
};

struct ib_kern_tcp_udp_filter {
        __be16 dst_port;
        __be16 src_port;
};

struct ib_kern_spec_tcp_udp {
        __u32 type;
        __u16 size;
        __u16 reserved;
        struct ib_kern_tcp_udp_filter val;
        struct ib_kern_tcp_udp_filter mask;
};

struct ib_kern_spec {
struct ib_uverbs_flow_spec_eth {
        union {
                struct ib_uverbs_flow_spec_hdr hdr;
                struct {
                        __u32 type;
                        __u16 size;
                        __u16 reserved;
                };
                struct ib_kern_spec_eth eth;
                struct ib_kern_spec_ipv4 ipv4;
                struct ib_kern_spec_tcp_udp tcp_udp;
        };
        struct ib_uverbs_flow_eth_filter val;
        struct ib_uverbs_flow_eth_filter mask;
};

struct ib_kern_flow_attr {
struct ib_uverbs_flow_ipv4_filter {
        __be32 src_ip;
        __be32 dst_ip;
};

struct ib_uverbs_flow_spec_ipv4 {
        union {
                struct ib_uverbs_flow_spec_hdr hdr;
                struct {
                        __u32 type;
                        __u16 size;
                        __u16 reserved;
                };
        };
        struct ib_uverbs_flow_ipv4_filter val;
        struct ib_uverbs_flow_ipv4_filter mask;
};

struct ib_uverbs_flow_tcp_udp_filter {
        __be16 dst_port;
        __be16 src_port;
};

struct ib_uverbs_flow_spec_tcp_udp {
        union {
                struct ib_uverbs_flow_spec_hdr hdr;
                struct {
                        __u32 type;
                        __u16 size;
                        __u16 reserved;
                };
        };
        struct ib_uverbs_flow_tcp_udp_filter val;
        struct ib_uverbs_flow_tcp_udp_filter mask;
};

struct ib_uverbs_flow_attr {
        __u32 type;
        __u16 size;
        __u16 priority;
@@ -767,13 +779,13 @@ struct ib_kern_flow_attr {
 * struct ib_flow_spec_xxx
 * struct ib_flow_spec_yyy
 */
        struct ib_uverbs_flow_spec_hdr flow_specs[0];
};

struct ib_uverbs_create_flow {
        __u32 comp_mask;
        __u64 response;
        __u32 qp_handle;
        struct ib_kern_flow_attr flow_attr;
        struct ib_uverbs_flow_attr flow_attr;
};

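
A create-flow request lays out struct ib_uverbs_flow_attr followed in the same buffer by its flow specs, each introduced by an ib_uverbs_flow_spec_hdr whose size covers that spec. A layout sketch; the 0x20 Ethernet spec type value is an assumption taken from ib_verbs.h (not from this diff), the demo_* helper is illustrative, and the remaining attr fields are left out:

#include <rdma/ib_user_verbs.h>
#include <stddef.h>
#include <string.h>

static size_t demo_build_eth_flow(void *buf, size_t len, const __u8 dmac[6])
{
        struct ib_uverbs_flow_attr *attr = buf;
        struct ib_uverbs_flow_spec_eth *eth = (void *)(attr + 1);
        size_t total = sizeof(*attr) + sizeof(*eth);

        if (len < total)
                return 0;

        memset(buf, 0, total);
        attr->size = total;                     /* attr plus every trailing spec */
        /* other attr fields (spec count, port, flags, ...) omitted here */

        eth->hdr.type = 0x20;                   /* assumed value of IB_FLOW_SPEC_ETH */
        eth->hdr.size = sizeof(*eth);
        memcpy(eth->val.dst_mac, dmac, 6);
        memset(eth->mask.dst_mac, 0xff, 6);     /* match the full destination MAC */

        return total;
}
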
struct ib_uverbs_create_flow_resp {
@@ -785,7 +797,6 @@ struct ib_uverbs_destroy_flow {
        __u32 comp_mask;
        __u32 flow_handle;
};
#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */

struct ib_uverbs_create_srq {
        __u64 response;