Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2018-10-12

Please apply one more patchset for net-next. This extends the TSO support
in qeth.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2018-10-12 11:27:01 -07:00
commit a688c53a02
5 changed files with 210 additions and 57 deletions
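
As background for the diffs below, here is a minimal sketch of the driver-side pattern this patchset builds on: advertising NETIF_F_TSO/NETIF_F_TSO6 and reading MSS and header length off a GSO skb at transmit time. It is not qeth code; my_setup_features() and my_ndo_start_xmit() are hypothetical names, while the NETIF_F_* flags, netif_set_gso_max_size(), skb_is_gso(), skb_shinfo() and tcp_hdrlen() are standard kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

static void my_setup_features(struct net_device *dev)
{
	/* Offer TCP segmentation offload for IPv4 and IPv6, also for
	 * VLAN-tagged traffic. */
	dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	dev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;

	/* Cap GSO aggregates to what one HW buffer can hold; the exact
	 * value is device-specific (qeth derives it from its QDIO
	 * buffer geometry, the 15 * PAGE_SIZE here is illustrative). */
	netif_set_gso_max_size(dev, 15 * PAGE_SIZE);
}

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (skb_is_gso(skb)) {
		/* These two values are what a HW TSO descriptor (such as
		 * qeth_hdr_ext_tso in this patchset) needs to carry. */
		unsigned int mss = skb_shinfo(skb)->gso_size;
		unsigned int hdr_len = skb_transport_offset(skb) +
				       tcp_hdrlen(skb);

		pr_debug("GSO skb: mss=%u hdr_len=%u\n", mss, hdr_len);
		/* ... fill the hardware TSO header from mss/hdr_len ... */
	}
	/* ... map the skb and hand it to the hardware ... */
	return NETDEV_TX_OK;
}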

drivers/s390/net/qeth_core.h

@@ -390,8 +390,9 @@ enum qeth_layer2_frame_flags {
enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_TSO = 0x03,
QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -1047,6 +1048,8 @@ int qeth_vm_request_mac(struct qeth_card *card);
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr **hdr, unsigned int hdr_len,
unsigned int proto_len, unsigned int *elements);
void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
struct sk_buff *skb, unsigned int proto_len);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,

drivers/s390/net/qeth_core_main.c

@@ -4088,15 +4088,31 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
struct sk_buff *skb, unsigned int proto_len)
{
struct qeth_hdr_ext_tso *ext = &hdr->ext;
ext->hdr_tot_len = sizeof(*ext);
ext->imb_hdr_no = 1;
ext->hdr_type = 1;
ext->hdr_version = 1;
ext->hdr_len = 28;
ext->payload_len = payload_len;
ext->mss = skb_shinfo(skb)->gso_size;
ext->dg_hdr_len = proto_len;
}
EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len))
{
const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
unsigned int proto_len, hw_hdr_len;
unsigned int frame_len = skb->len;
bool is_tso = skb_is_gso(skb);
unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
@@ -4104,6 +4120,14 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
int push_len, rc;
bool is_sg;
if (is_tso) {
hw_hdr_len = sizeof(struct qeth_hdr_tso);
proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
hw_hdr_len = sizeof(struct qeth_hdr);
proto_len = IS_IQD(card) ? ETH_HLEN : 0;
}
rc = skb_cow_head(skb, hw_hdr_len);
if (rc)
return rc;
@@ -4112,13 +4136,16 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
&elements);
if (push_len < 0)
return push_len;
if (!push_len) {
if (is_tso || !push_len) {
/* HW header needs its own buffer element. */
hd_len = hw_hdr_len + proto_len;
data_offset = proto_len;
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
fill_header(card, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
is_sg = skb_is_nonlinear(skb);
if (IS_IQD(card)) {
@@ -4136,6 +4163,10 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
card->perf_stats.buf_elements_sent += elements;
if (is_sg)
card->perf_stats.sg_skbs_sent++;
if (is_tso) {
card->perf_stats.large_send_bytes += frame_len;
card->perf_stats.large_send_cnt++;
}
}
} else {
if (!push_len)
@@ -5394,6 +5425,21 @@ static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
return cmd->hdr.return_code;
}
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipa_caps *caps = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
return 0;
caps->supported = cmd->data.setassparms.data.caps.supported;
caps->enabled = cmd->data.setassparms.data.caps.enabled;
return 0;
}
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -6396,27 +6442,85 @@ static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
return rc ? -EIO : 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, int on)
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_tso_start_data *tso_data = reply->param;
if (qeth_setassparms_inspect_rc(cmd))
return 0;
tso_data->mss = cmd->data.setassparms.data.tso.mss;
tso_data->supported = cmd->data.setassparms.data.tso.supported;
return 0;
}
static int qeth_set_tso_off(struct qeth_card *card,
enum qeth_prot_versions prot)
{
return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_STOP, 0, prot);
}
static int qeth_set_tso_on(struct qeth_card *card,
enum qeth_prot_versions prot)
{
struct qeth_tso_start_data tso_data;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_caps caps;
int rc;
QETH_CARD_TEXT(card, 3, "sttso");
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0, prot);
if (!iob)
return -ENOMEM;
if (on) {
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_START, 0);
if (rc) {
dev_warn(&card->gdev->dev,
"Starting outbound TCP segmentation offload for %s failed\n",
QETH_CARD_IFNAME(card));
return -EIO;
}
dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
} else {
rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_STOP, 0);
rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
qeth_start_tso_cb, &tso_data);
if (rc)
return rc;
if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
qeth_set_tso_off(card, prot);
return -EOPNOTSUPP;
}
return rc;
iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
if (!iob) {
qeth_set_tso_off(card, prot);
return -ENOMEM;
}
/* enable TSO capability */
caps.supported = 0;
caps.enabled = QETH_IPA_LARGE_SEND_TCP;
rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
qeth_setassparms_get_caps_cb, &caps);
if (rc) {
qeth_set_tso_off(card, prot);
return rc;
}
if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
!qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
qeth_set_tso_off(card, prot);
return -EOPNOTSUPP;
}
dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
tso_data.mss);
return 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
enum qeth_prot_versions prot)
{
int rc = on ? qeth_set_tso_on(card, prot) :
qeth_set_tso_off(card, prot);
return rc ? -EIO : 0;
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
@@ -6443,7 +6547,7 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
}
#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
NETIF_F_IPV6_CSUM)
NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6493,11 +6597,18 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if (rc)
changed ^= NETIF_F_RXCSUM;
}
if ((changed & NETIF_F_TSO)) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
if (changed & NETIF_F_TSO) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_TSO;
}
if (changed & NETIF_F_TSO6) {
rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
QETH_PROT_IPV6);
if (rc)
changed ^= NETIF_F_TSO6;
}
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
@@ -6523,6 +6634,8 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO6;
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)

drivers/s390/net/qeth_core_mpc.h

@@ -56,6 +56,21 @@ static inline bool qeth_intparm_is_iob(unsigned long intparm)
#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
struct qeth_ipa_caps {
u32 supported;
u32 enabled;
};
static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask)
{
return (caps->supported & mask) == mask;
}
static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
{
return (caps->enabled & mask) == mask;
}
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -405,14 +420,25 @@ struct qeth_checksum_cmd {
__u32 enabled;
} __packed;
enum qeth_ipa_large_send_caps {
QETH_IPA_LARGE_SEND_TCP = 0x00000001,
};
struct qeth_tso_start_data {
u32 mss;
u32 supported;
};
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
__u8 ip[16];
} data;
} __attribute__ ((packed));
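
As a quick illustration of how the new caps accessors are meant to be read (the real caller in this patchset is qeth_set_tso_on() in qeth_core_main.c), a hedged sketch with a hypothetical wrapper:

#include <linux/types.h>
#include "qeth_core_mpc.h"

/* Hypothetical wrapper, not part of the patchset: TSO counts as negotiated
 * only if the adapter both offers (supported) and turns on (enabled) the
 * large-send capability. */
static bool my_tso_negotiated(struct qeth_ipa_caps *caps)
{
	return qeth_ipa_caps_supported(caps, QETH_IPA_LARGE_SEND_TCP) &&
	       qeth_ipa_caps_enabled(caps, QETH_IPA_LARGE_SEND_TCP);
}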

drivers/s390/net/qeth_l2_main.c

@@ -197,15 +197,19 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
hdr->hdr.l2.pkt_length = data_len;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
if (skb_is_gso(skb)) {
hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
}
}
/* set byte byte 3 to casting flags */
@@ -897,6 +901,20 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO;
card->dev->vlan_features |= NETIF_F_TSO;
}
if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO6;
card->dev->vlan_features |= NETIF_F_TSO6;
}
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);

drivers/s390/net/qeth_l3_main.c

@@ -2037,7 +2037,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.length = data_len;
if (skb_is_gso(skb)) {
hdr->hdr.l3.id = QETH_HEADER_TYPE_TSO;
hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2099,22 +2099,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
static void qeth_l3_fill_tso_ext(struct qeth_hdr_tso *hdr,
unsigned int payload_len, struct sk_buff *skb,
unsigned int proto_len)
{
struct qeth_hdr_ext_tso *ext = &hdr->ext;
ext->hdr_tot_len = sizeof(*ext);
ext->imb_hdr_no = 1;
ext->hdr_type = 1;
ext->hdr_version = 1;
ext->hdr_len = 28;
ext->payload_len = payload_len;
ext->mss = skb_shinfo(skb)->gso_size;
ext->dg_hdr_len = proto_len;
}
static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@@ -2175,9 +2159,9 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
qeth_l3_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb,
proto_len);
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb,
proto_len);
}
is_sg = skb_is_nonlinear(skb);
@@ -2401,6 +2385,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
unsigned int headroom;
int rc;
if (card->dev->netdev_ops)
@@ -2415,11 +2400,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
}
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
card->dev->needed_headroom = sizeof(struct qeth_hdr);
/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
card->dev->needed_headroom += VLAN_HLEN;
if (qeth_is_supported(card, IPA_OUTBOUND_TSO))
card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
/*IPv6 address autoconfiguration stuff*/
qeth_l3_get_unique_id(card);
@@ -2438,10 +2418,22 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_IPV6_CSUM;
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
card->dev->hw_features |= NETIF_F_TSO6;
card->dev->vlan_features |= NETIF_F_TSO6;
}
/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
if (card->dev->hw_features & NETIF_F_TSO6)
headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
else if (card->dev->hw_features & NETIF_F_TSO)
headroom = sizeof(struct qeth_hdr_tso);
else
headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
@@ -2452,13 +2444,14 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
} else
return -ENODEV;
card->dev->needed_headroom = headroom;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
if (card->dev->hw_features & NETIF_F_TSO)
if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));