s390/qeth: add statistics for consumed buffer elements
Nowadays an skb fragment typically spans over multiple pages. So replace the
obsolete, SG-only 'fragments' counter with one that tracks the consumed
buffer elements. This is what actually matters for performance.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d2a274b25b
parent 72f219da79
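As a rough illustration of the distinction the commit message draws between fragments and consumed buffer elements, here is a minimal, self-contained C sketch. It assumes one 4 KiB page per buffer element; the names frag and elements_for_range are illustrative only and are not taken from the qeth driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Illustrative stand-in for one skb fragment: a start address plus length. */
struct frag {
	uintptr_t addr;
	size_t len;
};

/* Count how many PAGE_SIZE-sized buffer elements a contiguous range touches.
 * A single fragment that crosses page boundaries consumes several elements,
 * which is the point the commit message makes.
 */
static unsigned int elements_for_range(uintptr_t start, size_t len)
{
	uintptr_t end = start + len;

	if (!len)
		return 0;
	return (unsigned int)((end - 1) / PAGE_SIZE - start / PAGE_SIZE + 1);
}

int main(void)
{
	/* One 32 KiB fragment starting mid-page: 1 "fragment", 9 elements. */
	struct frag f = { .addr = 0x1800, .len = 32768 };

	printf("fragments: 1, buffer elements: %u\n",
	       elements_for_range(f.addr, f.len));
	return 0;
}

For such an skb the old counter would record a single SG fragment while nine buffer elements are actually consumed, which is why counting elements is the more meaningful performance metric.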
@@ -104,6 +104,7 @@ struct qeth_dbf_info {
 struct qeth_perf_stats {
 	unsigned int bufs_rec;
 	unsigned int bufs_sent;
+	unsigned int buf_elements_sent;
 
 	unsigned int skbs_sent_pack;
 	unsigned int bufs_sent_pack;
@@ -137,7 +138,6 @@ struct qeth_perf_stats {
 	unsigned int large_send_bytes;
 	unsigned int large_send_cnt;
 	unsigned int sg_skbs_sent;
-	unsigned int sg_frags_sent;
 	/* initial values when measuring starts */
 	unsigned long initial_rx_packets;
 	unsigned long initial_tx_packets;
@@ -5970,7 +5970,7 @@ static struct {
 	{"tx skbs packing"},
 	{"tx buffers packing"},
 	{"tx sg skbs"},
-	{"tx sg frags"},
+	{"tx buffer elements"},
 /* 10 */{"rx sg skbs"},
 	{"rx sg frags"},
 	{"rx sg page allocs"},
@@ -6029,7 +6029,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[6] = card->perf_stats.skbs_sent_pack;
 	data[7] = card->perf_stats.bufs_sent_pack;
 	data[8] = card->perf_stats.sg_skbs_sent;
-	data[9] = card->perf_stats.sg_frags_sent;
+	data[9] = card->perf_stats.buf_elements_sent;
 	data[10] = card->perf_stats.sg_skbs_rx;
 	data[11] = card->perf_stats.sg_frags_rx;
 	data[12] = card->perf_stats.sg_alloc_page_rx;
@@ -672,10 +672,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
 			    int ipv)
 {
 	int push_len = sizeof(struct qeth_hdr);
-	unsigned int elements, nr_frags;
 	unsigned int hdr_elements = 0;
 	struct qeth_hdr *hdr = NULL;
 	unsigned int hd_len = 0;
+	unsigned int elements;
+	bool is_sg;
 	int rc;
 
 	/* fix hardware limitation: as long as we do not have sbal
@@ -693,7 +694,6 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
 		if (rc)
 			return rc;
 	}
-	nr_frags = skb_shinfo(skb)->nr_frags;
 
 	rc = skb_cow_head(skb, push_len);
 	if (rc)
@@ -720,15 +720,16 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
 	}
 	elements += hdr_elements;
 
+	is_sg = skb_is_nonlinear(skb);
 	/* TODO: remove the skb_orphan() once TX completion is fast enough */
 	skb_orphan(skb);
 	rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
 out:
 	if (!rc) {
-		if (card->options.performance_stats && nr_frags) {
-			card->perf_stats.sg_skbs_sent++;
-			/* nr_frags + skb->data */
-			card->perf_stats.sg_frags_sent += nr_frags + 1;
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
 		}
 	} else {
 		if (hd_len)
@@ -2166,12 +2166,13 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 				int cast_type)
 {
 	const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
-	unsigned int frame_len, nr_frags;
 	unsigned char eth_hdr[ETH_HLEN];
 	unsigned int hdr_elements = 0;
 	struct qeth_hdr *hdr = NULL;
 	int elements, push_len, rc;
 	unsigned int hd_len = 0;
+	unsigned int frame_len;
+	bool is_sg;
 
 	/* compress skb to fit into one IO buffer: */
 	if (!qeth_get_elements_no(card, skb, 0, 0)) {
@@ -2194,7 +2195,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
 	skb_pull(skb, ETH_HLEN);
 	frame_len = skb->len;
-	nr_frags = skb_shinfo(skb)->nr_frags;
 
 	push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len);
 	if (push_len < 0)
@@ -2217,6 +2217,7 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	else
 		qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
 
+	is_sg = skb_is_nonlinear(skb);
 	if (IS_IQD(card)) {
 		rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
 	} else {
@@ -2227,10 +2228,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
 	}
 out:
 	if (!rc) {
-		if (card->options.performance_stats && nr_frags) {
-			card->perf_stats.sg_skbs_sent++;
-			/* nr_frags + skb->data */
-			card->perf_stats.sg_frags_sent += nr_frags + 1;
+		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
 		}
 	} else {
 		if (!push_len)
@@ -2248,14 +2249,14 @@ out:
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-	unsigned int hd_len, nr_frags;
 	int elements, len, rc;
 	__be16 *tag;
 	struct qeth_hdr *hdr = NULL;
 	int hdr_elements = 0;
 	struct sk_buff *new_skb = NULL;
 	int tx_bytes = skb->len;
-	bool use_tso;
+	unsigned int hd_len;
+	bool use_tso, is_sg;
 
 	/* Ignore segment size from skb_is_gso(), 1 page is always used. */
 	use_tso = skb_is_gso(skb) &&
@@ -2297,7 +2298,6 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 		if (rc)
 			goto out;
 	}
-	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
 	if (use_tso) {
 		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
@@ -2334,6 +2334,8 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 		rc = -EINVAL;
 		goto out;
 	}
+
+	is_sg = skb_is_nonlinear(new_skb);
 	rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
 				 elements);
 out:
@@ -2341,15 +2343,13 @@ out:
 		if (new_skb != skb)
 			dev_kfree_skb_any(skb);
 		if (card->options.performance_stats) {
+			card->perf_stats.buf_elements_sent += elements;
+			if (is_sg)
+				card->perf_stats.sg_skbs_sent++;
 			if (use_tso) {
 				card->perf_stats.large_send_bytes += tx_bytes;
 				card->perf_stats.large_send_cnt++;
 			}
-			if (nr_frags) {
-				card->perf_stats.sg_skbs_sent++;
-				/* nr_frags + skb->data */
-				card->perf_stats.sg_frags_sent += nr_frags + 1;
-			}
 		}
 	} else {
 		if (new_skb != skb)