netvsc: keep track of some non-fatal overload conditions

Add ethtool statistics for the case where the send (chimney) buffer is
exhausted and the driver has to fall back to doing a scatter/gather send.
Also add a statistic for the case where the ring buffer is full and
receive completions are delayed.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b5327975a
commit cad5c19770
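The two new counters, tx_send_full and rx_comp_busy, are exported through the
standard ethtool statistics interface, so once the patch is applied they show
up in "ethtool -S" output for a netvsc interface. Purely as an illustration
(not part of the patch), the sketch below reads them from userspace over the
generic ethtool ioctl path, which is the same mechanism the ethtool utility
uses; the default interface name "eth0" and the minimal error handling are
assumptions for brevity.

/*
 * Illustrative sketch only -- not part of this patch.
 * Reads tx_send_full and rx_comp_busy via the ethtool ioctl interface.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0"; /* assumed default */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Ask the driver how many statistics it exports. */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	n = drvinfo.n_stats;

	/* Fetch the statistic names... */
	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (char *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* ...and their current values. */
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < n; i++) {
		const char *name = (const char *)&strings->data[i * ETH_GSTRING_LEN];

		if (!strcmp(name, "tx_send_full") || !strcmp(name, "rx_comp_busy"))
			printf("%s: %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}

	free(strings);
	free(stats);
	close(fd);
	return 0;
}

A steadily rising tx_send_full indicates the send buffer is regularly
exhausted, while rx_comp_busy counts receive completions that had to be
retried because the ring buffer was full.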
@@ -680,6 +680,8 @@ struct netvsc_ethtool_stats {
 	unsigned long tx_no_space;
 	unsigned long tx_too_big;
 	unsigned long tx_busy;
+	unsigned long tx_send_full;
+	unsigned long rx_comp_busy;
 };
 
 struct netvsc_vf_pcpu_stats {
@@ -883,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	} else if (pktlen + net_device->pkt_align <
 		   net_device->send_section_size) {
 		section_index = netvsc_get_next_send_section(net_device);
-		if (section_index != NETVSC_INVALID_INDEX) {
+		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
+			++ndev_ctx->eth_stats.tx_send_full;
+		} else {
 			move_pkt_msd(&msd_send, &msd_skb, msdp);
 			msd_len = 0;
 		}
@@ -949,9 +951,10 @@ send_now:
 }
 
 /* Send pending recv completions */
-static int send_recv_completions(struct netvsc_channel *nvchan)
+static int send_recv_completions(struct net_device *ndev,
+				 struct netvsc_device *nvdev,
+				 struct netvsc_channel *nvchan)
 {
-	struct netvsc_device *nvdev = nvchan->net_device;
 	struct multi_recv_comp *mrc = &nvchan->mrc;
 	struct recv_comp_msg {
 		struct nvsp_message_header hdr;
@@ -969,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan)
 		msg.status = rcd->status;
 		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
 				       rcd->tid, VM_PKT_COMP, 0);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			struct net_device_context *ndev_ctx = netdev_priv(ndev);
+
+			++ndev_ctx->eth_stats.rx_comp_busy;
 			return ret;
+		}
 
 		if (++mrc->first == nvdev->recv_completion_cnt)
 			mrc->first = 0;
@@ -1011,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev,
 	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
 
 	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
-		send_recv_completions(nvchan);
+		send_recv_completions(ndev, nvdev, nvchan);
 		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
 	}
 
@@ -1194,7 +1201,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	 * then re-enable host interrupts
 	 * and reschedule if ring is not empty.
 	 */
-	if (send_recv_completions(nvchan) == 0 &&
+	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
 	    hv_end_read(&channel->inbound)) {
@@ -1112,6 +1112,8 @@ static const struct {
 	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 	{ "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 	{ "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
+	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
+	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
 }, vf_stats[] = {
 	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
 	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },