virtio: maintain stats per queue
Author: Stephen Hemminger <stephen@networkplumber.org>
Sat, 14 Jun 2014 01:06:18 +0000 (18:06 -0700)
Committer: Thomas Monjalon <thomas.monjalon@6wind.com>
Tue, 22 Jul 2014 13:00:00 +0000 (15:00 +0200)
Avoid cache collision and thrashing of the software statistics
by keeping them per-queue in the driver.

Signed-off-by: Stephen Hemminger <shemming@brocade.com>
Acked-by: Alan Carew <alan.carew@intel.com>
lib/librte_pmd_virtio/virtio_ethdev.c
lib/librte_pmd_virtio/virtio_pci.h
lib/librte_pmd_virtio/virtio_rxtx.c
lib/librte_pmd_virtio/virtqueue.h

index b10bfbb..4d781fa 100644 (file)
@@ -474,19 +474,67 @@ virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
 static void
 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       struct virtio_hw *hw =
-               VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       if (stats)
-               memcpy(stats, &hw->eth_stats, sizeof(*stats));
+       unsigned i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               const struct virtqueue *txvq = dev->data->tx_queues[i];
+               if (txvq == NULL)
+                       continue;
+
+               stats->opackets += txvq->packets;
+               stats->obytes += txvq->bytes;
+               stats->oerrors += txvq->errors;
+
+               if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+                       stats->q_opackets[i] = txvq->packets;
+                       stats->q_obytes[i] = txvq->bytes;
+               }
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               const struct virtqueue *rxvq = dev->data->rx_queues[i];
+               if (rxvq == NULL)
+                       continue;
+
+               stats->ipackets += rxvq->packets;
+               stats->ibytes += rxvq->bytes;
+               stats->ierrors += rxvq->errors;
+
+               if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+                       stats->q_ipackets[i] = rxvq->packets;
+                       stats->q_ibytes[i] = rxvq->bytes;
+               }
+       }
+
+       stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 }
 
 static void
 virtio_dev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw =
-               VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       /* Reset software totals */
-       memset(&hw->eth_stats, 0, sizeof(hw->eth_stats));
+       unsigned int i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct virtqueue *txvq = dev->data->tx_queues[i];
+               if (txvq == NULL)
+                       continue;
+
+               txvq->packets = 0;
+               txvq->bytes = 0;
+               txvq->errors = 0;
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct virtqueue *rxvq = dev->data->rx_queues[i];
+               if (rxvq == NULL)
+                       continue;
+
+               rxvq->packets = 0;
+               rxvq->bytes = 0;
+               rxvq->errors = 0;
+       }
+
+       dev->data->rx_mbuf_alloc_failed = 0;
 }
 
 static void
index d27b734..6d4c230 100644 (file)
@@ -180,7 +180,6 @@ struct virtio_hw {
        uint8_t     use_msix;
        uint8_t     mac_addr[ETHER_ADDR_LEN];
        int         adapter_stopped;
-       struct      rte_eth_stats eth_stats;
 };
 
 /*
index 6f02a61..c51b811 100644 (file)
@@ -301,7 +301,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        PMD_RX_LOG(ERR, "Packet drop\n");
                        nb_enqueued++;
                        virtio_discard_rxbuf(rxvq, rxm);
-                       hw->eth_stats.ierrors++;
+                       rxvq->errors++;
                        continue;
                }
 
@@ -317,20 +317,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
 
                rx_pkts[nb_rx++] = rxm;
-               hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
-               hw->eth_stats.q_ibytes[rxvq->queue_id] += len[i]
-                       - sizeof(struct virtio_net_hdr);
+               rxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);
        }
 
-       hw->eth_stats.ipackets += nb_rx;
-       hw->eth_stats.q_ipackets[rxvq->queue_id] += nb_rx;
+       rxvq->packets += nb_rx;
 
        /* Allocate new mbuf for the used descriptor */
        error = ENOSPC;
        while (likely(!virtqueue_full(rxvq))) {
                new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
                if (unlikely(new_mbuf == NULL)) {
-                       hw->eth_stats.rx_nombuf++;
+                       struct rte_eth_dev *dev
+                               = &rte_eth_devices[rxvq->port_id];
+                       dev->data->rx_mbuf_alloc_failed++;
                        break;
                }
                error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
@@ -359,7 +358,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct rte_mbuf *txm;
        uint16_t nb_used, nb_tx, num;
        int error;
-       struct virtio_hw *hw;
 
        nb_tx = 0;
 
@@ -371,7 +369,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
        rmb();
 
-       hw = txvq->hw;
        num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
 
        while (nb_tx < nb_pkts) {
@@ -394,9 +391,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                break;
                        }
                        nb_tx++;
-                       hw->eth_stats.obytes += txm->pkt.data_len;
-                       hw->eth_stats.q_obytes[txvq->queue_id]
-                               += txm->pkt.data_len;
+                       txvq->bytes += txm->pkt.data_len;
                } else {
                        PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
                        break;
@@ -404,8 +399,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        }
        vq_update_avail_idx(txvq);
 
-       hw->eth_stats.opackets += nb_tx;
-       hw->eth_stats.q_opackets[txvq->queue_id] += nb_tx;
+       txvq->packets += nb_tx;
 
        if (unlikely(virtqueue_kick_prepare(txvq))) {
                virtqueue_notify(txvq);
index e244199..11e3b7a 100644 (file)
@@ -154,6 +154,11 @@ struct virtqueue {
        uint16_t vq_avail_idx;
        phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
 
+       /* Statistics */
+       uint64_t        packets;
+       uint64_t        bytes;
+       uint64_t        errors;
+
        struct vq_desc_extra {
                void              *cookie;
                uint16_t          ndescs;