static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
-	struct virtio_hw *hw =
-		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (stats)
-		memcpy(stats, &hw->eth_stats, sizeof(*stats));
+	unsigned int i;
+
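+	/* Add each queue's TX counters to the device-wide totals */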
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		const struct virtqueue *txvq = dev->data->tx_queues[i];
+		if (txvq == NULL)
+			continue;
+
+		stats->opackets += txvq->packets;
+		stats->obytes += txvq->bytes;
+		stats->oerrors += txvq->errors;
+
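+		/* Queue-indexed stats exist only for the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues */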
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_opackets[i] = txvq->packets;
+			stats->q_obytes[i] = txvq->bytes;
+		}
+	}
+
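+	/* Add each queue's RX counters to the device-wide totals */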
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		const struct virtqueue *rxvq = dev->data->rx_queues[i];
+		if (rxvq == NULL)
+			continue;
+
+		stats->ipackets += rxvq->packets;
+		stats->ibytes += rxvq->bytes;
+		stats->ierrors += rxvq->errors;
+
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_ipackets[i] = rxvq->packets;
+			stats->q_ibytes[i] = rxvq->bytes;
+		}
+	}
+
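+	/* mbuf allocation failures are tracked per device, not per queue */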
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}
static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
-	struct virtio_hw *hw =
-		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	/* Reset software totals */
-	memset(&hw->eth_stats, 0, sizeof(hw->eth_stats));
+	unsigned int i;
+
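+	/* Zero the TX counters kept in each virtqueue */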
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct virtqueue *txvq = dev->data->tx_queues[i];
+		if (txvq == NULL)
+			continue;
+
+		txvq->packets = 0;
+		txvq->bytes = 0;
+		txvq->errors = 0;
+	}
+
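+	/* Zero the RX counters kept in each virtqueue */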
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct virtqueue *rxvq = dev->data->rx_queues[i];
+		if (rxvq == NULL)
+			continue;
+
+		rxvq->packets = 0;
+		rxvq->bytes = 0;
+		rxvq->errors = 0;
+	}
+
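+	/* Clear the device-level allocation failure counter as well */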
+	dev->data->rx_mbuf_alloc_failed = 0;
}
static void
			PMD_RX_LOG(ERR, "Packet drop\n");
			nb_enqueued++;
			virtio_discard_rxbuf(rxvq, rxm);
-			hw->eth_stats.ierrors++;
+			rxvq->errors++;
			continue;
		}
		VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
		rx_pkts[nb_rx++] = rxm;
-		hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
-		hw->eth_stats.q_ibytes[rxvq->queue_id] += len[i]
-			- sizeof(struct virtio_net_hdr);
+		rxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);
	}
-	hw->eth_stats.ipackets += nb_rx;
-	hw->eth_stats.q_ipackets[rxvq->queue_id] += nb_rx;
+	rxvq->packets += nb_rx;
	/* Allocate new mbuf for the used descriptor */
	error = ENOSPC;
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
-			hw->eth_stats.rx_nombuf++;
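+			/* Charge the allocation failure to the device that owns this queue */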
+			struct rte_eth_dev *dev
+				= &rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num;
	int error;
-	struct virtio_hw *hw;
	nb_tx = 0;
	rmb();
-	hw = txvq->hw;
	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
	while (nb_tx < nb_pkts) {
				break;
			}
			nb_tx++;
-			hw->eth_stats.obytes += txm->pkt.data_len;
-			hw->eth_stats.q_obytes[txvq->queue_id]
-				+= txm->pkt.data_len;
+			txvq->bytes += txm->pkt.data_len;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			break;
		}
	vq_update_avail_idx(txvq);
-	hw->eth_stats.opackets += nb_tx;
-	hw->eth_stats.q_opackets[txvq->queue_id] += nb_tx;
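+	/* Count every packet placed on the avail ring during this burst */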
+	txvq->packets += nb_tx;
	if (unlikely(virtqueue_kick_prepare(txvq))) {
		virtqueue_notify(txvq);