	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
-	rte_atomic64_t err_pkts;
};

struct pmd_internals {

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);

-	if (r->rng->flags & RING_F_SP_ENQ) {
+	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
-		r->err_pkts.cnt += nb_bufs - nb_tx;
-	} else {
+	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
-		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
-	}
	return nb_tx;
}
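
The branch on RING_F_SP_ENQ works because a ring created as single-producer is only ever enqueued to from one thread, so the plain, non-atomic update of tx_pkts.cnt cannot race. A minimal sketch of creating such a ring follows; the name "tx_ring" and the size 1024 are illustrative, not taken from the patch:

#include <rte_ring.h>
#include <rte_lcore.h>

/* Single-producer/single-consumer ring: eth_ring_tx on top of this
 * ring takes the non-atomic counter path above. */
static struct rte_ring *
make_sp_ring(void)
{
	return rte_ring_create("tx_ring", 1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}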

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
-	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
-		tx_err_total += internal->tx_ring_queues[i].err_pkts.cnt;
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
-	stats->oerrors = tx_err_total;
	return 0;
}
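
With err_pkts gone, a ring port's oerrors counter stays at zero; only ipackets/opackets move. A quick way to observe this from an application, assuming an already-initialized port (port_id is a placeholder):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch, not part of the patch: dump the basic counters that
 * eth_stats_get above fills in. */
static void
print_ring_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("rx=%" PRIu64 " tx=%" PRIu64 " oerr=%" PRIu64 "\n",
				stats.ipackets, stats.opackets, stats.oerrors);
}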

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
-		internal->tx_ring_queues[i].err_pkts.cnt = 0;
-	}
}
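
The rationale for dropping err_pkts: an enqueue shortfall means the ring is full, i.e. back-pressure, and the unsent mbufs are still owned by the caller, so counting them as device errors was misleading. A sketch of the usual caller-side handling, assuming a configured ring port (port_id and queue_id are placeholders):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch, not part of the patch: transmit a burst and free whatever
 * the ring could not absorb; the shortfall is congestion, not an error. */
static void
send_or_drop(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, bufs, nb_bufs);

	while (sent < nb_bufs)
		rte_pktmbuf_free(bufs[sent++]);
}
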
static void