/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
struct ring_queue {
struct rte_ring *rng;
- volatile unsigned long rx_pkts;
- volatile unsigned long tx_pkts;
- volatile unsigned long err_pkts;
+ rte_atomic64_t rx_pkts;
+ rte_atomic64_t tx_pkts;
+ rte_atomic64_t err_pkts;
};
struct pmd_internals {
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
ptrs, nb_bufs);
if (r->rng->flags & RING_F_SC_DEQ)
- r->rx_pkts += nb_rx;
+ r->rx_pkts.cnt += nb_rx;
else
- __sync_fetch_and_add(&r->rx_pkts, nb_rx);
+ rte_atomic64_add(&(r->rx_pkts), nb_rx);
return nb_rx;
}
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
ptrs, nb_bufs);
if (r->rng->flags & RING_F_SP_ENQ) {
- r->tx_pkts += nb_tx;
- r->err_pkts += nb_bufs - nb_tx;
+ r->tx_pkts.cnt += nb_tx;
+ r->err_pkts.cnt += nb_bufs - nb_tx;
} else {
- __sync_fetch_and_add(&r->tx_pkts, nb_tx);
- __sync_fetch_and_add(&r->err_pkts, nb_bufs - nb_tx);
+ rte_atomic64_add(&(r->tx_pkts), nb_tx);
+ rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
}
return nb_tx;
}
memset(igb_stats, 0, sizeof(*igb_stats));
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < internal->nb_rx_queues; i++) {
- igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
+ igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
rx_total += igb_stats->q_ipackets[i];
}
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < internal->nb_tx_queues; i++) {
- igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
- igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts;
+ igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+ igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
tx_total += igb_stats->q_opackets[i];
tx_err_total += igb_stats->q_errors[i];
}
unsigned i;
struct pmd_internals *internal = dev->data->dev_private;
for (i = 0; i < internal->nb_rx_queues; i++)
- internal->rx_ring_queues[i].rx_pkts = 0;
+ internal->rx_ring_queues[i].rx_pkts.cnt = 0;
for (i = 0; i < internal->nb_tx_queues; i++) {
- internal->tx_ring_queues[i].tx_pkts = 0;
- internal->tx_ring_queues[i].err_pkts = 0;
+ internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+ internal->tx_ring_queues[i].err_pkts.cnt = 0;
}
}
struct rte_eth_dev *eth_dev = NULL;
unsigned i;
- RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
- numa_node);
-
/* do some parameter checking */
if (rx_queues == NULL && nb_rx_queues > 0)
goto error;
if (tx_queues == NULL && nb_tx_queues > 0)
goto error;
+ RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+ numa_node);
+
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/