ring: use rte_atomic functions
author     Intel <intel.com>
           Fri, 8 Nov 2013 02:00:00 +0000 (03:00 +0100)
committer  Thomas Monjalon <thomas.monjalon@6wind.com>
           Sun, 24 Nov 2013 00:31:33 +0000 (01:31 +0100)
Rather than calling compiler intrinsics directly,
use the wrapper functions from rte_atomic.h.

Signed-off-by: Intel
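
For reference, the rte_atomic.h counter API this patch moves to is a thin,
typed wrapper around the same full-barrier builtins; the generic
implementation of rte_atomic64_add() calls __sync_fetch_and_add() on the
embedded cnt field, so behavior is unchanged. A minimal sketch of the
counter pattern being adopted (not part of the patch itself):

    #include <stdint.h>
    #include <rte_atomic.h>

    static rte_atomic64_t pkt_counter;   /* struct wrapping volatile int64_t cnt */

    static void
    counter_setup(void)
    {
            rte_atomic64_init(&pkt_counter);      /* zeroes cnt */
    }

    static void
    counter_bump(uint16_t nb)
    {
            rte_atomic64_add(&pkt_counter, nb);   /* atomic read-modify-write */
    }

    static int64_t
    counter_read(void)
    {
            return rte_atomic64_read(&pkt_counter);
    }
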
diff --git a/lib/librte_pmd_ring/rte_eth_ring.c b/lib/librte_pmd_ring/rte_eth_ring.c
index 9514aab..fa3ff72 100644
--- a/lib/librte_pmd_ring/rte_eth_ring.c
+++ b/lib/librte_pmd_ring/rte_eth_ring.c
@@ -40,9 +40,9 @@
 
 struct ring_queue {
        struct rte_ring *rng;
-       volatile unsigned long rx_pkts;
-       volatile unsigned long tx_pkts;
-       volatile unsigned long err_pkts;
+       rte_atomic64_t rx_pkts;
+       rte_atomic64_t tx_pkts;
+       rte_atomic64_t err_pkts;
 };
 
 struct pmd_internals {
@@ -70,9 +70,9 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng, 
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SC_DEQ)
-               r->rx_pkts += nb_rx;
+               r->rx_pkts.cnt += nb_rx;
        else
-               __sync_fetch_and_add(&r->rx_pkts, nb_rx);
+               rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
 }
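
The flags test kept above is the point of the fast path: a ring created
with RING_F_SC_DEQ has exactly one consumer, so the plain increment of the
cnt field cannot race and avoids a locked bus operation; only multi-consumer
rings pay for the atomic add. A hedged sketch of the two update paths (the
q_stats type and helper names are illustrative, not from the driver):

    #include <stdint.h>
    #include <rte_atomic.h>

    struct q_stats {
            rte_atomic64_t pkts;
    };

    /* Single writer thread: an ordinary increment is sufficient. */
    static inline void
    q_stats_add_sc(struct q_stats *s, uint16_t n)
    {
            s->pkts.cnt += n;
    }

    /* Concurrent writers: full-barrier atomic read-modify-write. */
    static inline void
    q_stats_add_mc(struct q_stats *s, uint16_t n)
    {
            rte_atomic64_add(&s->pkts, n);
    }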
 
@@ -84,11 +84,11 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng, 
                        ptrs, nb_bufs);
        if (r->rng->flags & RING_F_SP_ENQ) {
-               r->tx_pkts += nb_tx;
-               r->err_pkts += nb_bufs - nb_tx;
+               r->tx_pkts.cnt += nb_tx;
+               r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
-               __sync_fetch_and_add(&r->tx_pkts, nb_tx);
-               __sync_fetch_and_add(&r->err_pkts, nb_bufs - nb_tx);
+               rte_atomic64_add(&(r->tx_pkts), nb_tx);
+               rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
 }
@@ -157,14 +157,14 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
        memset(igb_stats, 0, sizeof(*igb_stats));
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
-               igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
+               igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }
 
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_tx_queues; i++) {
-               igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-               igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts;
+               igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+               igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }
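
The statistics poll reads the raw cnt field directly, which is a plain
64-bit load. A slightly more defensive equivalent would go through the
accessor, which on 32-bit targets falls back to an atomic 64-bit read;
a sketch only, not part of the patch:

    igb_stats->q_ipackets[i] =
            rte_atomic64_read(&internal->rx_ring_queues[i].rx_pkts);
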
@@ -180,10 +180,10 @@ eth_stats_reset(struct rte_eth_dev *dev)
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++)
-               internal->rx_ring_queues[i].rx_pkts = 0;
+               internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
-               internal->tx_ring_queues[i].tx_pkts = 0;
-               internal->tx_ring_queues[i].err_pkts = 0;
+               internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+               internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
 }
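
The reset path likewise writes cnt directly, mirroring the read side
above. The accessor-based equivalent would be rte_atomic64_init(), which
zeroes the counter, or rte_atomic64_set(); a sketch assuming the same
fields:

    rte_atomic64_init(&internal->rx_ring_queues[i].rx_pkts);
    rte_atomic64_set(&internal->tx_ring_queues[i].tx_pkts, 0);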