X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ring%2Frte_eth_ring.c;h=5ddecc57be7b67cfe3fe4451cd8d5e221d93165b;hb=e9d48c0072d36eb6423b45fba4ec49d0def6c36f;hp=d9667c035dd9fac873ab782a6212e492269ea5b9;hpb=e1e4017751f1a7ed4807269559b8a9bedb9cd73a;p=dpdk.git

diff --git a/lib/librte_pmd_ring/rte_eth_ring.c b/lib/librte_pmd_ring/rte_eth_ring.c
index d9667c035d..5ddecc57be 100644
--- a/lib/librte_pmd_ring/rte_eth_ring.c
+++ b/lib/librte_pmd_ring/rte_eth_ring.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -40,9 +40,9 @@
 
 struct ring_queue {
 	struct rte_ring *rng;
-	volatile unsigned long rx_pkts;
-	volatile unsigned long tx_pkts;
-	volatile unsigned long err_pkts;
+	rte_atomic64_t rx_pkts;
+	rte_atomic64_t tx_pkts;
+	rte_atomic64_t err_pkts;
 };
 
 struct pmd_internals {
@@ -70,9 +70,9 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
 			ptrs, nb_bufs);
 	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts += nb_rx;
+		r->rx_pkts.cnt += nb_rx;
 	else
-		__sync_fetch_and_add(&r->rx_pkts, nb_rx);
+		rte_atomic64_add(&(r->rx_pkts), nb_rx);
 	return nb_rx;
 }
 
@@ -84,11 +84,11 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
 			ptrs, nb_bufs);
 	if (r->rng->flags & RING_F_SP_ENQ) {
-		r->tx_pkts += nb_tx;
-		r->err_pkts += nb_bufs - nb_tx;
+		r->tx_pkts.cnt += nb_tx;
+		r->err_pkts.cnt += nb_bufs - nb_tx;
 	} else {
-		__sync_fetch_and_add(&r->tx_pkts, nb_tx);
-		__sync_fetch_and_add(&r->err_pkts, nb_bufs - nb_tx);
+		rte_atomic64_add(&(r->tx_pkts), nb_tx);
+		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
 	}
 	return nb_tx;
 }
@@ -157,14 +157,14 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
 	memset(igb_stats, 0, sizeof(*igb_stats));
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < internal->nb_rx_queues; i++) {
-		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
+		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
 		rx_total += igb_stats->q_ipackets[i];
 	}
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < internal->nb_tx_queues; i++) {
-		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts;
+		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
 		tx_total += igb_stats->q_opackets[i];
 		tx_err_total += igb_stats->q_errors[i];
 	}
@@ -180,10 +180,10 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	unsigned i;
 	struct pmd_internals *internal = dev->data->dev_private;
 	for (i = 0; i < internal->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts = 0;
+		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
 	for (i = 0; i < internal->nb_tx_queues; i++) {
-		internal->tx_ring_queues[i].tx_pkts = 0;
-		internal->tx_ring_queues[i].err_pkts = 0;
+		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+		internal->tx_ring_queues[i].err_pkts.cnt = 0;
 	}
 }
 
@@ -220,15 +220,15 @@ rte_eth_from_rings(struct rte_ring *const rx_queues[],
 	struct rte_eth_dev *eth_dev = NULL;
 	unsigned i;
 
-	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
-			numa_node);
-
 	/* do some paramter checking */
 	if (rx_queues == NULL && nb_rx_queues > 0)
 		goto error;
 	if (tx_queues == NULL && nb_tx_queues > 0)
 		goto error;
 
+	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+			numa_node);
+
 	/* now do all data allocation - for eth_dev structure, dummy pci driver
 	 * and internal (private) data
 	 */
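Note (not part of the patch): a minimal sketch of the counter-update pattern the hunks above introduce, assuming DPDK's <rte_atomic.h> and <rte_ring.h>; the struct and function names below are illustrative only, not taken from the driver. On a single-consumer/single-producer ring the queue is only touched by one lcore, so the .cnt field of the rte_atomic64_t can be bumped directly; otherwise rte_atomic64_add() keeps the statistic consistent across lcores, replacing the previous __sync_fetch_and_add() on a volatile unsigned long.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_ring.h>

/* Illustrative queue with an atomic RX counter, mirroring struct ring_queue. */
struct ring_queue_sketch {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
};

/* Account for nb_rx packets dequeued from queue r. */
static inline void
count_rx(struct ring_queue_sketch *r, uint16_t nb_rx)
{
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;              /* single consumer: plain add */
	else
		rte_atomic64_add(&r->rx_pkts, nb_rx); /* multi consumer: atomic add */
}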