update Intel copyright years to 2014
diff --git a/lib/librte_pmd_ring/rte_eth_ring.c b/lib/librte_pmd_ring/rte_eth_ring.c
index d9667c0..5ddecc5 100644
--- a/lib/librte_pmd_ring/rte_eth_ring.c
+++ b/lib/librte_pmd_ring/rte_eth_ring.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -40,9 +40,9 @@
 
 struct ring_queue {
 	struct rte_ring *rng;
-	volatile unsigned long rx_pkts;
-	volatile unsigned long tx_pkts;
-	volatile unsigned long err_pkts;
+	rte_atomic64_t rx_pkts;
+	rte_atomic64_t tx_pkts;
+	rte_atomic64_t err_pkts;
 };
 
 struct pmd_internals {
@@ -70,9 +70,9 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
 			ptrs, nb_bufs);
 	if (r->rng->flags & RING_F_SC_DEQ)
-		r->rx_pkts += nb_rx;
+		r->rx_pkts.cnt += nb_rx;
 	else
-		__sync_fetch_and_add(&r->rx_pkts, nb_rx);
+		rte_atomic64_add(&(r->rx_pkts), nb_rx);
 	return nb_rx;
 }
 
@@ -84,11 +84,11 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
 			ptrs, nb_bufs);
 	if (r->rng->flags & RING_F_SP_ENQ) {
-		r->tx_pkts += nb_tx;
-		r->err_pkts += nb_bufs - nb_tx;
+		r->tx_pkts.cnt += nb_tx;
+		r->err_pkts.cnt += nb_bufs - nb_tx;
 	} else {
-		__sync_fetch_and_add(&r->tx_pkts, nb_tx);
-		__sync_fetch_and_add(&r->err_pkts, nb_bufs - nb_tx);
+		rte_atomic64_add(&(r->tx_pkts), nb_tx);
+		rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
 	}
 	return nb_tx;
 }
@@ -157,14 +157,14 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
 	memset(igb_stats, 0, sizeof(*igb_stats));
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < internal->nb_rx_queues; i++) {
-		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
+		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
 		rx_total += igb_stats->q_ipackets[i];
 	}
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
 			i < internal->nb_tx_queues; i++) {
-		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
-		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts;
+		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
 		tx_total += igb_stats->q_opackets[i];
 		tx_err_total += igb_stats->q_errors[i];
 	}
@@ -180,10 +180,10 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	unsigned i;
 	struct pmd_internals *internal = dev->data->dev_private;
 	for (i = 0; i < internal->nb_rx_queues; i++)
-		internal->rx_ring_queues[i].rx_pkts = 0;
+		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
 	for (i = 0; i < internal->nb_tx_queues; i++) {
-		internal->tx_ring_queues[i].tx_pkts = 0;
-		internal->tx_ring_queues[i].err_pkts = 0;
+		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+		internal->tx_ring_queues[i].err_pkts.cnt = 0;
 	}
 }
 
@@ -220,15 +220,15 @@ rte_eth_from_rings(struct rte_ring *const rx_queues[],
 	struct rte_eth_dev *eth_dev = NULL;
 	unsigned i;
 
-	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
-			numa_node);
-
 	/* do some paramter checking */
 	if (rx_queues == NULL && nb_rx_queues > 0)
 		goto error;
 	if (tx_queues == NULL && nb_tx_queues > 0)
 		goto error;
 
+	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+			numa_node);
+
 	/* now do all data allocation - for eth_dev structure, dummy pci driver
 	 * and internal (private) data
 	 */
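
The counter-related hunks above switch the per-queue statistics from plain volatile unsigned long fields updated with GCC __sync builtins to DPDK's rte_atomic64_t, touching the .cnt field directly on the lock-free single-producer/single-consumer paths and calling rte_atomic64_add() otherwise. A minimal sketch of that pattern, assuming a DPDK build environment; the count_rx() helper is hypothetical and only illustrates the update logic used in the patch:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_ring.h>

/* Per-queue counter kept as rte_atomic64_t, as in the patch above. */
struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
};

/* Hypothetical helper: bump the RX counter after a dequeue burst.
 * With a single-consumer ring only one thread updates the counter,
 * so writing the .cnt field directly is enough; otherwise fall back
 * to the atomic add. */
static inline void
count_rx(struct ring_queue *r, uint16_t nb_rx)
{
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&r->rx_pkts, nb_rx);
}

Reading the counters back (as eth_stats_get() does) just accesses .cnt, which is adequate for statistics; rte_atomic64_read() could be used where a consistent snapshot matters.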