net/mlx4: fix returned values upon failed probing
[dpdk.git] / examples / load_balancer / runtime.c
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index a53f33f..8192c08 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -48,7 +48,6 @@
 #include <rte_memory.h>
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_launch.h>
@@ -145,9 +144,10 @@ app_lcore_io_rx_buffer_to_send (
 	ret = rte_ring_sp_enqueue_bulk(
 		lp->rx.rings[worker],
 		(void **) lp->rx.mbuf_out[worker].array,
-		bsz);
+		bsz,
+		NULL);
 
-	if (unlikely(ret == -ENOBUFS)) {
+	if (unlikely(ret == 0)) {
 		uint32_t k;
 		for (k = 0; k < bsz; k ++) {
 			struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -311,9 +311,10 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
 		ret = rte_ring_sp_enqueue_bulk(
 			lp->rx.rings[worker],
 			(void **) lp->rx.mbuf_out[worker].array,
-			lp->rx.mbuf_out[worker].n_mbufs);
+			lp->rx.mbuf_out[worker].n_mbufs,
+			NULL);
 
-		if (unlikely(ret < 0)) {
+		if (unlikely(ret == 0)) {
 			uint32_t k;
 			for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
 				struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -348,11 +349,11 @@ app_lcore_io_tx(
 			ret = rte_ring_sc_dequeue_bulk(
 				ring,
 				(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
-				bsz_rd);
+				bsz_rd,
+				NULL);
 
-			if (unlikely(ret == -ENOENT)) {
+			if (unlikely(ret == 0))
 				continue;
-			}
 
 			n_mbufs += bsz_rd;
 
@@ -504,11 +505,11 @@ app_lcore_worker(
 		ret = rte_ring_sc_dequeue_bulk(
 			ring_in,
 			(void **) lp->mbuf_in.array,
-			bsz_rd);
+			bsz_rd,
+			NULL);
 
-		if (unlikely(ret == -ENOENT)) {
+		if (unlikely(ret == 0))
 			continue;
-		}
 
 #if APP_WORKER_DROP_ALL_PACKETS
 		for (j = 0; j < bsz_rd; j ++) {
@@ -526,7 +527,7 @@ app_lcore_worker(
 			struct rte_mbuf *pkt;
 			struct ipv4_hdr *ipv4_hdr;
 			uint32_t ipv4_dst, pos;
-			uint8_t port;
+			uint32_t port;
 
 			if (likely(j < bsz_rd - 1)) {
 				APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
@@ -536,7 +537,9 @@ app_lcore_worker(
 			}
 
 			pkt = lp->mbuf_in.array[j];
-			ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr));
+			ipv4_hdr = rte_pktmbuf_mtod_offset(pkt,
+							   struct ipv4_hdr *,
+							   sizeof(struct ether_hdr));
 			ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
 
 			if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
@@ -554,11 +557,12 @@ app_lcore_worker(
 		ret = rte_ring_sp_enqueue_bulk(
 			lp->rings_out[port],
 			(void **) lp->mbuf_out[port].array,
-			bsz_wr);
+			bsz_wr,
+			NULL);
 
 #if APP_STATS
 		lp->rings_out_iters[port] ++;
-		if (ret == 0) {
+		if (ret > 0) {
 			lp->rings_out_count[port] += 1;
 		}
 		if (lp->rings_out_iters[port] == APP_STATS){
@@ -571,7 +575,7 @@ app_lcore_worker(
 		}
 #endif
 
-		if (unlikely(ret == -ENOBUFS)) {
+		if (unlikely(ret == 0)) {
 			uint32_t k;
 			for (k = 0; k < bsz_wr; k ++) {
 				struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -606,9 +610,10 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
 		ret = rte_ring_sp_enqueue_bulk(
 			lp->rings_out[port],
 			(void **) lp->mbuf_out[port].array,
-			lp->mbuf_out[port].n_mbufs);
+			lp->mbuf_out[port].n_mbufs,
+			NULL);
 
-		if (unlikely(ret < 0)) {
+		if (unlikely(ret == 0)) {
 			uint32_t k;
 			for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
 				struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
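Every ring hunk in this diff applies the same migration: starting with the DPDK 17.05 ring rework, rte_ring_sp_enqueue_bulk() and rte_ring_sc_dequeue_bulk() take a fourth argument (a pointer that optionally receives the remaining free space or available entries; NULL when unused) and return the number of objects enqueued or dequeued instead of 0 on success and -ENOBUFS/-ENOENT on failure, which is why every error check becomes a test for ret == 0. Below is a minimal sketch of the new calling convention; the function and variable names (send_burst, ring, burst, n) are illustrative and not taken from runtime.c:

#include <rte_mbuf.h>
#include <rte_ring.h>

/* Sketch of the post-17.05 bulk enqueue convention used throughout
 * this diff; assumes an initialized ring and a filled mbuf burst. */
static void
send_burst(struct rte_ring *ring, struct rte_mbuf **burst, uint32_t n)
{
	/* The fourth argument may receive the ring's remaining free
	 * space; pass NULL when the caller does not need it. */
	unsigned int ret = rte_ring_sp_enqueue_bulk(ring,
		(void **) burst, n, NULL);

	/* Bulk calls are all-or-nothing: ret is either n or 0, so
	 * failure is now ret == 0 rather than ret == -ENOBUFS. */
	if (ret == 0) {
		uint32_t k;

		for (k = 0; k < n; k++)
			rte_pktmbuf_free(burst[k]);
	}
}

The two non-ring hunks are independent cleanups: port widens from uint8_t to uint32_t to match the widened next-hop argument of rte_lpm_lookup(), and the open-coded cast past the Ethernet header is replaced by the rte_pktmbuf_mtod_offset() helper, which performs the same offset arithmetic.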