ring: return free space when enqueuing
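
The bulk ring calls in this example no longer return 0 or a negative errno
(-ENOBUFS/-ENOENT); they return the number of objects actually processed,
which for the bulk variants is either all of them or zero. In addition,
rte_ring_sp_enqueue_bulk() gains an output parameter that, when non-NULL,
receives the ring's remaining free space, so the callers below pass NULL and
treat a return value of 0 as "ring full". A minimal sketch of the updated
calling convention, using placeholder names (ring, objs, n):

    unsigned int free_space;
    unsigned int ret;

    /* Returns the number of objects enqueued: 0 or n for the bulk variant.
     * The last argument, if non-NULL, is filled with the free space left in
     * the ring after the operation; pass NULL when it is not needed. */
    ret = rte_ring_sp_enqueue_bulk(ring, (void **) objs, n, &free_space);
    if (ret == 0) {
            /* Ring was full: nothing was enqueued, free or retry the objects. */
    }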
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index a53f33f..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -48,7 +48,6 @@
 #include <rte_memory.h>
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_launch.h>
@@ -145,9 +144,10 @@ app_lcore_io_rx_buffer_to_send (
        ret = rte_ring_sp_enqueue_bulk(
                lp->rx.rings[worker],
                (void **) lp->rx.mbuf_out[worker].array,
-               bsz);
+               bsz,
+               NULL);
 
-       if (unlikely(ret == -ENOBUFS)) {
+       if (unlikely(ret == 0)) {
                uint32_t k;
                for (k = 0; k < bsz; k ++) {
                        struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -311,9 +311,10 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
                ret = rte_ring_sp_enqueue_bulk(
                        lp->rx.rings[worker],
                        (void **) lp->rx.mbuf_out[worker].array,
-                       lp->rx.mbuf_out[worker].n_mbufs);
+                       lp->rx.mbuf_out[worker].n_mbufs,
+                       NULL);
 
-               if (unlikely(ret < 0)) {
+               if (unlikely(ret == 0)) {
                        uint32_t k;
                        for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
                                struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -350,9 +351,8 @@ app_lcore_io_tx(
                                (void **) &lp->tx.mbuf_out[port].array[n_mbufs],
                                bsz_rd);
 
-                       if (unlikely(ret == -ENOENT)) {
+                       if (unlikely(ret == 0))
                                continue;
-                       }
 
                        n_mbufs += bsz_rd;
 
@@ -506,9 +506,8 @@ app_lcore_worker(
                        (void **) lp->mbuf_in.array,
                        bsz_rd);
 
-               if (unlikely(ret == -ENOENT)) {
+               if (unlikely(ret == 0))
                        continue;
-               }
 
 #if APP_WORKER_DROP_ALL_PACKETS
                for (j = 0; j < bsz_rd; j ++) {
@@ -526,7 +525,7 @@ app_lcore_worker(
                        struct rte_mbuf *pkt;
                        struct ipv4_hdr *ipv4_hdr;
                        uint32_t ipv4_dst, pos;
-                       uint8_t port;
+                       uint32_t port;
 
                        if (likely(j < bsz_rd - 1)) {
                                APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
@@ -536,7 +535,9 @@ app_lcore_worker(
                        }
 
                        pkt = lp->mbuf_in.array[j];
-                       ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr));
+                       ipv4_hdr = rte_pktmbuf_mtod_offset(pkt,
+                                                          struct ipv4_hdr *,
+                                                          sizeof(struct ether_hdr));
                        ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
 
                        if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
@@ -554,11 +555,12 @@ app_lcore_worker(
                        ret = rte_ring_sp_enqueue_bulk(
                                lp->rings_out[port],
                                (void **) lp->mbuf_out[port].array,
-                               bsz_wr);
+                               bsz_wr,
+                               NULL);
 
 #if APP_STATS
                        lp->rings_out_iters[port] ++;
-                       if (ret == 0) {
+                       if (ret > 0) {
                                lp->rings_out_count[port] += 1;
                        }
                        if (lp->rings_out_iters[port] == APP_STATS){
@@ -571,7 +573,7 @@ app_lcore_worker(
                        }
 #endif
 
-                       if (unlikely(ret == -ENOBUFS)) {
+                       if (unlikely(ret == 0)) {
                                uint32_t k;
                                for (k = 0; k < bsz_wr; k ++) {
                                        struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -606,9 +608,10 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
                ret = rte_ring_sp_enqueue_bulk(
                        lp->rings_out[port],
                        (void **) lp->mbuf_out[port].array,
-                       lp->mbuf_out[port].n_mbufs);
+                       lp->mbuf_out[port].n_mbufs,
+                       NULL);
 
-               if (unlikely(ret < 0)) {
+               if (unlikely(ret == 0)) {
                        uint32_t k;
                        for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
                                struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];