X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fload_balancer%2Fruntime.c;h=8192c081275ac0f20712cfbb68b8145371f61508;hb=e629b8dbd9b38259e5fe32475fdf3e56176d8f39;hp=d349df3fa2f93a2ce9b3fabfb5de7f9aeb89c45a;hpb=af75078fece3615088e561357c1e97603e43a5fe;p=dpdk.git

diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index d349df3fa2..8192c08127 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -1,36 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  version: DPDK.L.1.2.3-3
  */
 
 #include
@@ -50,7 +48,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -147,9 +144,10 @@ app_lcore_io_rx_buffer_to_send (
         ret = rte_ring_sp_enqueue_bulk(
                 lp->rx.rings[worker],
                 (void **) lp->rx.mbuf_out[worker].array,
-                bsz);
+                bsz,
+                NULL);
 
-        if (unlikely(ret == -ENOBUFS)) {
+        if (unlikely(ret == 0)) {
                 uint32_t k;
                 for (k = 0; k < bsz; k ++) {
                         struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -166,11 +164,11 @@
                 lp->rx.rings_count[worker] ++;
         }
         if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
-                uint32_t lcore = rte_lcore_id();
+                unsigned lcore = rte_lcore_id();
 
                 printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
                         lcore,
-                        worker,
+                        (unsigned)worker,
                         ((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker]));
                 lp->rx.rings_iters[worker] = 0;
                 lp->rx.rings_count[worker] = 0;
@@ -187,7 +185,7 @@ app_lcore_io_rx(
         uint8_t pos_lb)
 {
         struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
-        uint8_t *data_1_0, *data_1_1;
+        uint8_t *data_1_0, *data_1_1 = NULL;
         uint32_t i;
 
         for (i = 0; i < lp->rx.n_nic_queues; i ++) {
@@ -210,14 +208,14 @@ app_lcore_io_rx(
                 lp->rx.nic_queues_count[i] += n_mbufs;
                 if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
                         struct rte_eth_stats stats;
-                        uint32_t lcore = rte_lcore_id();
+                        unsigned lcore = rte_lcore_id();
 
                         rte_eth_stats_get(port, &stats);
 
                         printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
                                 lcore,
-                                (uint32_t) port,
-                                (double) stats.ierrors / (double) (stats.ierrors + stats.ipackets),
+                                (unsigned) port,
+                                (double) stats.imissed / (double) (stats.imissed + stats.ipackets),
                                 ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
                         lp->rx.nic_queues_iters[i] = 0;
                         lp->rx.nic_queues_count[i] = 0;
@@ -313,9 +311,10 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
                 ret = rte_ring_sp_enqueue_bulk(
                         lp->rx.rings[worker],
                         (void **) lp->rx.mbuf_out[worker].array,
-                        lp->rx.mbuf_out[worker].n_mbufs);
+                        lp->rx.mbuf_out[worker].n_mbufs,
+                        NULL);
 
-                if (unlikely(ret < 0)) {
+                if (unlikely(ret == 0)) {
                         uint32_t k;
                         for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
                                 struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -350,11 +349,11 @@ app_lcore_io_tx(
                         ret = rte_ring_sc_dequeue_bulk(
                                 ring,
                                 (void **) &lp->tx.mbuf_out[port].array[n_mbufs],
-                                bsz_rd);
+                                bsz_rd,
+                                NULL);
 
-                        if (unlikely(ret == -ENOENT)) {
+                        if (unlikely(ret == 0))
                                 continue;
-                        }
 
                         n_mbufs += bsz_rd;
 
@@ -393,11 +392,11 @@ app_lcore_io_tx(
                         lp->tx.nic_ports_iters[port] ++;
                         lp->tx.nic_ports_count[port] += n_pkts;
                         if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) {
-                                uint32_t lcore = rte_lcore_id();
+                                unsigned lcore = rte_lcore_id();
 
                                 printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
                                         lcore,
-                                        (uint32_t) port,
+                                        (unsigned) port,
                                         ((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
                                 lp->tx.nic_ports_iters[port] = 0;
                                 lp->tx.nic_ports_count[port] = 0;
@@ -506,11 +505,11 @@ app_lcore_worker(
                 ret = rte_ring_sc_dequeue_bulk(
                         ring_in,
                         (void **) lp->mbuf_in.array,
-                        bsz_rd);
+                        bsz_rd,
+                        NULL);
 
-                if (unlikely(ret == -ENOENT)) {
+                if (unlikely(ret == 0))
                         continue;
-                }
 
 #if APP_WORKER_DROP_ALL_PACKETS
                 for (j = 0; j < bsz_rd; j ++) {
@@ -528,7 +527,7 @@ app_lcore_worker(
                 struct rte_mbuf *pkt;
                 struct ipv4_hdr *ipv4_hdr;
                 uint32_t ipv4_dst, pos;
-                uint8_t port;
+                uint32_t port;
 
                 if (likely(j < bsz_rd - 1)) {
                        APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
@@ -538,11 +537,13 @@
                 }
 
                 pkt = lp->mbuf_in.array[j];
-                ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr));
+                ipv4_hdr = rte_pktmbuf_mtod_offset(pkt,
+                                                   struct ipv4_hdr *,
+                                                   sizeof(struct ether_hdr));
                 ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
 
                 if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
-                        port = pkt->pkt.in_port;
+                        port = pkt->port;
                 }
 
                 pos = lp->mbuf_out[port].n_mbufs;
@@ -556,24 +557,25 @@
                 ret = rte_ring_sp_enqueue_bulk(
                         lp->rings_out[port],
                         (void **) lp->mbuf_out[port].array,
-                        bsz_wr);
+                        bsz_wr,
+                        NULL);
 
 #if APP_STATS
                 lp->rings_out_iters[port] ++;
-                if (ret == 0) {
+                if (ret > 0) {
                         lp->rings_out_count[port] += 1;
                 }
                 if (lp->rings_out_iters[port] == APP_STATS){
                         printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
-                                lp->worker_id,
-                                (uint32_t) port,
+                                (unsigned) lp->worker_id,
+                                (unsigned) port,
                                 ((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
                         lp->rings_out_iters[port] = 0;
                         lp->rings_out_count[port] = 0;
                 }
 #endif
 
-                if (unlikely(ret == -ENOBUFS)) {
+                if (unlikely(ret == 0)) {
                         uint32_t k;
                         for (k = 0; k < bsz_wr; k ++) {
                                 struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -608,9 +610,10 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
                 ret = rte_ring_sp_enqueue_bulk(
                         lp->rings_out[port],
                         (void **) lp->mbuf_out[port].array,
-                        lp->mbuf_out[port].n_mbufs);
+                        lp->mbuf_out[port].n_mbufs,
+                        NULL);
 
-                if (unlikely(ret < 0)) {
+                if (unlikely(ret == 0)) {
                         uint32_t k;
                         for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
                                 struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -648,7 +651,7 @@ int
 app_lcore_main_loop(__attribute__((unused)) void *arg)
 {
         struct app_lcore_params *lp;
-        uint32_t lcore;
+        unsigned lcore;
 
         lcore = rte_lcore_id();
         lp = &app.lcore_params[lcore];
@@ -661,7 +664,7 @@ app_lcore_main_loop(__attribute__((unused)) void *arg)
         if (lp->type == e_APP_LCORE_WORKER) {
                 printf("Logical core %u (worker %u) main loop.\n",
                         lcore,
-                        lp->worker.worker_id);
+                        (unsigned) lp->worker.worker_id);
 
                 app_lcore_main_loop_worker();
         }
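
Reviewer note (not part of the diff): most of the hunks above follow from one API change. In the reworked rte_ring API that the new side of this diff targets (roughly DPDK 17.05 and later), rte_ring_sp_enqueue_bulk() and rte_ring_sc_dequeue_bulk() take a fourth output argument (remaining free space or available entries, which may be NULL) and return the number of objects actually enqueued or dequeued, either the full count or 0, instead of 0 on success and -ENOBUFS/-ENOENT on failure. That is why every "ret == -ENOBUFS", "ret == -ENOENT" and "ret < 0" test becomes "ret == 0". A minimal sketch of the resulting drop-on-full enqueue pattern follows; the helper name enqueue_burst_or_drop() is hypothetical and only illustrates the pattern used by the new code above.

/*
 * Minimal sketch, assuming the reworked rte_ring API described above:
 * the bulk calls return the object count (0 on failure) and take an
 * extra free_space/available pointer that may be NULL. The helper name
 * enqueue_burst_or_drop() is hypothetical, for illustration only.
 */
#include <stdint.h>

#include <rte_ring.h>
#include <rte_mbuf.h>

static void
enqueue_burst_or_drop(struct rte_ring *ring, struct rte_mbuf **burst, uint32_t n)
{
        /* All-or-nothing: returns n when every object was enqueued, 0 otherwise. */
        if (rte_ring_sp_enqueue_bulk(ring, (void **) burst, n, NULL) == 0) {
                uint32_t k;

                /* Nothing was enqueued, so the caller still owns the mbufs
                 * and must free them back to their mempool (or retry).
                 */
                for (k = 0; k < n; k ++)
                        rte_pktmbuf_free(burst[k]);
        }
}

Because the bulk enqueue either accepts the whole burst or nothing, a return value of 0 leaves ownership of every mbuf with the caller; the load_balancer hunks handle this by freeing the burst, which is the same drop-on-full policy the example used before the API change.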