/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
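
/*
 * Run-time (data path) code of the load balancer example: I/O RX lcores
 * read packets from NIC RX queues and distribute them to worker lcores
 * over software rings, workers route each packet with an LPM lookup on the
 * IPv4 destination address, and I/O TX lcores drain the worker rings and
 * transmit the packets on the NIC TX ports.
 */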
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>

#include "main.h" /* application header assumed to declare the global "app" config and lcore param structs */
#ifndef APP_LCORE_IO_FLUSH
#define APP_LCORE_IO_FLUSH           1000000
#endif

#ifndef APP_LCORE_WORKER_FLUSH
#define APP_LCORE_WORKER_FLUSH       1000000
#endif

#ifndef APP_STATS
#define APP_STATS                    1000000
#endif

#define APP_IO_RX_DROP_ALL_PACKETS   0
#define APP_WORKER_DROP_ALL_PACKETS  0
#define APP_IO_TX_DROP_ALL_PACKETS   0

#ifndef APP_IO_RX_PREFETCH_ENABLE
#define APP_IO_RX_PREFETCH_ENABLE    1
#endif

#ifndef APP_WORKER_PREFETCH_ENABLE
#define APP_WORKER_PREFETCH_ENABLE   1
#endif

#ifndef APP_IO_TX_PREFETCH_ENABLE
#define APP_IO_TX_PREFETCH_ENABLE    1
#endif

#if APP_IO_RX_PREFETCH_ENABLE
#define APP_IO_RX_PREFETCH0(p)       rte_prefetch0(p)
#define APP_IO_RX_PREFETCH1(p)       rte_prefetch1(p)
#else
#define APP_IO_RX_PREFETCH0(p)
#define APP_IO_RX_PREFETCH1(p)
#endif

#if APP_WORKER_PREFETCH_ENABLE
#define APP_WORKER_PREFETCH0(p)      rte_prefetch0(p)
#define APP_WORKER_PREFETCH1(p)      rte_prefetch1(p)
#else
#define APP_WORKER_PREFETCH0(p)
#define APP_WORKER_PREFETCH1(p)
#endif

#if APP_IO_TX_PREFETCH_ENABLE
#define APP_IO_TX_PREFETCH0(p)       rte_prefetch0(p)
#define APP_IO_TX_PREFETCH1(p)       rte_prefetch1(p)
#else
#define APP_IO_TX_PREFETCH0(p)
#define APP_IO_TX_PREFETCH1(p)
#endif
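
/*
 * Append one mbuf to the output buffer of the given worker; once the buffer
 * holds bsz packets, enqueue the whole burst on that worker's ring (the
 * burst is dropped if the ring is full) and update the enqueue statistics.
 */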
static inline void
app_lcore_io_rx_buffer_to_send (
	struct app_lcore_params_io *lp,
	uint32_t worker,
	struct rte_mbuf *mbuf,
	uint32_t bsz)
{
	uint32_t pos;
	int ret;

	pos = lp->rx.mbuf_out[worker].n_mbufs;
	lp->rx.mbuf_out[worker].array[pos ++] = mbuf;
	if (likely(pos < bsz)) {
		lp->rx.mbuf_out[worker].n_mbufs = pos;
		return;
	}

	ret = rte_ring_sp_enqueue_bulk(
		lp->rx.rings[worker],
		(void **) lp->rx.mbuf_out[worker].array,
		bsz);

	if (unlikely(ret == -ENOBUFS)) {
		uint32_t k;
		for (k = 0; k < bsz; k ++) {
			struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
			rte_pktmbuf_free(m);
		}
	}

	lp->rx.mbuf_out[worker].n_mbufs = 0;
	lp->rx.mbuf_out_flush[worker] = 0;

	lp->rx.rings_iters[worker] ++;
	if (likely(ret == 0)) {
		lp->rx.rings_count[worker] ++;
	}
	if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
		unsigned lcore = rte_lcore_id();

		printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
			lcore,
			(unsigned) worker,
			((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker]));
		lp->rx.rings_iters[worker] = 0;
		lp->rx.rings_count[worker] = 0;
	}
}
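
/*
 * I/O RX: read bursts of packets from each NIC RX queue assigned to this
 * lcore, select a worker for every packet from the byte at offset pos_lb of
 * the packet data (n_workers must be a power of two), and buffer the packet
 * towards that worker. The loop is software-pipelined: mbufs and packet
 * data are prefetched two iterations ahead.
 */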
static inline void
app_lcore_io_rx(
	struct app_lcore_params_io *lp,
	uint32_t n_workers,
	uint32_t bsz_rd,
	uint32_t bsz_wr,
	uint8_t pos_lb)
{
	struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
	uint8_t *data_1_0, *data_1_1 = NULL;
	uint32_t i;

	for (i = 0; i < lp->rx.n_nic_queues; i ++) {
		uint8_t port = lp->rx.nic_queues[i].port;
		uint8_t queue = lp->rx.nic_queues[i].queue;
		uint32_t n_mbufs, j;

		n_mbufs = rte_eth_rx_burst(
			port,
			queue,
			lp->rx.mbuf_in.array,
			(uint16_t) bsz_rd);

		if (unlikely(n_mbufs == 0)) {
			continue;
		}

		lp->rx.nic_queues_iters[i] ++;
		lp->rx.nic_queues_count[i] += n_mbufs;
		if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
			struct rte_eth_stats stats;
			unsigned lcore = rte_lcore_id();

			rte_eth_stats_get(port, &stats);

			printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
				lcore,
				(unsigned) port,
				(double) stats.imissed / (double) (stats.imissed + stats.ipackets),
				((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
			lp->rx.nic_queues_iters[i] = 0;
			lp->rx.nic_queues_count[i] = 0;
		}

#if APP_IO_RX_DROP_ALL_PACKETS
		for (j = 0; j < n_mbufs; j ++) {
			struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
			rte_pktmbuf_free(pkt);
		}

		continue;
#endif
		mbuf_1_0 = lp->rx.mbuf_in.array[0];
		mbuf_1_1 = lp->rx.mbuf_in.array[1];
		data_1_0 = rte_pktmbuf_mtod(mbuf_1_0, uint8_t *);
		if (likely(n_mbufs > 1)) {
			data_1_1 = rte_pktmbuf_mtod(mbuf_1_1, uint8_t *);
		}

		mbuf_2_0 = lp->rx.mbuf_in.array[2];
		mbuf_2_1 = lp->rx.mbuf_in.array[3];
		APP_IO_RX_PREFETCH0(mbuf_2_0);
		APP_IO_RX_PREFETCH0(mbuf_2_1);

		for (j = 0; j + 3 < n_mbufs; j += 2) {
			struct rte_mbuf *mbuf_0_0, *mbuf_0_1;
			uint8_t *data_0_0, *data_0_1;
			uint32_t worker_0, worker_1;

			/* Advance the pipeline: stage 1 -> stage 0, stage 2 -> stage 1 */
			mbuf_0_0 = mbuf_1_0;
			mbuf_0_1 = mbuf_1_1;
			data_0_0 = data_1_0;
			data_0_1 = data_1_1;

			mbuf_1_0 = mbuf_2_0;
			mbuf_1_1 = mbuf_2_1;
			data_1_0 = rte_pktmbuf_mtod(mbuf_2_0, uint8_t *);
			data_1_1 = rte_pktmbuf_mtod(mbuf_2_1, uint8_t *);
			APP_IO_RX_PREFETCH0(data_1_0);
			APP_IO_RX_PREFETCH0(data_1_1);

			mbuf_2_0 = lp->rx.mbuf_in.array[j+4];
			mbuf_2_1 = lp->rx.mbuf_in.array[j+5];
			APP_IO_RX_PREFETCH0(mbuf_2_0);
			APP_IO_RX_PREFETCH0(mbuf_2_1);

			worker_0 = data_0_0[pos_lb] & (n_workers - 1);
			worker_1 = data_0_1[pos_lb] & (n_workers - 1);

			app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr);
			app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr);
		}
		/* Handle the last 1, 2 (when n_mbufs is even) or 3 (when n_mbufs is odd) packets */
		for ( ; j < n_mbufs; j += 1) {
			struct rte_mbuf *mbuf;
			uint8_t *data;
			uint32_t worker;

			mbuf = mbuf_1_0;
			mbuf_1_0 = mbuf_1_1;
			mbuf_1_1 = mbuf_2_0;
			mbuf_2_0 = mbuf_2_1;

			data = rte_pktmbuf_mtod(mbuf, uint8_t *);
			APP_IO_RX_PREFETCH0(mbuf_1_0);

			worker = data[pos_lb] & (n_workers - 1);

			app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr);
		}
	}
}
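
/*
 * I/O RX flush: periodically push out any partially filled per-worker
 * buffers so packets do not get stuck when traffic is low.
 */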
static inline void
app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
{
	uint32_t worker;

	for (worker = 0; worker < n_workers; worker ++) {
		int ret;

		if (likely((lp->rx.mbuf_out_flush[worker] == 0) ||
		           (lp->rx.mbuf_out[worker].n_mbufs == 0))) {
			lp->rx.mbuf_out_flush[worker] = 1;
			continue;
		}

		ret = rte_ring_sp_enqueue_bulk(
			lp->rx.rings[worker],
			(void **) lp->rx.mbuf_out[worker].array,
			lp->rx.mbuf_out[worker].n_mbufs);

		if (unlikely(ret < 0)) {
			uint32_t k;
			for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
				struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		lp->rx.mbuf_out[worker].n_mbufs = 0;
		lp->rx.mbuf_out_flush[worker] = 1;
	}
}
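
/*
 * I/O TX: for every (NIC TX port, worker) pair served by this lcore,
 * dequeue a burst of routed packets from the worker's TX ring, accumulate
 * them per port, and transmit once at least bsz_wr packets are buffered.
 * Packets not accepted by the NIC are freed.
 */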
static inline void
app_lcore_io_tx(
	struct app_lcore_params_io *lp,
	uint32_t n_workers,
	uint32_t bsz_rd,
	uint32_t bsz_wr)
{
	uint32_t worker;

	for (worker = 0; worker < n_workers; worker ++) {
		uint32_t i;

		for (i = 0; i < lp->tx.n_nic_ports; i ++) {
			uint8_t port = lp->tx.nic_ports[i];
			struct rte_ring *ring = lp->tx.rings[port][worker];
			uint32_t n_mbufs, n_pkts;
			int ret;

			n_mbufs = lp->tx.mbuf_out[port].n_mbufs;
			ret = rte_ring_sc_dequeue_bulk(
				ring,
				(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
				bsz_rd);

			if (unlikely(ret == -ENOENT)) {
				continue;
			}

			n_mbufs += bsz_rd;

#if APP_IO_TX_DROP_ALL_PACKETS
			{
				uint32_t j;

				APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[0]);
				APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[1]);

				for (j = 0; j < n_mbufs; j ++) {
					if (likely(j < n_mbufs - 2)) {
						APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[j + 2]);
					}

					rte_pktmbuf_free(lp->tx.mbuf_out[port].array[j]);
				}

				lp->tx.mbuf_out[port].n_mbufs = 0;

				continue;
			}
#endif
			if (unlikely(n_mbufs < bsz_wr)) {
				lp->tx.mbuf_out[port].n_mbufs = n_mbufs;
				continue;
			}

			n_pkts = rte_eth_tx_burst(
				port,
				0,
				lp->tx.mbuf_out[port].array,
				(uint16_t) n_mbufs);

			lp->tx.nic_ports_iters[port] ++;
			lp->tx.nic_ports_count[port] += n_pkts;
			if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) {
				unsigned lcore = rte_lcore_id();

				printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
					lcore,
					(unsigned) port,
					((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
				lp->tx.nic_ports_iters[port] = 0;
				lp->tx.nic_ports_count[port] = 0;
			}

			if (unlikely(n_pkts < n_mbufs)) {
				uint32_t k;
				for (k = n_pkts; k < n_mbufs; k ++) {
					struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
					rte_pktmbuf_free(pkt_to_free);
				}
			}

			lp->tx.mbuf_out[port].n_mbufs = 0;
			lp->tx.mbuf_out_flush[port] = 0;
		}
	}
}
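
/*
 * I/O TX flush: periodically transmit any partially filled per-port TX
 * buffers, freeing whatever the NIC does not accept.
 */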
static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
	uint8_t port;

	for (port = 0; port < lp->tx.n_nic_ports; port ++) {
		uint32_t n_pkts;

		if (likely((lp->tx.mbuf_out_flush[port] == 0) ||
		           (lp->tx.mbuf_out[port].n_mbufs == 0))) {
			lp->tx.mbuf_out_flush[port] = 1;
			continue;
		}

		n_pkts = rte_eth_tx_burst(
			port,
			0,
			lp->tx.mbuf_out[port].array,
			(uint16_t) lp->tx.mbuf_out[port].n_mbufs);

		if (unlikely(n_pkts < lp->tx.mbuf_out[port].n_mbufs)) {
			uint32_t k;
			for (k = n_pkts; k < lp->tx.mbuf_out[port].n_mbufs; k ++) {
				struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		lp->tx.mbuf_out[port].n_mbufs = 0;
		lp->tx.mbuf_out_flush[port] = 1;
	}
}
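
/*
 * Main loop of an I/O lcore: poll the RX queues and TX rings continuously,
 * flushing the partially filled buffers every APP_LCORE_IO_FLUSH iterations.
 */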
static void
app_lcore_main_loop_io(void)
{
	uint32_t lcore = rte_lcore_id();
	struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
	uint32_t n_workers = app_get_lcores_worker();
	uint64_t i = 0;

	uint32_t bsz_rx_rd = app.burst_size_io_rx_read;
	uint32_t bsz_rx_wr = app.burst_size_io_rx_write;
	uint32_t bsz_tx_rd = app.burst_size_io_tx_read;
	uint32_t bsz_tx_wr = app.burst_size_io_tx_write;

	uint8_t pos_lb = app.pos_lb;

	for ( ; ; ) {
		if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) {
			if (likely(lp->rx.n_nic_queues > 0)) {
				app_lcore_io_rx_flush(lp, n_workers);
			}

			if (likely(lp->tx.n_nic_ports > 0)) {
				app_lcore_io_tx_flush(lp);
			}

			i = 0;
		}

		if (likely(lp->rx.n_nic_queues > 0)) {
			app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr, pos_lb);
		}

		if (likely(lp->tx.n_nic_ports > 0)) {
			app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr);
		}

		i ++;
	}
}
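
/*
 * Worker: dequeue bursts of packets from each input ring, look up the IPv4
 * destination address in the LPM table to select the output NIC port, and
 * buffer each packet towards that port, enqueuing full bursts on the
 * corresponding TX ring (the burst is dropped if the ring is full).
 */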
static inline void
app_lcore_worker(
	struct app_lcore_params_worker *lp,
	uint32_t bsz_rd,
	uint32_t bsz_wr)
{
	uint32_t i;

	for (i = 0; i < lp->n_rings_in; i ++) {
		struct rte_ring *ring_in = lp->rings_in[i];
		uint32_t j;
		int ret;

		ret = rte_ring_sc_dequeue_bulk(
			ring_in,
			(void **) lp->mbuf_in.array,
			bsz_rd);

		if (unlikely(ret == -ENOENT)) {
			continue;
		}

#if APP_WORKER_DROP_ALL_PACKETS
		for (j = 0; j < bsz_rd; j ++) {
			struct rte_mbuf *pkt = lp->mbuf_in.array[j];
			rte_pktmbuf_free(pkt);
		}

		continue;
#endif

		APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[0], unsigned char *));
		APP_WORKER_PREFETCH0(lp->mbuf_in.array[1]);
		for (j = 0; j < bsz_rd; j ++) {
			struct rte_mbuf *pkt;
			struct ipv4_hdr *ipv4_hdr;
			uint32_t ipv4_dst, pos;
			uint8_t port;

			if (likely(j < bsz_rd - 1)) {
				APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
			}
			if (likely(j < bsz_rd - 2)) {
				APP_WORKER_PREFETCH0(lp->mbuf_in.array[j+2]);
			}

			pkt = lp->mbuf_in.array[j];
			ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr));
			ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);

			if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
				/* LPM miss: fall back to the packet's input port */
				port = pkt->port;
			}

			pos = lp->mbuf_out[port].n_mbufs;

			lp->mbuf_out[port].array[pos ++] = pkt;
			if (likely(pos < bsz_wr)) {
				lp->mbuf_out[port].n_mbufs = pos;
				continue;
			}

			ret = rte_ring_sp_enqueue_bulk(
				lp->rings_out[port],
				(void **) lp->mbuf_out[port].array,
				bsz_wr);

			lp->rings_out_iters[port] ++;
			if (ret == 0) {
				lp->rings_out_count[port] += 1;
			}
			if (lp->rings_out_iters[port] == APP_STATS) {
				printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
					(unsigned) lp->worker_id,
					(unsigned) port,
					((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
				lp->rings_out_iters[port] = 0;
				lp->rings_out_count[port] = 0;
			}

			if (unlikely(ret == -ENOBUFS)) {
				uint32_t k;
				for (k = 0; k < bsz_wr; k ++) {
					struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
					rte_pktmbuf_free(pkt_to_free);
				}
			}

			lp->mbuf_out[port].n_mbufs = 0;
			lp->mbuf_out_flush[port] = 0;
		}
	}
}
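
/*
 * Worker flush: periodically push out any partially filled per-port
 * buffers towards the I/O TX lcores.
 */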
static inline void
app_lcore_worker_flush(struct app_lcore_params_worker *lp)
{
	uint32_t port;

	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		int ret;

		if (unlikely(lp->rings_out[port] == NULL)) {
			continue;
		}

		if (likely((lp->mbuf_out_flush[port] == 0) ||
		           (lp->mbuf_out[port].n_mbufs == 0))) {
			lp->mbuf_out_flush[port] = 1;
			continue;
		}

		ret = rte_ring_sp_enqueue_bulk(
			lp->rings_out[port],
			(void **) lp->mbuf_out[port].array,
			lp->mbuf_out[port].n_mbufs);

		if (unlikely(ret < 0)) {
			uint32_t k;
			for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
				struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
				rte_pktmbuf_free(pkt_to_free);
			}
		}

		lp->mbuf_out[port].n_mbufs = 0;
		lp->mbuf_out_flush[port] = 1;
	}
}
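
/*
 * Main loop of a worker lcore: process packets continuously, flushing the
 * partially filled buffers every APP_LCORE_WORKER_FLUSH iterations.
 */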
static void
app_lcore_main_loop_worker(void) {
	uint32_t lcore = rte_lcore_id();
	struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
	uint64_t i = 0;

	uint32_t bsz_rd = app.burst_size_worker_read;
	uint32_t bsz_wr = app.burst_size_worker_write;

	for ( ; ; ) {
		if (APP_LCORE_WORKER_FLUSH && (unlikely(i == APP_LCORE_WORKER_FLUSH))) {
			app_lcore_worker_flush(lp);
			i = 0;
		}

		app_lcore_worker(lp, bsz_rd, bsz_wr);

		i ++;
	}
}
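
/*
 * Per-lcore entry point: dispatches to the I/O or worker main loop
 * according to the role configured for this lcore.
 */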
int
app_lcore_main_loop(__attribute__((unused)) void *arg)
{
	struct app_lcore_params *lp;
	unsigned lcore;

	lcore = rte_lcore_id();
	lp = &app.lcore_params[lcore];

	if (lp->type == e_APP_LCORE_IO) {
		printf("Logical core %u (I/O) main loop.\n", lcore);
		app_lcore_main_loop_io();
	}

	if (lp->type == e_APP_LCORE_WORKER) {
		printf("Logical core %u (worker %u) main loop.\n",
			lcore,
			(unsigned) lp->worker.worker_id);
		app_lcore_main_loop_worker();
	}

	return 0;
}