net: add rte prefix to ether structures
dpdk.git: examples/load_balancer/runtime.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>

#include "main.h"

#ifndef APP_LCORE_IO_FLUSH
#define APP_LCORE_IO_FLUSH           1000000
#endif

#ifndef APP_LCORE_WORKER_FLUSH
#define APP_LCORE_WORKER_FLUSH       1000000
#endif

#ifndef APP_STATS
#define APP_STATS                    1000000
#endif

#define APP_IO_RX_DROP_ALL_PACKETS   0
#define APP_WORKER_DROP_ALL_PACKETS  0
#define APP_IO_TX_DROP_ALL_PACKETS   0

#ifndef APP_IO_RX_PREFETCH_ENABLE
#define APP_IO_RX_PREFETCH_ENABLE    1
#endif

#ifndef APP_WORKER_PREFETCH_ENABLE
#define APP_WORKER_PREFETCH_ENABLE   1
#endif

#ifndef APP_IO_TX_PREFETCH_ENABLE
#define APP_IO_TX_PREFETCH_ENABLE    1
#endif

#if APP_IO_RX_PREFETCH_ENABLE
#define APP_IO_RX_PREFETCH0(p)       rte_prefetch0(p)
#define APP_IO_RX_PREFETCH1(p)       rte_prefetch1(p)
#else
#define APP_IO_RX_PREFETCH0(p)
#define APP_IO_RX_PREFETCH1(p)
#endif

#if APP_WORKER_PREFETCH_ENABLE
#define APP_WORKER_PREFETCH0(p)      rte_prefetch0(p)
#define APP_WORKER_PREFETCH1(p)      rte_prefetch1(p)
#else
#define APP_WORKER_PREFETCH0(p)
#define APP_WORKER_PREFETCH1(p)
#endif

#if APP_IO_TX_PREFETCH_ENABLE
#define APP_IO_TX_PREFETCH0(p)       rte_prefetch0(p)
#define APP_IO_TX_PREFETCH1(p)       rte_prefetch1(p)
#else
#define APP_IO_TX_PREFETCH0(p)
#define APP_IO_TX_PREFETCH1(p)
#endif

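/*
 * Buffer one mbuf towards the selected worker. When the per-worker output
 * buffer reaches a full burst of bsz mbufs, enqueue it on that worker's
 * ring in one bulk operation; if the ring is full, the whole burst is
 * dropped and freed.
 */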
static inline void
app_lcore_io_rx_buffer_to_send (
        struct app_lcore_params_io *lp,
        uint32_t worker,
        struct rte_mbuf *mbuf,
        uint32_t bsz)
{
        uint32_t pos;
        int ret;

        pos = lp->rx.mbuf_out[worker].n_mbufs;
        lp->rx.mbuf_out[worker].array[pos ++] = mbuf;
        if (likely(pos < bsz)) {
                lp->rx.mbuf_out[worker].n_mbufs = pos;
                return;
        }

        ret = rte_ring_sp_enqueue_bulk(
                lp->rx.rings[worker],
                (void **) lp->rx.mbuf_out[worker].array,
                bsz,
                NULL);

        if (unlikely(ret == 0)) {
                uint32_t k;
                for (k = 0; k < bsz; k ++) {
                        struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
                        rte_pktmbuf_free(m);
                }
        }

        lp->rx.mbuf_out[worker].n_mbufs = 0;
        lp->rx.mbuf_out_flush[worker] = 0;

#if APP_STATS
        lp->rx.rings_iters[worker] ++;
        if (likely(ret > 0)) {
                lp->rx.rings_count[worker] ++;
        }
        if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
                unsigned lcore = rte_lcore_id();

                printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
                        lcore,
                        (unsigned)worker,
                        ((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker]));
                lp->rx.rings_iters[worker] = 0;
                lp->rx.rings_count[worker] = 0;
        }
#endif
}

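/*
 * I/O RX: read bursts of packets from this lcore's NIC RX queues and
 * dispatch each packet to a worker. The worker is chosen from one byte of
 * packet data (offset pos_lb) masked with (n_workers - 1), which assumes
 * the number of workers is a power of two. The main loop is software
 * pipelined: while the current pair of packets is dispatched, the mbufs
 * and packet data needed by the next iterations are prefetched.
 */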
static inline void
app_lcore_io_rx(
        struct app_lcore_params_io *lp,
        uint32_t n_workers,
        uint32_t bsz_rd,
        uint32_t bsz_wr,
        uint8_t pos_lb)
{
        struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
        uint8_t *data_1_0, *data_1_1 = NULL;
        uint32_t i;

        for (i = 0; i < lp->rx.n_nic_queues; i ++) {
                uint16_t port = lp->rx.nic_queues[i].port;
                uint8_t queue = lp->rx.nic_queues[i].queue;
                uint32_t n_mbufs, j;

                n_mbufs = rte_eth_rx_burst(
                        port,
                        queue,
                        lp->rx.mbuf_in.array,
                        (uint16_t) bsz_rd);

                if (unlikely(n_mbufs == 0)) {
                        continue;
                }

#if APP_STATS
                lp->rx.nic_queues_iters[i] ++;
                lp->rx.nic_queues_count[i] += n_mbufs;
                if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
                        struct rte_eth_stats stats;
                        unsigned lcore = rte_lcore_id();

                        rte_eth_stats_get(port, &stats);

                        printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
                                lcore,
                                port,
                                (double) stats.imissed / (double) (stats.imissed + stats.ipackets),
                                ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
                        lp->rx.nic_queues_iters[i] = 0;
                        lp->rx.nic_queues_count[i] = 0;
                }
#endif

#if APP_IO_RX_DROP_ALL_PACKETS
                for (j = 0; j < n_mbufs; j ++) {
                        struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
                        rte_pktmbuf_free(pkt);
                }

                continue;
#endif

                mbuf_1_0 = lp->rx.mbuf_in.array[0];
                mbuf_1_1 = lp->rx.mbuf_in.array[1];
                data_1_0 = rte_pktmbuf_mtod(mbuf_1_0, uint8_t *);
                if (likely(n_mbufs > 1)) {
                        data_1_1 = rte_pktmbuf_mtod(mbuf_1_1, uint8_t *);
                }

                mbuf_2_0 = lp->rx.mbuf_in.array[2];
                mbuf_2_1 = lp->rx.mbuf_in.array[3];
                APP_IO_RX_PREFETCH0(mbuf_2_0);
                APP_IO_RX_PREFETCH0(mbuf_2_1);

                for (j = 0; j + 3 < n_mbufs; j += 2) {
                        struct rte_mbuf *mbuf_0_0, *mbuf_0_1;
                        uint8_t *data_0_0, *data_0_1;
                        uint32_t worker_0, worker_1;

                        mbuf_0_0 = mbuf_1_0;
                        mbuf_0_1 = mbuf_1_1;
                        data_0_0 = data_1_0;
                        data_0_1 = data_1_1;

                        mbuf_1_0 = mbuf_2_0;
                        mbuf_1_1 = mbuf_2_1;
                        data_1_0 = rte_pktmbuf_mtod(mbuf_2_0, uint8_t *);
                        data_1_1 = rte_pktmbuf_mtod(mbuf_2_1, uint8_t *);
                        APP_IO_RX_PREFETCH0(data_1_0);
                        APP_IO_RX_PREFETCH0(data_1_1);

                        mbuf_2_0 = lp->rx.mbuf_in.array[j+4];
                        mbuf_2_1 = lp->rx.mbuf_in.array[j+5];
                        APP_IO_RX_PREFETCH0(mbuf_2_0);
                        APP_IO_RX_PREFETCH0(mbuf_2_1);

                        worker_0 = data_0_0[pos_lb] & (n_workers - 1);
                        worker_1 = data_0_1[pos_lb] & (n_workers - 1);

                        app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr);
                        app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr);
                }

                /* Handle the last 1, 2 (when n_mbufs is even) or 3 (when n_mbufs is odd) packets */
                for ( ; j < n_mbufs; j += 1) {
                        struct rte_mbuf *mbuf;
                        uint8_t *data;
                        uint32_t worker;

                        mbuf = mbuf_1_0;
                        mbuf_1_0 = mbuf_1_1;
                        mbuf_1_1 = mbuf_2_0;
                        mbuf_2_0 = mbuf_2_1;

                        data = rte_pktmbuf_mtod(mbuf, uint8_t *);

                        APP_IO_RX_PREFETCH0(mbuf_1_0);

                        worker = data[pos_lb] & (n_workers - 1);

                        app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr);
                }
        }
}

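/*
 * Periodically push out partially filled per-worker buffers so that
 * packets do not linger at low input rates. The mbuf_out_flush flag gives
 * the fast path one full flush interval to drain a buffer on its own
 * before the partial burst is force-enqueued here.
 */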
static inline void
app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
{
        uint32_t worker;

        for (worker = 0; worker < n_workers; worker ++) {
                int ret;

                if (likely((lp->rx.mbuf_out_flush[worker] == 0) ||
                           (lp->rx.mbuf_out[worker].n_mbufs == 0))) {
                        lp->rx.mbuf_out_flush[worker] = 1;
                        continue;
                }

                ret = rte_ring_sp_enqueue_bulk(
                        lp->rx.rings[worker],
                        (void **) lp->rx.mbuf_out[worker].array,
                        lp->rx.mbuf_out[worker].n_mbufs,
                        NULL);

                if (unlikely(ret == 0)) {
                        uint32_t k;
                        for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
                                struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
                                rte_pktmbuf_free(pkt_to_free);
                        }
                }

                lp->rx.mbuf_out[worker].n_mbufs = 0;
                lp->rx.mbuf_out_flush[worker] = 1;
        }
}

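/*
 * I/O TX: for each NIC TX port owned by this lcore, dequeue bursts of
 * packets from the rings filled by the workers, accumulate them in a
 * per-port buffer, and call rte_eth_tx_burst() once at least bsz_wr
 * packets are available. Packets not accepted by the NIC are freed.
 */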
static inline void
app_lcore_io_tx(
        struct app_lcore_params_io *lp,
        uint32_t n_workers,
        uint32_t bsz_rd,
        uint32_t bsz_wr)
{
        uint32_t worker;

        for (worker = 0; worker < n_workers; worker ++) {
                uint32_t i;

                for (i = 0; i < lp->tx.n_nic_ports; i ++) {
                        uint16_t port = lp->tx.nic_ports[i];
                        struct rte_ring *ring = lp->tx.rings[port][worker];
                        uint32_t n_mbufs, n_pkts;
                        int ret;

                        n_mbufs = lp->tx.mbuf_out[port].n_mbufs;
                        ret = rte_ring_sc_dequeue_bulk(
                                ring,
                                (void **) &lp->tx.mbuf_out[port].array[n_mbufs],
                                bsz_rd,
                                NULL);

                        if (unlikely(ret == 0))
                                continue;

                        n_mbufs += bsz_rd;

#if APP_IO_TX_DROP_ALL_PACKETS
                        {
                                uint32_t j;
                                APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[0]);
                                APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[1]);

                                for (j = 0; j < n_mbufs; j ++) {
                                        if (likely(j < n_mbufs - 2)) {
                                                APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[j + 2]);
                                        }

                                        rte_pktmbuf_free(lp->tx.mbuf_out[port].array[j]);
                                }

                                lp->tx.mbuf_out[port].n_mbufs = 0;

                                continue;
                        }
#endif

                        if (unlikely(n_mbufs < bsz_wr)) {
                                lp->tx.mbuf_out[port].n_mbufs = n_mbufs;
                                continue;
                        }

                        n_pkts = rte_eth_tx_burst(
                                port,
                                0,
                                lp->tx.mbuf_out[port].array,
                                (uint16_t) n_mbufs);

#if APP_STATS
                        lp->tx.nic_ports_iters[port] ++;
                        lp->tx.nic_ports_count[port] += n_pkts;
                        if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) {
                                unsigned lcore = rte_lcore_id();

                                printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
                                        lcore,
                                        port,
                                        ((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
                                lp->tx.nic_ports_iters[port] = 0;
                                lp->tx.nic_ports_count[port] = 0;
                        }
#endif

                        if (unlikely(n_pkts < n_mbufs)) {
                                uint32_t k;
                                for (k = n_pkts; k < n_mbufs; k ++) {
                                        struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
                                        rte_pktmbuf_free(pkt_to_free);
                                }
                        }
                        lp->tx.mbuf_out[port].n_mbufs = 0;
                        lp->tx.mbuf_out_flush[port] = 0;
                }
        }
}

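/*
 * Periodically transmit partially filled per-port TX buffers, using the
 * same delayed-flush flag scheme as the RX side.
 */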
static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
        uint16_t port;
        uint32_t i;

        for (i = 0; i < lp->tx.n_nic_ports; i++) {
                uint32_t n_pkts;

                port = lp->tx.nic_ports[i];
                if (likely((lp->tx.mbuf_out_flush[port] == 0) ||
                           (lp->tx.mbuf_out[port].n_mbufs == 0))) {
                        lp->tx.mbuf_out_flush[port] = 1;
                        continue;
                }

                n_pkts = rte_eth_tx_burst(
                        port,
                        0,
                        lp->tx.mbuf_out[port].array,
                        (uint16_t) lp->tx.mbuf_out[port].n_mbufs);

                if (unlikely(n_pkts < lp->tx.mbuf_out[port].n_mbufs)) {
                        uint32_t k;
                        for (k = n_pkts; k < lp->tx.mbuf_out[port].n_mbufs; k ++) {
                                struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
                                rte_pktmbuf_free(pkt_to_free);
                        }
                }

                lp->tx.mbuf_out[port].n_mbufs = 0;
                lp->tx.mbuf_out_flush[port] = 1;
        }
}

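/*
 * Main loop of an I/O lcore: alternate RX dispatch and TX draining, and
 * flush the partially filled buffers every APP_LCORE_IO_FLUSH iterations.
 */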
static void
app_lcore_main_loop_io(void)
{
        uint32_t lcore = rte_lcore_id();
        struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
        uint32_t n_workers = app_get_lcores_worker();
        uint64_t i = 0;

        uint32_t bsz_rx_rd = app.burst_size_io_rx_read;
        uint32_t bsz_rx_wr = app.burst_size_io_rx_write;
        uint32_t bsz_tx_rd = app.burst_size_io_tx_read;
        uint32_t bsz_tx_wr = app.burst_size_io_tx_write;

        uint8_t pos_lb = app.pos_lb;

        for ( ; ; ) {
                if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) {
                        if (likely(lp->rx.n_nic_queues > 0)) {
                                app_lcore_io_rx_flush(lp, n_workers);
                        }

                        if (likely(lp->tx.n_nic_ports > 0)) {
                                app_lcore_io_tx_flush(lp);
                        }

                        i = 0;
                }

                if (likely(lp->rx.n_nic_queues > 0)) {
                        app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr, pos_lb);
                }

                if (likely(lp->tx.n_nic_ports > 0)) {
                        app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr);
                }

                i ++;
        }
}

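/*
 * Worker: dequeue a burst of packets from each input ring, look up the
 * IPv4 destination address in the LPM table to pick the output port
 * (falling back to the input port when the lookup misses), and buffer the
 * packet towards that port, enqueuing a full burst of bsz_wr packets to
 * the corresponding I/O TX ring.
 */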
static inline void
app_lcore_worker(
        struct app_lcore_params_worker *lp,
        uint32_t bsz_rd,
        uint32_t bsz_wr)
{
        uint32_t i;

        for (i = 0; i < lp->n_rings_in; i ++) {
                struct rte_ring *ring_in = lp->rings_in[i];
                uint32_t j;
                int ret;

                ret = rte_ring_sc_dequeue_bulk(
                        ring_in,
                        (void **) lp->mbuf_in.array,
                        bsz_rd,
                        NULL);

                if (unlikely(ret == 0))
                        continue;

#if APP_WORKER_DROP_ALL_PACKETS
                for (j = 0; j < bsz_rd; j ++) {
                        struct rte_mbuf *pkt = lp->mbuf_in.array[j];
                        rte_pktmbuf_free(pkt);
                }

                continue;
#endif

                APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[0], unsigned char *));
                APP_WORKER_PREFETCH0(lp->mbuf_in.array[1]);

                for (j = 0; j < bsz_rd; j ++) {
                        struct rte_mbuf *pkt;
                        struct ipv4_hdr *ipv4_hdr;
                        uint32_t ipv4_dst, pos;
                        uint32_t port;

                        if (likely(j < bsz_rd - 1)) {
                                APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
                        }
                        if (likely(j < bsz_rd - 2)) {
                                APP_WORKER_PREFETCH0(lp->mbuf_in.array[j+2]);
                        }

                        pkt = lp->mbuf_in.array[j];
                        ipv4_hdr = rte_pktmbuf_mtod_offset(
                                pkt, struct ipv4_hdr *,
                                sizeof(struct rte_ether_hdr));
                        ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);

                        if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
                                port = pkt->port;
                        }

                        pos = lp->mbuf_out[port].n_mbufs;

                        lp->mbuf_out[port].array[pos ++] = pkt;
                        if (likely(pos < bsz_wr)) {
                                lp->mbuf_out[port].n_mbufs = pos;
                                continue;
                        }

                        ret = rte_ring_sp_enqueue_bulk(
                                lp->rings_out[port],
                                (void **) lp->mbuf_out[port].array,
                                bsz_wr,
                                NULL);

#if APP_STATS
                        lp->rings_out_iters[port] ++;
                        if (ret > 0) {
                                lp->rings_out_count[port] += 1;
                        }
                        if (lp->rings_out_iters[port] == APP_STATS) {
                                printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
                                        (unsigned) lp->worker_id,
                                        port,
                                        ((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
                                lp->rings_out_iters[port] = 0;
                                lp->rings_out_count[port] = 0;
                        }
#endif

                        if (unlikely(ret == 0)) {
                                uint32_t k;
                                for (k = 0; k < bsz_wr; k ++) {
                                        struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
                                        rte_pktmbuf_free(pkt_to_free);
                                }
                        }

                        lp->mbuf_out[port].n_mbufs = 0;
                        lp->mbuf_out_flush[port] = 0;
                }
        }
}

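/*
 * Periodically push out partially filled per-port worker buffers, guarded
 * by the same delayed-flush flag as the I/O side.
 */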
static inline void
app_lcore_worker_flush(struct app_lcore_params_worker *lp)
{
        uint32_t port;

        for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
                int ret;

                if (unlikely(lp->rings_out[port] == NULL)) {
                        continue;
                }

                if (likely((lp->mbuf_out_flush[port] == 0) ||
                           (lp->mbuf_out[port].n_mbufs == 0))) {
                        lp->mbuf_out_flush[port] = 1;
                        continue;
                }

                ret = rte_ring_sp_enqueue_bulk(
                        lp->rings_out[port],
                        (void **) lp->mbuf_out[port].array,
                        lp->mbuf_out[port].n_mbufs,
                        NULL);

                if (unlikely(ret == 0)) {
                        uint32_t k;
                        for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
                                struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
                                rte_pktmbuf_free(pkt_to_free);
                        }
                }

                lp->mbuf_out[port].n_mbufs = 0;
                lp->mbuf_out_flush[port] = 1;
        }
}

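/*
 * Main loop of a worker lcore: process bursts continuously and flush the
 * output buffers every APP_LCORE_WORKER_FLUSH iterations.
 */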
static void
app_lcore_main_loop_worker(void)
{
        uint32_t lcore = rte_lcore_id();
        struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
        uint64_t i = 0;

        uint32_t bsz_rd = app.burst_size_worker_read;
        uint32_t bsz_wr = app.burst_size_worker_write;

        for ( ; ; ) {
                if (APP_LCORE_WORKER_FLUSH && (unlikely(i == APP_LCORE_WORKER_FLUSH))) {
                        app_lcore_worker_flush(lp);
                        i = 0;
                }

                app_lcore_worker(lp, bsz_rd, bsz_wr);

                i ++;
        }
}

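/*
 * Per-lcore entry point: run either the I/O loop or the worker loop
 * depending on the role configured for this lcore.
 */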
int
app_lcore_main_loop(__attribute__((unused)) void *arg)
{
        struct app_lcore_params *lp;
        unsigned lcore;

        lcore = rte_lcore_id();
        lp = &app.lcore_params[lcore];

        if (lp->type == e_APP_LCORE_IO) {
                printf("Logical core %u (I/O) main loop.\n", lcore);
                app_lcore_main_loop_io();
        }

        if (lp->type == e_APP_LCORE_WORKER) {
                printf("Logical core %u (worker %u) main loop.\n",
                        lcore,
                        (unsigned) lp->worker.worker_id);
                app_lcore_main_loop_worker();
        }

        return 0;
}