examples: use factorized default Rx/Tx configuration
examples/l3fwd-vf/main.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <stdint.h>
37 #include <inttypes.h>
38 #include <sys/types.h>
39 #include <string.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42 #include <errno.h>
43 #include <getopt.h>
44 #include <signal.h>
45
46 #include <rte_common.h>
47 #include <rte_byteorder.h>
48 #include <rte_log.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_tailq.h>
53 #include <rte_eal.h>
54 #include <rte_per_lcore.h>
55 #include <rte_launch.h>
56 #include <rte_atomic.h>
57 #include <rte_cycles.h>
58 #include <rte_prefetch.h>
59 #include <rte_lcore.h>
60 #include <rte_per_lcore.h>
61 #include <rte_branch_prediction.h>
62 #include <rte_interrupts.h>
63 #include <rte_pci.h>
64 #include <rte_random.h>
65 #include <rte_debug.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
68 #include <rte_ring.h>
69 #include <rte_mempool.h>
70 #include <rte_mbuf.h>
71 #include <rte_ip.h>
72 #include <rte_tcp.h>
73 #include <rte_udp.h>
74 #include <rte_string_fns.h>
75
76 #include "main.h"
77
78 #define APP_LOOKUP_EXACT_MATCH          0
79 #define APP_LOOKUP_LPM                  1
80 #define DO_RFC_1812_CHECKS
81
82 //#define APP_LOOKUP_METHOD             APP_LOOKUP_EXACT_MATCH
83 #ifndef APP_LOOKUP_METHOD
84 #define APP_LOOKUP_METHOD             APP_LOOKUP_LPM
85 #endif
86
87 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
88 #include <rte_hash.h>
89 #elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
90 #include <rte_lpm.h>
91 #else
92 #error "APP_LOOKUP_METHOD set to incorrect value"
93 #endif
94
95 #define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
96
97 #define MEMPOOL_CACHE_SIZE 256
98
99 #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
100
101 /*
102  * This expression calculates the number of mbufs needed, based on user input:
103  * it accounts for the RX and TX hardware rings, the mempool cache per lcore
104  * and the mbuf table per port per lcore. RTE_MAX keeps NB_MBUF at or above 8192.
105  */
106
107 #define NB_MBUF RTE_MAX (                                       \
108         (nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT +        \
109         nb_ports*nb_lcores*MAX_PKT_BURST +                      \
110         nb_ports*n_tx_queue*RTE_TEST_TX_DESC_DEFAULT +          \
111         nb_lcores*MEMPOOL_CACHE_SIZE),                          \
112         (unsigned)8192)
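
/*
 * For example, with nb_ports = 2, nb_rx_queue = 1, n_tx_queue = 1 and
 * nb_lcores = 2, the sum is 2*1*128 + 2*2*32 + 2*1*512 + 2*256 = 1920,
 * so NB_MBUF falls back to the 8192 minimum.
 */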
113
114 /*
115  * RX and TX Prefetch, Host, and Write-back threshold values should be
116  * carefully set for optimal performance. Consult the network
117  * controller's datasheet and supporting DPDK documentation for guidance
118  * on how these parameters should be set.
119  */
120 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
121 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
122 #define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
123
124 /*
125  * These default values are optimized for use with the Intel(R) 82599 10 GbE
126  * Controller and the DPDK ixgbe PMD. Consider using other values for other
127  * network controllers and/or network drivers.
128  */
129 #define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
130 #define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
131 #define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
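
/*
 * This example relies on the factorized default Rx/Tx configuration:
 * rte_eth_tx_queue_setup() below uses default_txconf obtained from
 * rte_eth_dev_info_get(), and rte_eth_rx_queue_setup() passes NULL to take
 * the driver default, so the threshold values above are kept for reference
 * only and are not passed to the queue setup calls in this file.
 */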
132
133 #define MAX_PKT_BURST 32
134 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
135
136 #define NB_SOCKETS 8
137
138 #define SOCKET0 0
139
140 /* Configure how many packets ahead to prefetch, when reading packets */
141 #define PREFETCH_OFFSET 3
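
/*
 * The RX loop in main_loop() prefetches the first PREFETCH_OFFSET packets,
 * then prefetches packet j + PREFETCH_OFFSET while forwarding packet j, so
 * the packet headers are already in cache when l3fwd_simple_forward() runs.
 */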
142
143 /*
144  * Configurable number of RX/TX ring descriptors
145  */
146 #define RTE_TEST_RX_DESC_DEFAULT 128
147 #define RTE_TEST_TX_DESC_DEFAULT 512
148 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
149 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
150
151 /* ethernet addresses of ports */
152 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
153
154 /* mask of enabled ports */
155 static uint32_t enabled_port_mask = 0;
156 static int numa_on = 1; /**< NUMA is enabled by default. */
157
158 struct mbuf_table {
159         uint16_t len;
160         struct rte_mbuf *m_table[MAX_PKT_BURST];
161 };
162
163 struct lcore_rx_queue {
164         uint8_t port_id;
165         uint8_t queue_id;
166 } __rte_cache_aligned;
167
168 #define MAX_RX_QUEUE_PER_LCORE 16
169 #define MAX_TX_QUEUE_PER_PORT 1
170 #define MAX_RX_QUEUE_PER_PORT 1
171
172 #define MAX_LCORE_PARAMS 1024
173 struct lcore_params {
174         uint8_t port_id;
175         uint8_t queue_id;
176         uint8_t lcore_id;
177 } __rte_cache_aligned;
178
179 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
180 static struct lcore_params lcore_params_array_default[] = {
181         {0, 0, 2},
182         {0, 1, 2},
183         {0, 2, 2},
184         {1, 0, 2},
185         {1, 1, 2},
186         {1, 2, 2},
187         {2, 0, 2},
188         {3, 0, 3},
189         {3, 1, 3},
190 };
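
/*
 * With the defaults above, lcore 2 would poll port 0 queues 0-2, port 1
 * queues 0-2 and port 2 queue 0, and lcore 3 would poll port 3 queues 0-1.
 * Note that check_lcore_params() rejects queue ids >= MAX_RX_QUEUE_PER_PORT
 * (1 in this VF example), so in practice a --config option that uses only
 * queue 0 is expected.
 */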
191
192 static struct lcore_params * lcore_params = lcore_params_array_default;
193 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
194                                 sizeof(lcore_params_array_default[0]);
195
196 static struct rte_eth_conf port_conf = {
197         .rxmode = {
198                 .mq_mode        = ETH_MQ_RX_RSS,
199                 .max_rx_pkt_len = ETHER_MAX_LEN,
200                 .split_hdr_size = 0,
201                 .header_split   = 0, /**< Header Split disabled */
202                 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
203                 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
204                 .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
205                 .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
206         },
207         .rx_adv_conf = {
208                 .rss_conf = {
209                         .rss_key = NULL,
210                         .rss_hf = ETH_RSS_IP,
211                 },
212         },
213         .txmode = {
214                 .mq_mode = ETH_MQ_TX_NONE,
215         },
216 };
217
218 static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
219
220
221 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
222
223 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
224 #include <rte_hash_crc.h>
225 #define DEFAULT_HASH_FUNC       rte_hash_crc
226 #else
227 #include <rte_jhash.h>
228 #define DEFAULT_HASH_FUNC       rte_jhash
229 #endif
230
231 struct ipv4_5tuple {
232         uint32_t ip_dst;
233         uint32_t ip_src;
234         uint16_t port_dst;
235         uint16_t port_src;
236         uint8_t proto;
237 } __attribute__((__packed__));
238
239 struct l3fwd_route {
240         struct ipv4_5tuple key;
241         uint8_t if_out;
242 };
243
244 static struct l3fwd_route l3fwd_route_array[] = {
245         {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
246         {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
247         {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
248         {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
249 };
250
251 typedef struct rte_hash lookup_struct_t;
252 static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
253
254 #define L3FWD_HASH_ENTRIES      1024
255 struct rte_hash_parameters l3fwd_hash_params = {
256         .name = "l3fwd_hash_0",
257         .entries = L3FWD_HASH_ENTRIES,
258         .bucket_entries = 4,
259         .key_len = sizeof(struct ipv4_5tuple),
260         .hash_func = DEFAULT_HASH_FUNC,
261         .hash_func_init_val = 0,
262         .socket_id = SOCKET0,
263 };
264
265 #define L3FWD_NUM_ROUTES \
266         (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
267
268 static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
269 #endif
270
271 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
272 struct l3fwd_route {
273         uint32_t ip;
274         uint8_t  depth;
275         uint8_t  if_out;
276 };
277
278 static struct l3fwd_route l3fwd_route_array[] = {
279         {IPv4(1,1,1,0), 24, 0},
280         {IPv4(2,1,1,0), 24, 1},
281         {IPv4(3,1,1,0), 24, 2},
282         {IPv4(4,1,1,0), 24, 3},
283         {IPv4(5,1,1,0), 24, 4},
284         {IPv4(6,1,1,0), 24, 5},
285         {IPv4(7,1,1,0), 24, 6},
286         {IPv4(8,1,1,0), 24, 7},
287 };
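
/*
 * Lookups are longest-prefix match: for example, a packet destined to
 * 2.1.1.7 matches the 2.1.1.0/24 entry above and is forwarded on port 1,
 * while a destination that matches no entry is sent back out of the RX
 * port (see get_dst_port()).
 */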
288
289 #define L3FWD_NUM_ROUTES \
290         (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
291
292 #define L3FWD_LPM_MAX_RULES     1024
293
294 typedef struct rte_lpm lookup_struct_t;
295 static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
296 #endif
297
298 struct lcore_conf {
299         uint16_t n_rx_queue;
300         struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
301         uint16_t tx_queue_id;
302         struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
303         lookup_struct_t * lookup_struct;
304 } __rte_cache_aligned;
305
306 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
307
308 /* Send burst of packets on an output interface */
309 static inline int
310 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
311 {
312         struct rte_mbuf **m_table;
313         int ret;
314         uint16_t queueid;
315
316         queueid = qconf->tx_queue_id;
317         m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
318
319         ret = rte_eth_tx_burst(port, queueid, m_table, n);
320         if (unlikely(ret < n)) {
321                 do {
322                         rte_pktmbuf_free(m_table[ret]);
323                 } while (++ret < n);
324         }
325
326         return 0;
327 }
328
329 /* Enqueue a single packet, and send burst if queue is filled */
330 static inline int
331 send_single_packet(struct rte_mbuf *m, uint8_t port)
332 {
333         uint32_t lcore_id;
334         uint16_t len;
335         struct lcore_conf *qconf;
336
337         lcore_id = rte_lcore_id();
338
339         qconf = &lcore_conf[lcore_id];
340         len = qconf->tx_mbufs[port].len;
341         qconf->tx_mbufs[port].m_table[len] = m;
342         len++;
343
344         /* enough pkts to be sent */
345         if (unlikely(len == MAX_PKT_BURST)) {
346                 send_burst(qconf, MAX_PKT_BURST, port);
347                 len = 0;
348         }
349
350         qconf->tx_mbufs[port].len = len;
351         return 0;
352 }
353
354 #ifdef DO_RFC_1812_CHECKS
355 static inline int
356 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
357 {
358         /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
359         /*
360          * 1. The packet length reported by the Link Layer must be large
361          * enough to hold the minimum length legal IP datagram (20 bytes).
362          */
363         if (link_len < sizeof(struct ipv4_hdr))
364                 return -1;
365
366         /* 2. The IP checksum must be correct. */
367         /* this is checked in H/W */
368
369         /*
370          * 3. The IP version number must be 4. If the version number is not 4
371          * then the packet may be another version of IP, such as IPng or
372          * ST-II.
373          */
374         if (((pkt->version_ihl) >> 4) != 4)
375                 return -3;
376         /*
377          * 4. The IP header length field must be large enough to hold the
378          * minimum length legal IP datagram (20 bytes = 5 words).
379          */
380         if ((pkt->version_ihl & 0xf) < 5)
381                 return -4;
382
383         /*
384          * 5. The IP total length field must be large enough to hold the IP
385          * datagram header, whose length is specified in the IP header length
386          * field.
387          */
388         if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
389                 return -5;
390
391         return 0;
392 }
393 #endif
394
395 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
396 static void
397 print_key(struct ipv4_5tuple key)
398 {
399         printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
400                (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
401 }
402
403 static inline uint8_t
404 get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
405 {
406         struct ipv4_5tuple key;
407         struct tcp_hdr *tcp;
408         struct udp_hdr *udp;
409         int ret = 0;
410
411         key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
412         key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
413         key.proto = ipv4_hdr->next_proto_id;
414
415         switch (ipv4_hdr->next_proto_id) {
416         case IPPROTO_TCP:
417                 tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
418                                         sizeof(struct ipv4_hdr));
419                 key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
420                 key.port_src = rte_be_to_cpu_16(tcp->src_port);
421                 break;
422
423         case IPPROTO_UDP:
424                 udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
425                                         sizeof(struct ipv4_hdr));
426                 key.port_dst = rte_be_to_cpu_16(udp->dst_port);
427                 key.port_src = rte_be_to_cpu_16(udp->src_port);
428                 break;
429
430         default:
431                 key.port_dst = 0;
432                 key.port_src = 0;
433         }
434
435         /* Find destination port */
436         ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
437         return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
438 }
439 #endif
440
441 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
442 static inline uint8_t
443 get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
444 {
445         uint8_t next_hop;
446
447         return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
448                         rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
449                         next_hop : portid);
450 }
451 #endif
452
453 static inline void
454 l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
455 {
456         struct ether_hdr *eth_hdr;
457         struct ipv4_hdr *ipv4_hdr;
458         void *tmp;
459         uint8_t dst_port;
460
461         eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
462
463         ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
464                                 sizeof(struct ether_hdr));
465
466 #ifdef DO_RFC_1812_CHECKS
467         /* Check to make sure the packet is valid (RFC1812) */
468         if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
469                 rte_pktmbuf_free(m);
470                 return;
471         }
472 #endif
473
474         dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct);
475         if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
476                 dst_port = portid;
477
478         /* 02:00:00:00:00:xx */
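        /*
         * The single 8-byte store below sets the destination MAC to
         * 02:00:00:00:00:<dst_port> on a little-endian host; the two bytes
         * that spill into s_addr are overwritten by ether_addr_copy() below.
         */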
479         tmp = &eth_hdr->d_addr.addr_bytes[0];
480         *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
481
482 #ifdef DO_RFC_1812_CHECKS
483         /* Update time to live and header checksum */
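        /*
         * Incremental update (see RFC 1141): decrementing the TTL changes the
         * header by a fixed amount, so the stored checksum is adjusted in
         * place instead of being recomputed; the "++" assumes a little-endian
         * host, where it corresponds to adding 0x0100 in network byte order.
         */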
484         --(ipv4_hdr->time_to_live);
485         ++(ipv4_hdr->hdr_checksum);
486 #endif
487
488         /* src addr */
489         ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
490
491         send_single_packet(m, dst_port);
492
493 }
494
495 /* main processing loop */
496 static int
497 main_loop(__attribute__((unused)) void *dummy)
498 {
499         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
500         unsigned lcore_id;
501         uint64_t prev_tsc, diff_tsc, cur_tsc;
502         int i, j, nb_rx;
503         uint8_t portid, queueid;
504         struct lcore_conf *qconf;
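        /* number of TSC cycles in ~BURST_TX_DRAIN_US microseconds:
         * ceil(tsc_hz / US_PER_S) * BURST_TX_DRAIN_US */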
505         const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
506
507         prev_tsc = 0;
508
509         lcore_id = rte_lcore_id();
510         qconf = &lcore_conf[lcore_id];
511
512         if (qconf->n_rx_queue == 0) {
513                 RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
514                 return 0;
515         }
516
517         RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
518
519         for (i = 0; i < qconf->n_rx_queue; i++) {
520
521                 portid = qconf->rx_queue_list[i].port_id;
522                 queueid = qconf->rx_queue_list[i].queue_id;
523                 RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
524                         portid, queueid);
525         }
526
527         while (1) {
528
529                 cur_tsc = rte_rdtsc();
530
531                 /*
532                  * TX burst queue drain
533                  */
534                 diff_tsc = cur_tsc - prev_tsc;
535                 if (unlikely(diff_tsc > drain_tsc)) {
536
537                         /*
538                          * This could be optimized (use queueid instead of
539                          * portid), but it is not called so often
540                          */
541                         for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
542                                 if (qconf->tx_mbufs[portid].len == 0)
543                                         continue;
544                                 send_burst(&lcore_conf[lcore_id],
545                                         qconf->tx_mbufs[portid].len,
546                                         portid);
547                                 qconf->tx_mbufs[portid].len = 0;
548                         }
549
550                         prev_tsc = cur_tsc;
551                 }
552
553                 /*
554                  * Read packet from RX queues
555                  */
556                 for (i = 0; i < qconf->n_rx_queue; ++i) {
557
558                         portid = qconf->rx_queue_list[i].port_id;
559                         queueid = qconf->rx_queue_list[i].queue_id;
560                         nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
561
562                         /* Prefetch first packets */
563                         for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
564                                 rte_prefetch0(rte_pktmbuf_mtod(
565                                                 pkts_burst[j], void *));
566                         }
567
568                         /* Prefetch and forward already prefetched packets */
569                         for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
570                                 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
571                                                 j + PREFETCH_OFFSET], void *));
572                                 l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
573                         }
574
575                         /* Forward remaining prefetched packets */
576                         for (; j < nb_rx; j++) {
577                                 l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
578                         }
579                 }
580         }
581 }
582
583 static int
584 check_lcore_params(void)
585 {
586         uint8_t queue, lcore;
587         uint16_t i;
588         int socketid;
589
590         for (i = 0; i < nb_lcore_params; ++i) {
591                 queue = lcore_params[i].queue_id;
592                 if (queue >= MAX_RX_QUEUE_PER_PORT) {
593                         printf("invalid queue number: %hhu\n", queue);
594                         return -1;
595                 }
596                 lcore = lcore_params[i].lcore_id;
597                 if (!rte_lcore_is_enabled(lcore)) {
598                         printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
599                         return -1;
600                 }
601                 if ((socketid = rte_lcore_to_socket_id(lcore)) != 0 &&
602                         (numa_on == 0)) {
603                         printf("warning: lcore %hhu is on socket %d with numa off\n",
604                                 lcore, socketid);
605                 }
606         }
607         return 0;
608 }
609
610 static int
611 check_port_config(const unsigned nb_ports)
612 {
613         unsigned portid;
614         uint16_t i;
615
616         for (i = 0; i < nb_lcore_params; ++i) {
617                 portid = lcore_params[i].port_id;
618                 if ((enabled_port_mask & (1 << portid)) == 0) {
619                         printf("port %u is not enabled in port mask\n", portid);
620                         return -1;
621                 }
622                 if (portid >= nb_ports) {
623                         printf("port %u is not present on the board\n", portid);
624                         return -1;
625                 }
626         }
627         return 0;
628 }
629
630 static uint8_t
631 get_port_n_rx_queues(const uint8_t port)
632 {
633         int queue = -1;
634         uint16_t i;
635
636         for (i = 0; i < nb_lcore_params; ++i) {
637                 if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
638                         queue = lcore_params[i].queue_id;
639         }
640         return (uint8_t)(++queue);
641 }
642
643 static int
644 init_lcore_rx_queues(void)
645 {
646         uint16_t i, nb_rx_queue;
647         uint8_t lcore;
648
649         for (i = 0; i < nb_lcore_params; ++i) {
650                 lcore = lcore_params[i].lcore_id;
651                 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
652                 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
653                         printf("error: too many queues (%u) for lcore: %u\n",
654                                 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
655                         return -1;
656                 } else {
657                         lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
658                                 lcore_params[i].port_id;
659                         lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
660                                 lcore_params[i].queue_id;
661                         lcore_conf[lcore].n_rx_queue++;
662                 }
663         }
664         return 0;
665 }
666
667 /* display usage */
668 static void
669 print_usage(const char *prgname)
670 {
671         printf ("%s [EAL options] -- -p PORTMASK"
672                 "  [--config (port,queue,lcore)[,(port,queue,lcore)]]\n"
673                 "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
674                 "  --config (port,queue,lcore): rx queues configuration\n"
675                 "  --no-numa: optional, disable numa awareness\n",
676                 prgname);
677 }
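
/*
 * A typical invocation (illustrative) polls ports 0 and 1, one RX queue
 * each, from lcore 1:
 *
 *   ./build/l3fwd-vf -c 0x3 -n 3 -- -p 0x3 --config="(0,0,1),(1,0,1)"
 */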
678
679 /* Custom handling of signals to handle process termination */
680 static void
681 signal_handler(int signum)
682 {
683         uint8_t portid;
684         uint8_t nb_ports = rte_eth_dev_count();
685
686         /* When we receive a SIGINT signal */
687         if (signum == SIGINT) {
688                 for (portid = 0; portid < nb_ports; portid++) {
689                         /* skip ports that are not enabled */
690                         if ((enabled_port_mask & (1 << portid)) == 0)
691                                 continue;
692                         rte_eth_dev_close(portid);
693                 }
694         }
695         rte_exit(EXIT_SUCCESS, "\n User forced exit\n");
696 }
697 static int
698 parse_portmask(const char *portmask)
699 {
700         char *end = NULL;
701         unsigned long pm;
702
703         /* parse hexadecimal string */
704         pm = strtoul(portmask, &end, 16);
705         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
706                 return -1;
707
708         if (pm == 0)
709                 return -1;
710
711         return pm;
712 }
713
714 static int
715 parse_config(const char *q_arg)
716 {
717         char s[256];
718         const char *p, *p0 = q_arg;
719         char *end;
720         enum fieldnames {
721                 FLD_PORT = 0,
722                 FLD_QUEUE,
723                 FLD_LCORE,
724                 _NUM_FLD
725         };
726         unsigned long int_fld[_NUM_FLD];
727         char *str_fld[_NUM_FLD];
728         int i;
729         unsigned size;
730
731         nb_lcore_params = 0;
732
733         while ((p = strchr(p0,'(')) != NULL) {
734                 ++p;
735                 if((p0 = strchr(p,')')) == NULL)
736                         return -1;
737
738                 size = p0 - p;
739                 if(size >= sizeof(s))
740                         return -1;
741
742                 snprintf(s, sizeof(s), "%.*s", size, p);
743                 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
744                         return -1;
745                 for (i = 0; i < _NUM_FLD; i++){
746                         errno = 0;
747                         int_fld[i] = strtoul(str_fld[i], &end, 0);
748                         if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
749                                 return -1;
750                 }
751                 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
752                         printf("exceeded max number of lcore params: %hu\n",
753                                 nb_lcore_params);
754                         return -1;
755                 }
756                 lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
757                 lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
758                 lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
759                 ++nb_lcore_params;
760         }
761         lcore_params = lcore_params_array;
762         return 0;
763 }
764
765 /* Parse the argument given in the command line of the application */
766 static int
767 parse_args(int argc, char **argv)
768 {
769         int opt, ret;
770         char **argvopt;
771         int option_index;
772         char *prgname = argv[0];
773         static struct option lgopts[] = {
774                 {"config", 1, 0, 0},
775                 {"no-numa", 0, 0, 0},
776                 {NULL, 0, 0, 0}
777         };
778
779         argvopt = argv;
780
781         while ((opt = getopt_long(argc, argvopt, "p:",
782                                 lgopts, &option_index)) != EOF) {
783
784                 switch (opt) {
785                 /* portmask */
786                 case 'p':
787                         enabled_port_mask = parse_portmask(optarg);
788                         if (enabled_port_mask == 0) {
789                                 printf("invalid portmask\n");
790                                 print_usage(prgname);
791                                 return -1;
792                         }
793                         break;
794
795                 /* long options */
796                 case 0:
797                         if (!strcmp(lgopts[option_index].name, "config")) {
798                                 ret = parse_config(optarg);
799                                 if (ret) {
800                                         printf("invalid config\n");
801                                         print_usage(prgname);
802                                         return -1;
803                                 }
804                         }
805
806                         if (!strcmp(lgopts[option_index].name, "no-numa")) {
807                                 printf("numa is disabled \n");
808                                 numa_on = 0;
809                         }
810                         break;
811
812                 default:
813                         print_usage(prgname);
814                         return -1;
815                 }
816         }
817
818         if (optind >= 0)
819                 argv[optind-1] = prgname;
820
821         ret = optind-1;
822         optind = 0; /* reset getopt lib */
823         return ret;
824 }
825
826 static void
827 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
828 {
829         printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
830                 eth_addr->addr_bytes[0],
831                 eth_addr->addr_bytes[1],
832                 eth_addr->addr_bytes[2],
833                 eth_addr->addr_bytes[3],
834                 eth_addr->addr_bytes[4],
835                 eth_addr->addr_bytes[5]);
836 }
837
838 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
839 static void
840 setup_hash(int socketid)
841 {
842         unsigned i;
843         int ret;
844         char s[64];
845
846         /* create  hashes */
847         snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid);
848         l3fwd_hash_params.name = s;
849         l3fwd_hash_params.socket_id = socketid;
850         l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params);
851         if (l3fwd_lookup_struct[socketid] == NULL)
852                 rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
853                                 "socket %d\n", socketid);
854
855         /* populate the hash */
856         for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
857                 ret = rte_hash_add_key (l3fwd_lookup_struct[socketid],
858                                 (void *) &l3fwd_route_array[i].key);
859                 if (ret < 0) {
860                         rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
861                                 "l3fwd hash on socket %d\n", i, socketid);
862                 }
863                 l3fwd_out_if[ret] = l3fwd_route_array[i].if_out;
864                 printf("Hash: Adding key\n");
865                 print_key(l3fwd_route_array[i].key);
866         }
867 }
868 #endif
869
870 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
871 static void
872 setup_lpm(int socketid)
873 {
874         unsigned i;
875         int ret;
876         char s[64];
877
878         /* create the LPM table */
879         snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid);
880         l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
881                                 L3FWD_LPM_MAX_RULES, 0);
882         if (l3fwd_lookup_struct[socketid] == NULL)
883                 rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
884                                 " on socket %d\n", socketid);
885
886         /* populate the LPM table */
887         for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
888                 ret = rte_lpm_add(l3fwd_lookup_struct[socketid],
889                         l3fwd_route_array[i].ip,
890                         l3fwd_route_array[i].depth,
891                         l3fwd_route_array[i].if_out);
892
893                 if (ret < 0) {
894                         rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
895                                 "l3fwd LPM table on socket %d\n",
896                                 i, socketid);
897                 }
898
899                 printf("LPM: Adding route 0x%08x / %d (%d)\n",
900                         (unsigned)l3fwd_route_array[i].ip,
901                         l3fwd_route_array[i].depth,
902                         l3fwd_route_array[i].if_out);
903         }
904 }
905 #endif
906
907 static int
908 init_mem(unsigned nb_mbuf)
909 {
910         struct lcore_conf *qconf;
911         int socketid;
912         unsigned lcore_id;
913         char s[64];
914
915         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
916                 if (rte_lcore_is_enabled(lcore_id) == 0)
917                         continue;
918
919                 if (numa_on)
920                         socketid = rte_lcore_to_socket_id(lcore_id);
921                 else
922                         socketid = 0;
923
924                 if (socketid >= NB_SOCKETS) {
925                         rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
926                                 socketid, lcore_id, NB_SOCKETS);
927                 }
928                 if (pktmbuf_pool[socketid] == NULL) {
929                         snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
930                         pktmbuf_pool[socketid] =
931                                 rte_mempool_create(s, nb_mbuf, MBUF_SIZE,
932                                                    MEMPOOL_CACHE_SIZE,
933                                         sizeof(struct rte_pktmbuf_pool_private),
934                                         rte_pktmbuf_pool_init, NULL,
935                                         rte_pktmbuf_init, NULL,
936                                         socketid, 0);
937                         if (pktmbuf_pool[socketid] == NULL)
938                                 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid);
939                         else
940                                 printf("Allocated mbuf pool on socket %d\n", socketid);
941
942 #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
943                         setup_lpm(socketid);
944 #else
945                         setup_hash(socketid);
946 #endif
947                 }
948                 qconf = &lcore_conf[lcore_id];
949                 qconf->lookup_struct = l3fwd_lookup_struct[socketid];
950         }
951         return 0;
952 }
953
954 int
955 MAIN(int argc, char **argv)
956 {
957         struct lcore_conf *qconf;
958         struct rte_eth_dev_info dev_info;
959         struct rte_eth_txconf *txconf;
960         int ret;
961         unsigned nb_ports;
962         uint16_t queueid;
963         unsigned lcore_id;
964         uint32_t nb_lcores;
965         uint16_t n_tx_queue;
966         uint8_t portid, nb_rx_queue, queue, socketid;
967
968         signal(SIGINT, signal_handler);
969         /* init EAL */
970         ret = rte_eal_init(argc, argv);
971         if (ret < 0)
972                 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
973         argc -= ret;
974         argv += ret;
975
976         /* parse application arguments (after the EAL ones) */
977         ret = parse_args(argc, argv);
978         if (ret < 0)
979                 rte_exit(EXIT_FAILURE, "Invalid L3FWD-VF parameters\n");
980
981         if (check_lcore_params() < 0)
982                 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
983
984         ret = init_lcore_rx_queues();
985         if (ret < 0)
986                 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
987
988         nb_ports = rte_eth_dev_count();
989         if (nb_ports > RTE_MAX_ETHPORTS)
990                 nb_ports = RTE_MAX_ETHPORTS;
991
992         if (check_port_config(nb_ports) < 0)
993                 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
994
995         nb_lcores = rte_lcore_count();
996
997         /* initialize all ports */
998         for (portid = 0; portid < nb_ports; portid++) {
999                 /* skip ports that are not enabled */
1000                 if ((enabled_port_mask & (1 << portid)) == 0) {
1001                         printf("\nSkipping disabled port %d\n", portid);
1002                         continue;
1003                 }
1004
1005                 /* init port */
1006                 printf("Initializing port %d ... ", portid );
1007                 fflush(stdout);
1008
1009                 /* both must always be 1: one RX and one TX queue per VF port */
1010                 nb_rx_queue = get_port_n_rx_queues(portid);
1011                 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1012
1013                 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1014                         nb_rx_queue, (unsigned)1 );
1015                 ret = rte_eth_dev_configure(portid, nb_rx_queue, n_tx_queue, &port_conf);
1016                 if (ret < 0)
1017                         rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
1018                                 ret, portid);
1019
1020                 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1021                 print_ethaddr(" Address:", &ports_eth_addr[portid]);
1022                 printf(", ");
1023
1024                 ret = init_mem(NB_MBUF);
1025                 if (ret < 0)
1026                         rte_exit(EXIT_FAILURE, "init_mem failed\n");
1027
1028                 /* init one TX queue */
1029                 socketid = (uint8_t)rte_lcore_to_socket_id(rte_get_master_lcore());
1030
1031                 printf("txq=%d,%d,%d ", portid, 0, socketid);
1032                 fflush(stdout);
1033
1034                 rte_eth_dev_info_get(portid, &dev_info);
1035                 txconf = &dev_info.default_txconf;
1036                 if (port_conf.rxmode.jumbo_frame)
1037                         txconf->txq_flags = 0;
1038                 ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
1039                                                  socketid, txconf);
1040                 if (ret < 0)
1041                         rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
1042                                 "port=%d\n", ret, portid);
1043
1044                 printf("\n");
1045         }
1046
1047         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1048                 if (rte_lcore_is_enabled(lcore_id) == 0)
1049                         continue;
1050                 qconf = &lcore_conf[lcore_id];
1051                 qconf->tx_queue_id = 0;
1052
1053                 printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
1054                 fflush(stdout);
1055                 /* init RX queues */
1056                 for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
1057                         portid = qconf->rx_queue_list[queue].port_id;
1058                         queueid = qconf->rx_queue_list[queue].queue_id;
1059
1060                         if (numa_on)
1061                                 socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1062                         else
1063                                 socketid = 0;
1064
1065                         printf("rxq=%d,%d,%d ", portid, queueid, socketid);
1066                         fflush(stdout);
1067
1068                         ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
1069                                                 socketid, NULL,
1070                                                 pktmbuf_pool[socketid]);
1071                         if (ret < 0)
1072                                 rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, "
1073                                                 "port=%d\n", ret, portid);
1074                 }
1075         }
1076         printf("\n");
1077
1078         /* start ports */
1079         for (portid = 0; portid < nb_ports; portid++) {
1080                 if ((enabled_port_mask & (1 << portid)) == 0) {
1081                         continue;
1082                 }
1083                 /* Start device */
1084                 ret = rte_eth_dev_start(portid);
1085                 if (ret < 0)
1086                         rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
1087                                 ret, portid);
1088
1089                 printf("done: Port %d\n", portid);
1090
1091         }
1092
1093         /* launch per-lcore init on every lcore */
1094         rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1095         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1096                 if (rte_eal_wait_lcore(lcore_id) < 0)
1097                         return -1;
1098         }
1099
1100         return 0;
1101 }