update Intel copyright years to 2014
[dpdk.git] / examples / l3fwd-vf / main.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "main.h"

#define APP_LOOKUP_EXACT_MATCH          0
#define APP_LOOKUP_LPM                  1
#define DO_RFC_1812_CHECKS

//#define APP_LOOKUP_METHOD             APP_LOOKUP_EXACT_MATCH
#ifndef APP_LOOKUP_METHOD
#define APP_LOOKUP_METHOD             APP_LOOKUP_LPM
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
#include <rte_hash.h>
#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
#include <rte_lpm.h>
#else
#error "APP_LOOKUP_METHOD set to incorrect value"
#endif

#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1

#define MEMPOOL_CACHE_SIZE 256

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/*
 * This expression calculates the number of mbufs needed depending on user
 * input, taking into account memory for the RX and TX hardware rings, the
 * per-lcore mempool cache and the mbuf table kept per port per lcore.
 * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum value
 * of 8192.
 */
#define NB_MBUF RTE_MAX(						\
		(nb_ports * nb_rx_queue * RTE_TEST_RX_DESC_DEFAULT +	\
		nb_ports * nb_lcores * MAX_PKT_BURST +			\
		nb_ports * n_tx_queue * RTE_TEST_TX_DESC_DEFAULT +	\
		nb_lcores * MEMPOOL_CACHE_SIZE),			\
		(unsigned)8192)
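
/*
 * Worked example (the input values are only illustrative): with 2 ports,
 * 1 RX queue and 1 TX queue per port and 2 lcores, the expression gives
 * 2*1*128 + 2*2*32 + 2*1*512 + 2*256 = 1920 mbufs, so the RTE_MAX()
 * floor of 8192 is what actually gets allocated.
 */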

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define NB_SOCKETS 8

#define SOCKET0 0

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
static int numa_on = 1; /**< NUMA is enabled by default. */

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 1
#define MAX_RX_QUEUE_PER_PORT 1

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
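
/*
 * Default (port, queue, lcore) tuples above: ports 0 and 1, queues 0-2 and
 * port 2 queue 0 are polled by lcore 2; port 3 queues 0-1 by lcore 3.
 * The --config option replaces this table at run time.
 */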

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
	.txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
		      ETH_TXQ_FLAGS_NOVLANOFFL |
		      ETH_TXQ_FLAGS_NOXSUMSCTP |
		      ETH_TXQ_FLAGS_NOXSUMUDP |
		      ETH_TXQ_FLAGS_NOXSUMTCP)
};

static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];


#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)

#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t proto;
} __attribute__((__packed__));

struct l3fwd_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

static struct l3fwd_route l3fwd_route_array[] = {
	{{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
	{{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
};
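
/*
 * Static test flows for the hash lookup: the four TCP 5-tuples above map to
 * output ports 0-3. Packets that miss the hash are sent back out the port
 * they arrived on (see get_dst_port()).
 */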

typedef struct rte_hash lookup_struct_t;
static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];

#define L3FWD_HASH_ENTRIES      1024
struct rte_hash_parameters l3fwd_hash_params = {
	.name = "l3fwd_hash_0",
	.entries = L3FWD_HASH_ENTRIES,
	.bucket_entries = 4,
	.key_len = sizeof(struct ipv4_5tuple),
	.hash_func = DEFAULT_HASH_FUNC,
	.hash_func_init_val = 0,
	.socket_id = SOCKET0,
};

#define L3FWD_NUM_ROUTES \
	(sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))

static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
struct l3fwd_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

static struct l3fwd_route l3fwd_route_array[] = {
	{IPv4(1,1,1,0), 24, 0},
	{IPv4(2,1,1,0), 24, 1},
	{IPv4(3,1,1,0), 24, 2},
	{IPv4(4,1,1,0), 24, 3},
	{IPv4(5,1,1,0), 24, 4},
	{IPv4(6,1,1,0), 24, 5},
	{IPv4(7,1,1,0), 24, 6},
	{IPv4(8,1,1,0), 24, 7},
};
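
/*
 * Static test routes for the LPM lookup: n.1.1.0/24 is forwarded to output
 * port n-1, e.g. destinations in 1.1.1.0/24 go to port 0. Packets whose
 * destination matches no route are sent back out the port they arrived on
 * (see get_dst_port()).
 */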

#define L3FWD_NUM_ROUTES \
	(sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))

#define L3FWD_LPM_MAX_RULES     1024

typedef struct rte_lpm lookup_struct_t;
static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
#endif

struct lcore_conf {
	uint16_t n_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id;
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
	lookup_struct_t * lookup_struct;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];

/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id;
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}

/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}

#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
{
	/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
	/*
	 * 1. The packet length reported by the Link Layer must be large
	 * enough to hold the minimum length legal IP datagram (20 bytes).
	 */
	if (link_len < sizeof(struct ipv4_hdr))
		return -1;

	/* 2. The IP checksum must be correct. */
	/* this is checked in H/W */

	/*
	 * 3. The IP version number must be 4. If the version number is not 4
	 * then the packet may be another version of IP, such as IPng or
	 * ST-II.
	 */
	if (((pkt->version_ihl) >> 4) != 4)
		return -3;
	/*
	 * 4. The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return -4;

	/*
	 * 5. The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
		return -5;

	return 0;
}
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
print_key(struct ipv4_5tuple key)
{
	printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
	       (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}

static inline uint8_t
get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *l3fwd_lookup_struct)
{
	struct ipv4_5tuple key;
	struct tcp_hdr *tcp;
	struct udp_hdr *udp;
	int ret = 0;

	key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
	key.proto = ipv4_hdr->next_proto_id;

	switch (ipv4_hdr->next_proto_id) {
	case IPPROTO_TCP:
		tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
		key.port_src = rte_be_to_cpu_16(tcp->src_port);
		break;

	case IPPROTO_UDP:
		udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
					sizeof(struct ipv4_hdr));
		key.port_dst = rte_be_to_cpu_16(udp->dst_port);
		key.port_src = rte_be_to_cpu_16(udp->src_port);
		break;

	default:
		key.port_dst = 0;
		key.port_src = 0;
	}

	/* Find destination port */
	ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : l3fwd_out_if[ret]);
}
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint8_t
get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *l3fwd_lookup_struct)
{
	uint8_t next_hop;

	return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
			rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
			next_hop : portid);
}
#endif

static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t *l3fwd_lookup_struct)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	void *tmp;
	uint8_t dst_port;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
				sizeof(struct ether_hdr));

#ifdef DO_RFC_1812_CHECKS
	/* Check to make sure the packet is valid (RFC1812) */
	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
		rte_pktmbuf_free(m);
		return;
	}
#endif

	dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct);
	if (dst_port >= RTE_MAX_ETHPORTS || (enabled_port_mask & 1 << dst_port) == 0)
		dst_port = portid;

	/* 02:00:00:00:00:xx */
	tmp = &eth_hdr->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
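	/*
	 * On a little-endian CPU the 64-bit store above writes the bytes
	 * 02:00:00:00:00:<dst_port> into d_addr; the two bytes that spill
	 * past the 6-byte MAC land in s_addr, which is overwritten by the
	 * ether_addr_copy() below.
	 */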

#ifdef DO_RFC_1812_CHECKS
	/* Update time to live and header checksum */
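	/*
	 * Incremental checksum update: decrementing the TTL lowers the
	 * big-endian TTL/protocol word by 0x0100, and on a little-endian
	 * host incrementing the byte-swapped checksum field adds 0x0100
	 * back in network byte order. The end-around carry of true
	 * one's-complement arithmetic is ignored here for speed.
	 */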
	--(ipv4_hdr->time_to_live);
	++(ipv4_hdr->hdr_checksum);
#endif

	/* src addr */
	ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

	send_single_packet(m, dst_port);
}

/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
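	/*
	 * drain_tsc is the TX drain period expressed in TSC cycles:
	 * cycles per microsecond (rounded up) times BURST_TX_DRAIN_US.
	 */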

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
			portid, queueid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_conf[lcore_id],
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {

			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
			}
		}
	}
}

static int
check_lcore_params(void)
{
	uint8_t queue, lcore;
	uint16_t i;
	int socketid;

	for (i = 0; i < nb_lcore_params; ++i) {
		queue = lcore_params[i].queue_id;
		if (queue >= MAX_RX_QUEUE_PER_PORT) {
			printf("invalid queue number: %hhu\n", queue);
			return -1;
		}
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
			return -1;
		}
		socketid = rte_lcore_to_socket_id(lcore);
		if ((socketid != 0) && (numa_on == 0)) {
			printf("warning: lcore %hhu is on socket %d with numa off\n",
				lcore, socketid);
		}
	}
	return 0;
}

static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}

static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}

static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		} else {
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
				lcore_params[i].port_id;
			lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
				lcore_params[i].queue_id;
			lcore_conf[lcore].n_rx_queue++;
		}
	}
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK"
		"  [--config (port,queue,lcore)[,(port,queue,lcore)]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  --config (port,queue,lcore): rx queues configuration\n"
		"  --no-numa: optional, disable numa awareness\n",
		prgname);
}
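
/*
 * Example invocation (the binary path, core mask and memory channel count
 * below are only illustrative):
 *   ./build/l3fwd-vf -c 0x6 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)"
 * polls port 0 queue 0 on lcore 1 and port 1 queue 0 on lcore 2.
 */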

/* Custom handling of signals to handle process termination */
static void
signal_handler(int signum)
{
	uint8_t portid;
	uint8_t nb_ports = rte_eth_dev_count();

	/* When we receive a SIGINT signal */
	if (signum == SIGINT) {
		for (portid = 0; portid < nb_ports; portid++) {
			/* skip ports that are not enabled */
			if ((enabled_port_mask & (1 << portid)) == 0)
				continue;
			rte_eth_dev_close(portid);
		}
	}
	rte_exit(EXIT_SUCCESS, "\n User forced exit\n");
}

static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}

static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		if ((p0 = strchr(p, ')')) == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		rte_snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
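
/*
 * Each "(port,queue,lcore)" tuple parsed above fills one entry of
 * lcore_params_array, so supplying --config replaces the default mapping
 * entirely, e.g. --config="(0,0,1),(1,0,2)".
 */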

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-numa", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (!strcmp(lgopts[option_index].name, "config")) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}

			if (!strcmp(lgopts[option_index].name, "no-numa")) {
				printf("numa is disabled \n");
				numa_on = 0;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}

static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}

#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static void
setup_hash(int socketid)
{
	unsigned i;
	int ret;
	char s[64];

	/* create hashes */
	rte_snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid);
	l3fwd_hash_params.name = s;
	l3fwd_hash_params.socket_id = socketid;
	l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params);
	if (l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
				"socket %d\n", socketid);

	/* populate the hash */
	for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
		ret = rte_hash_add_key(l3fwd_lookup_struct[socketid],
				(void *) &l3fwd_route_array[i].key);
		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd hash on socket %d\n", i, socketid);
		}
		l3fwd_out_if[ret] = l3fwd_route_array[i].if_out;
		printf("Hash: Adding key\n");
		print_key(l3fwd_route_array[i].key);
	}
}
#endif

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static void
setup_lpm(int socketid)
{
	unsigned i;
	int ret;
	char s[64];

	/* create the LPM table */
	rte_snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid);
	l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
				L3FWD_LPM_MAX_RULES, 0);
	if (l3fwd_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
				" on socket %d\n", socketid);

	/* populate the LPM table */
	for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
		ret = rte_lpm_add(l3fwd_lookup_struct[socketid],
			l3fwd_route_array[i].ip,
			l3fwd_route_array[i].depth,
			l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
				"l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route 0x%08x / %d (%d)\n",
			(unsigned)l3fwd_route_array[i].ip,
			l3fwd_route_array[i].depth,
			l3fwd_route_array[i].if_out);
	}
}
#endif

static int
init_mem(unsigned nb_mbuf)
{
	struct lcore_conf *qconf;
	int socketid;
	unsigned lcore_id;
	char s[64];

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socketid = rte_lcore_to_socket_id(lcore_id);
		else
			socketid = 0;

		if (socketid >= NB_SOCKETS) {
			rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
				socketid, lcore_id, NB_SOCKETS);
		}
		if (pktmbuf_pool[socketid] == NULL) {
			rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, nb_mbuf, MBUF_SIZE,
						   MEMPOOL_CACHE_SIZE,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, 0);
			if (pktmbuf_pool[socketid] == NULL)
				rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid);
			else
				printf("Allocated mbuf pool on socket %d\n", socketid);

#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
			setup_lpm(socketid);
#else
			setup_hash(socketid);
#endif
		}
		qconf = &lcore_conf[lcore_id];
		qconf->lookup_struct = l3fwd_lookup_struct[socketid];
	}
	return 0;
}

int
MAIN(int argc, char **argv)
{
	struct lcore_conf *qconf;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id;
	uint32_t nb_lcores;
	uint16_t n_tx_queue;
	uint8_t portid, nb_rx_queue, queue, socketid;

	signal(SIGINT, signal_handler);
	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid L3FWD-VF parameters\n");

	if (check_lcore_params() < 0)
		rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	/* init driver */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init pmd\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_exit(EXIT_FAILURE, "check_port_config failed\n");

	nb_lcores = rte_lcore_count();

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		/* the number of RX queues per port must always be 1 in this example */
		nb_rx_queue = get_port_n_rx_queues(portid);
		n_tx_queue = MAX_TX_QUEUE_PER_PORT;

		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)1);
		ret = rte_eth_dev_configure(portid, nb_rx_queue, n_tx_queue, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		ret = init_mem(NB_MBUF);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "init_mem failed\n");

		/* init one TX queue */
		socketid = (uint8_t)rte_lcore_to_socket_id(rte_get_master_lcore());

		printf("txq=%d,%d,%d ", portid, 0, socketid);
		fflush(stdout);
		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
						 socketid, &tx_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
				"port=%d\n", ret, portid);

		printf("\n");
	}

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;
		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id = 0;

		printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
		fflush(stdout);
		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;

			if (numa_on)
				socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
			else
				socketid = 0;

			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
						socketid, &rx_conf, pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, "
						"port=%d\n", ret, portid);
		}
	}
	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0) {
			continue;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		printf("done: Port %d\n", portid);
	}

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}