/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
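
/*
 * IP reassembly sample application: receives IPv4/IPv6 packets, reassembles
 * fragmented datagrams using the librte_ip_frag fragment table, looks up the
 * output port via LPM/LPM6, rewrites the Ethernet header, and forwards the
 * resulting packet.
 */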
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <sys/param.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_string_fns.h>
#include <rte_ip_frag.h>

#include "main.h"
#define MAX_PKT_BURST 32

#define RTE_LOGTYPE_IP_RSMBL RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define BUF_SIZE 2048
#define MBUF_SIZE \
	(BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

#define NB_MBUF 8192

/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600
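/* 0x2600 = 9728 bytes, which leaves 128 bytes of slack above
 * MAX_JUMBO_PKT_LEN (9600) for L2 framing. */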
#define MAX_FLOW_NUM UINT16_MAX
#define MIN_FLOW_NUM 1
#define DEF_FLOW_NUM 0x1000

/* TTL numbers are in ms. */
#define MAX_FLOW_TTL (3600 * MS_PER_S)
#define MIN_FLOW_TTL 1
#define DEF_FLOW_TTL MS_PER_S

#define MAX_FRAG_NUM RTE_LIBRTE_IP_FRAG_MAX_FRAG

/* Should be power of two. */
#define IP_FRAG_TBL_BUCKET_ENTRIES 16

static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t) (((addr) >> 24) & 0xFF),\
	(uint8_t) (((addr) >> 16) & 0xFF),\
	(uint8_t) (((addr) >> 8) & 0xFF),\
	(uint8_t) ((addr) & 0xFF)
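
/*
 * Usage sketch (illustrative, not part of the original file): with a
 * host-order IPv4 address, e.g. ip_dst = IPv4(100,10,0,0):
 *	printf("route: " IPv4_BYTES_FMT "\n", IPv4_BYTES(ip_dst));
 * prints "route: 100.10.0.0".
 */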
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
	"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11],\
	addr[12], addr[13], addr[14], addr[15]

#define IPV6_ADDR_LEN 16
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;
struct mbuf_table {
	uint32_t len;	/* queue capacity: number of slots in m_table */
	uint32_t head;	/* producer index */
	uint32_t tail;	/* consumer index */
	struct rte_mbuf *m_table[0];
};
struct rx_queue {
	struct rte_ip_frag_tbl *frag_tbl;
	struct rte_mempool *pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint8_t portid;
};
struct tx_lcore_stat {
	uint64_t call;	/* number of rte_eth_tx_burst() calls */
	uint64_t drop;	/* packets dropped because the TX queue was full */
	uint64_t queue;	/* packets enqueued for transmission */
	uint64_t send;	/* packets actually transmitted */
};
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
#define MAX_RX_QUEUE_PER_PORT 128
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rte_ip_frag_death_row death_row;
	struct mbuf_table *tx_mbufs[RTE_MAX_ETHPORTS];
	struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;

static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
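
/*
 * ETH_RSS_IP hashes on the IP source/destination addresses only. Since every
 * fragment of a datagram shares those fields, RSS would steer all fragments
 * of a flow to the same RX queue (and hence the same fragment table) if
 * multiple RX queues were configured; this example sets up a single RX queue
 * per port.
 */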
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t  depth;
	uint8_t  if_out;
};

struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{IPv4(100,10,0,0), 16, 0},
	{IPv4(100,20,0,0), 16, 1},
	{IPv4(100,30,0,0), 16, 2},
	{IPv4(100,40,0,0), 16, 3},
	{IPv4(100,50,0,0), 16, 4},
	{IPv4(100,60,0,0), 16, 5},
	{IPv4(100,70,0,0), 16, 6},
	{IPv4(100,80,0,0), 16, 7},
};
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};

static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define LPM_MAX_RULES     1024
#define LPM6_MAX_RULES    1024
#define LPM6_NUMBER_TBL8S (1 << 16)

static struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
	.flags = 0
};
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define TX_LCORE_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define TX_LCORE_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
/*
 * If number of queued packets reached given threshold, then
 * send burst of packets on an output interface.
 */
static inline uint32_t
send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
{
	uint32_t fill, len, k, n;
	struct mbuf_table *txmb;

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	/* number of packets currently queued (head may have wrapped) */
	if ((int32_t)(fill = txmb->head - txmb->tail) < 0)
		fill += len;

	if (fill >= thresh) {
		n = RTE_MIN(len - txmb->tail, fill);

		k = rte_eth_tx_burst(port, qconf->tx_queue_id[port],
			txmb->m_table + txmb->tail, (uint16_t)n);

		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, call, 1);
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, send, k);

		fill -= k;
		if ((txmb->tail += k) == len)
			txmb->tail = 0;
	}

	return fill;
}
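
/*
 * Note that a single call drains only the contiguous run starting at tail:
 * n = RTE_MIN(len - tail, fill). When the queue has wrapped around the end
 * of m_table, the remainder is picked up by a later call, after tail has
 * been reset to 0.
 */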
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t fill, lcore_id, len;
	struct lcore_queue_conf *qconf;
	struct mbuf_table *txmb;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	txmb = qconf->tx_mbufs[port];
	len = txmb->len;

	fill = send_burst(qconf, MAX_PKT_BURST, port);

	/* the queue is full: drop the oldest queued packet to make room */
	if (fill == len - 1) {
		TX_LCORE_STAT_UPDATE(&qconf->tx_stat, drop, 1);
		rte_pktmbuf_free(txmb->m_table[txmb->tail]);
		if (++txmb->tail == len)
			txmb->tail = 0;
	}

	TX_LCORE_STAT_UPDATE(&qconf->tx_stat, queue, 1);
	txmb->m_table[txmb->head] = m;
	if (++txmb->head == len)
		txmb->head = 0;

	return 0;
}
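
/*
 * The mbuf_table above is used as a circular FIFO: head is the producer
 * index, tail the consumer index, and one slot is kept free so that
 * head == tail unambiguously means "empty" (effective capacity is len - 1).
 */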
static inline void
reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
	struct lcore_queue_conf *qconf, uint64_t tms)
{
	struct ether_hdr *eth_hdr;
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row *dr;
	struct rx_queue *rxq;
	void *d_addr_bytes;
	uint8_t next_hop, dst_port;

	rxq = &qconf->rx_queue_list[queue];

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_port = portid;

	/* if packet is IPv4 */
	if (m->ol_flags & (PKT_RX_IPV4_HDR)) {
		struct ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);

		/* if it is a fragmented packet, then try to reassemble. */
		if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr);

			/* process this fragment. */
			mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
			if (mo == NULL)
				/* no packet to send out. */
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m,
					struct ether_hdr *);
				ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
			}
		}
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0)
			dst_port = next_hop;

		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	}
	/* if packet is IPv6 */
	else if (m->ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
		struct ipv6_extension_fragment *frag_hdr;
		struct ipv6_hdr *ip_hdr;

		ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);

		frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);

		if (frag_hdr != NULL) {
			struct rte_mbuf *mo;

			tbl = rxq->frag_tbl;
			dr = &qconf->death_row;

			/* prepare mbuf: setup l2_len/l3_len. */
			m->l2_len = sizeof(*eth_hdr);
			m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);

			mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms,
				ip_hdr, frag_hdr);
			if (mo == NULL)
				/* no packet to send out. */
				return;

			/* we have our packet reassembled. */
			if (mo != m) {
				m = mo;
				eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
				ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
			}
		}

		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0)
			dst_port = next_hop;

		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
	}
	/* if packet wasn't IPv4 or IPv6, it's forwarded to the port it came from */

	/* dst addr: 02:00:00:00:00:xx, where xx is the output port */
	d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
	*((uint64_t *)d_addr_bytes) = 0x000000000002 +
		((uint64_t)dst_port << 40);

	/* src addr: MAC address of the output port */
	ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);

	send_single_packet(m, dst_port);
}
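
/*
 * Destination MAC layout (on a little-endian host): the 64-bit store above
 * writes byte 0 = 0x02 and byte 5 = dst_port, producing 02:00:00:00:00:xx,
 * e.g. dst_port = 3 gives 02:00:00:00:00:03. The store also clobbers the
 * first two bytes of s_addr with zeros, which is harmless because s_addr is
 * rewritten by ether_addr_copy() immediately afterwards.
 */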
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t diff_tsc, cur_tsc, prev_tsc;
	int i, j, nb_rx;
	uint8_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_RSMBL, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_RSMBL, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {
		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
			portid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called so often
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if ((enabled_port_mask & (1 << portid)) != 0)
					send_burst(qconf, 1, portid);
			}
			prev_tsc = cur_tsc;
		}

		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].portid;

			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
				MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++)
				rte_prefetch0(rte_pktmbuf_mtod(
					pkts_burst[j], void *));

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
					j + PREFETCH_OFFSET], void *));
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++)
				reassemble(pkts_burst[j], portid,
					i, qconf, cur_tsc);

			rte_ip_frag_free_death_row(&qconf->death_row,
				PREFETCH_OFFSET);
		}
	}
}
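
/*
 * Fragments that the reassembly library drops (table overflow, stale
 * entries) are not freed inline; they are parked on the per-lcore "death
 * row" and released in bulk by rte_ip_frag_free_death_row() above, keeping
 * rte_pktmbuf_free() calls off the per-fragment fast path.
 */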
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]"
		" [--max-pkt-len PKTLEN]"
		" [--maxflows=<flows>] [--flowttl=<ttl>[(s|ms)]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -q NQ: number of RX queues per lcore\n"
		"  --maxflows=<flows>: optional, maximum number of flows "
		"that can be processed\n"
		"  --flowttl=<ttl>[(s|ms)]: optional, maximum TTL for each "
		"flow\n",
		prgname);
}
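
/*
 * Example invocation (illustrative; the binary name and EAL options depend
 * on your build target and platform):
 *	./build/ip_reassembly -c 0x3 -n 4 -- -p 0x3 -q 1 --maxflows=1024 --flowttl=2s
 */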
static int
parse_flow_num(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0 || *end != '\0')
		return -EINVAL;

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_flow_ttl(const char *str, uint32_t min, uint32_t max, uint32_t *val)
{
	char *end;
	uint64_t v;

	static const char frmt_sec[] = "s";
	static const char frmt_msec[] = "ms";

	/* parse decimal string */
	errno = 0;
	v = strtoul(str, &end, 10);
	if (errno != 0)
		return -EINVAL;

	if (*end != '\0') {
		if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
			v *= MS_PER_S;
		else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
			return -EINVAL;
	}

	if (v < min || v > max)
		return -EINVAL;

	*val = (uint32_t)v;
	return 0;
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	printf("%p\n", q_arg);

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"max-pkt-len", 1, 0, 0},
		{"maxflows", 1, 0, 0},
		{"flowttl", 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* number of RX queues per lcore */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					"maxflows", 8) &&
					(ret = parse_flow_num(optarg, MIN_FLOW_NUM,
					MAX_FLOW_NUM, &max_flow_num)) != 0) {
				printf("invalid value: \"%s\" for "
					"parameter %s\n", optarg,
					lgopts[option_index].name);
				print_usage(prgname);
				return ret;
			}
			if (!strncmp(lgopts[option_index].name, "flowttl", 7) &&
					(ret = parse_flow_ttl(optarg, MIN_FLOW_TTL,
					MAX_FLOW_TTL, &max_flow_ttl)) != 0) {
				printf("invalid value: \"%s\" for "
					"parameter %s\n", optarg,
					lgopts[option_index].name);
				print_usage(prgname);
				return ret;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}
				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv4_BYTES_FMT
					"/%d (port %d)\n", socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}

		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_RSMBL, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}
				RTE_LOG(INFO, IP_RSMBL, "Socket %i: adding route " IPv6_BYTES_FMT
					"/%d (port %d)\n", socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}
	return 0;
}
static int
setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
	uint32_t port)
{
	struct mbuf_table *mtb;
	uint32_t n;
	size_t sz;

	n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
	sz = sizeof(*mtb) + sizeof(mtb->m_table[0]) * n;

	if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
			"failed to allocate %zu bytes\n",
			__func__, lcore, port, sz);
		return -1;
	}

	mtb->len = n;
	qconf->tx_mbufs[port] = mtb;

	return 0;
}
static int
setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
{
	int socket;
	uint32_t nb_mbuf;
	uint64_t frag_cycles;
	char buf[RTE_MEMPOOL_NAMESIZE];

	socket = rte_lcore_to_socket_id(lcore);
	if (socket == SOCKET_ID_ANY)
		socket = 0;

	/* convert the flow TTL from milliseconds to TSC cycles */
	frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
		max_flow_ttl;

	if ((rxq->frag_tbl = rte_ip_frag_table_create(max_flow_num,
			IP_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
			socket)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "ip_frag_tbl_create(%u) on "
			"lcore: %u for queue: %u failed\n",
			max_flow_num, lcore, queue);
		return -1;
	}

	/*
	 * At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
	 * mbufs could be stored in the fragment table.
	 * Plus, each TX queue can hold up to <max_flow_num> packets.
	 */
	nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
	nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
	nb_mbuf *= 2; /* ipv4 and ipv6 */
	nb_mbuf += RTE_TEST_RX_DESC_DEFAULT + RTE_TEST_TX_DESC_DEFAULT;
	nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);

	snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);

	if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
			socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
		RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed\n", buf);
		return -1;
	}

	return 0;
}
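
/*
 * Worked example of the sizing above, assuming the defaults in this file
 * (max_flow_num = 0x1000 = 4096, max_rx_pkt_len = 0x2600 = 9728,
 * BUF_SIZE = 2048) and a RTE_LIBRTE_IP_FRAG_MAX_FRAG of 4:
 * 4096 * 4 = 16384 mbufs for the fragment table, times
 * ceil(9728 / 2048) = 5 segments per max-size packet = 81920, times 2 for
 * IPv4 + IPv6 = 163840, plus 128 + 512 descriptors = 164480 mbufs.
 */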
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket;
	unsigned lcore_id;

	/* traverse through lcores and initialize structures on each socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
			lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}

		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);
			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}
	return 0;
}
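
/*
 * LPM/LPM6 tables are created once per NUMA socket and shared by all lcores
 * (and hence all RX queues) running on that socket; rxq->lpm and rxq->lpm6
 * are assigned from socket_lpm[]/socket_lpm6[] during port setup in MAIN().
 */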
static void
queue_dump_stat(void)
{
	uint32_t i, lcore;
	const struct lcore_queue_conf *qconf;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (rte_lcore_is_enabled(lcore) == 0)
			continue;

		qconf = &lcore_queue_conf[lcore];
		for (i = 0; i < qconf->n_rx_queue; i++) {
			fprintf(stdout, " -- lcoreid=%u portid=%hhu "
				"frag tbl stat:\n",
				lcore, qconf->rx_queue_list[i].portid);
			rte_ip_frag_table_statistics_dump(stdout,
				qconf->rx_queue_list[i].frag_tbl);
			fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
				"TX packets queued:\t%" PRIu64 "\n"
				"TX packets dropped:\t%" PRIu64 "\n"
				"TX packets sent:\t%" PRIu64 "\n",
				qconf->tx_stat.call,
				qconf->tx_stat.queue,
				qconf->tx_stat.drop,
				qconf->tx_stat.send);
		}
	}
}
static void
signal_handler(int signum)
{
	queue_dump_stat();
	if (signum != SIGUSR1)
		rte_exit(0, "received signal: %d, exiting\n", signum);
}
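
/*
 * SIGUSR1 only dumps the per-queue statistics and lets the application keep
 * running; SIGINT and SIGTERM dump them and then terminate via rte_exit().
 */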
int
MAIN(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	struct rx_queue *rxq;
	int ret, socket;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;
	else if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");

	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");

	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
				qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = rte_eth_dev_socket_id(portid);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		queueid = qconf->n_rx_queue;
		rxq = &qconf->rx_queue_list[queueid];
		rxq->portid = portid;
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];
		if (setup_queue_tbl(rxq, rx_lcore_id, queueid) < 0)
			rte_exit(EXIT_FAILURE, "Failed to set up queue table\n");
		qconf->n_rx_queue++;

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
			&port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n",
				ret, portid);

		/* init one RX queue */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
			socket, NULL, rxq->pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int) rte_lcore_to_socket_id(lcore_id);

			printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			txconf = &dev_info.default_txconf;
			txconf->txq_flags = 0;

			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
				socket, txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			setup_port_tbl(qconf, lcore_id, socket, portid);
			queueid++;
		}
		printf("\n");
	}

	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		rte_eth_promiscuous_enable(portid);
	}

	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	signal(SIGUSR1, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}