/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <getopt.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_string_fns.h>
#include <rte_ip_frag.h>
#define RTE_LOGTYPE_IP_FRAG RTE_LOGTYPE_USER1

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192

/* allow max jumbo frame of 9.5 KB (0x2600 = 9728 bytes) */
#define JUMBO_FRAME_MAX_SIZE 0x2600

#define ROUNDUP_DIV(a, b) (((a) + (b) - 1) / (b))
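/*
 * Illustrative note (editorial): ROUNDUP_DIV is integer division rounded
 * up, e.g. ROUNDUP_DIV(9000, 1500) == 6 and ROUNDUP_DIV(9001, 1500) == 7,
 * which is the arithmetic one would use to size a fragment array for a
 * given payload and MTU.
 */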
/*
 * Default byte size for the IPv4/IPv6 Maximum Transfer Unit (MTU).
 * This value includes the size of the IPv4/IPv6 header.
 */
#define IPV4_MTU_DEFAULT ETHER_MTU
#define IPV6_MTU_DEFAULT ETHER_MTU

/*
 * Default payload in bytes for the IPv4/IPv6 packet.
 */
#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr))
#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct ipv6_hdr))
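/*
 * For reference (editorial, assuming the standard 1500-byte ETHER_MTU):
 * the IPv4 payload works out to 1500 - 20 = 1480 bytes and the IPv6
 * payload to 1500 - 40 = 1460 bytes, since sizeof(struct ipv4_hdr) == 20
 * (no options) and sizeof(struct ipv6_hdr) == 40.
 */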
/*
 * Max number of fragments expected per packet, as defined in the config file.
 */
#define MAX_PACKET_FRAG RTE_LIBRTE_IP_FRAG_MAX_FRAG
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold reg. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default value of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100 us */

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET 3
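/*
 * Editorial sketch of the idea: the RX loop in main_loop() issues
 * rte_prefetch0() for packet j + PREFETCH_OFFSET while it forwards packet
 * j, so a packet's headers are usually in cache by the time they are
 * parsed. A larger offset hides more memory latency but prefetches
 * further past the end of short bursts.
 */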
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* Ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
	(uint8_t) (((addr) >> 24) & 0xFF),\
	(uint8_t) (((addr) >> 16) & 0xFF),\
	(uint8_t) (((addr) >> 8) & 0xFF),\
	(uint8_t) ((addr) & 0xFF)
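/*
 * Usage sketch (illustrative only), printing a host-order IPv4 address
 * held in a uint32_t named ip:
 *
 *	printf("dst: " IPv4_BYTES_FMT "\n", IPv4_BYTES(ip));
 */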
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:"\
		"%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
	addr[0], addr[1], addr[2], addr[3], \
	addr[4], addr[5], addr[6], addr[7], \
	addr[8], addr[9], addr[10], addr[11], \
	addr[12], addr[13], addr[14], addr[15]
#define IPV6_ADDR_LEN 16

/* mask of enabled ports */
static int enabled_port_mask = 0;

static int rx_queue_per_lcore = 1;

#define MBUF_TABLE_SIZE (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG))
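/*
 * Editorial note: the table is sized to twice the larger of the burst
 * size and the per-packet fragment limit, so a queue holding just under
 * MAX_PKT_BURST packets can still absorb all fragments of one more
 * packet before it is drained.
 */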
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MBUF_TABLE_SIZE];
};
struct rx_queue {
	struct rte_mempool *direct_pool;
	struct rte_mempool *indirect_pool;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	uint8_t portid;
};
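/*
 * Background note (editorial): rte_ipv4_fragment_packet() and
 * rte_ipv6_fragment_packet() allocate each fragment's header mbuf from
 * the direct pool and attach the payload via clones taken from the
 * indirect pool, so fragment payloads reference the input packet's data
 * instead of copying it.
 */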
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint16_t n_rx_queue;
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};
static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0,   /* Use PMD default values */
};
/*
 * IPv4 forwarding table
 */
struct l3fwd_ipv4_route {
	uint32_t ip;
	uint8_t depth;
	uint8_t if_out;
};

struct l3fwd_ipv4_route l3fwd_ipv4_route_array[] = {
	{IPv4(100,10,0,0), 16, 0},
	{IPv4(100,20,0,0), 16, 1},
	{IPv4(100,30,0,0), 16, 2},
	{IPv4(100,40,0,0), 16, 3},
	{IPv4(100,50,0,0), 16, 4},
	{IPv4(100,60,0,0), 16, 5},
	{IPv4(100,70,0,0), 16, 6},
	{IPv4(100,80,0,0), 16, 7},
};
/*
 * IPv6 forwarding table
 */
struct l3fwd_ipv6_route {
	uint8_t ip[IPV6_ADDR_LEN];
	uint8_t depth;
	uint8_t if_out;
};

static struct l3fwd_ipv6_route l3fwd_ipv6_route_array[] = {
	{{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 0},
	{{2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 1},
	{{3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 2},
	{{4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 3},
	{{5,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 4},
	{{6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 5},
	{{7,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 6},
	{{8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}, 48, 7},
};
#define LPM_MAX_RULES     1024
#define LPM6_MAX_RULES    1024
#define LPM6_NUMBER_TBL8S (1 << 16)

struct rte_lpm6_config lpm6_config = {
	.max_rules = LPM6_MAX_RULES,
	.number_tbl8s = LPM6_NUMBER_TBL8S,
};
static struct rte_mempool *socket_direct_pool[RTE_MAX_NUMA_NODES];
static struct rte_mempool *socket_indirect_pool[RTE_MAX_NUMA_NODES];
static struct rte_lpm *socket_lpm[RTE_MAX_NUMA_NODES];
static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
/* Send burst of packets on an output interface */
static inline int
send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	uint16_t queueid;
	int ret;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
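	/*
	 * Editorial note: rte_eth_tx_burst() may accept fewer than n
	 * packets; any mbufs the PMD did not take are freed below so they
	 * do not leak.
	 */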
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
		uint8_t queueid, uint8_t port_in)
{
	struct rx_queue *rxq;
	uint32_t i, len;
	uint8_t next_hop, port_out, ipv6;
	int32_t len2;

	ipv6 = 0;
	rxq = &qconf->rx_queue_list[queueid];
	/* by default, send everything back to the source port */
	port_out = port_in;

	/* Remove the Ethernet header from the input packet */
	rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));

	/* Build transmission burst */
	len = qconf->tx_mbufs[port_out].len;
	/* if this is an IPv4 packet */
	if (m->ol_flags & PKT_RX_IPV4_HDR) {
		struct ipv4_hdr *ip_hdr;
		uint32_t ip_dst;

		/* Read the lookup key (i.e. ip_dst) from the input packet */
		ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
		ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);

		/* Find destination port */
		if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			port_out = next_hop;

			/* Build transmission burst for new port */
			len = qconf->tx_mbufs[port_out].len;
		}

		/* if we don't need to do any fragmentation */
		if (likely(IPV4_MTU_DEFAULT >= m->pkt_len)) {
			qconf->tx_mbufs[port_out].m_table[len] = m;
			len2 = 1;
		} else {
			len2 = rte_ipv4_fragment_packet(m,
				&qconf->tx_mbufs[port_out].m_table[len],
				(uint16_t)(MBUF_TABLE_SIZE - len),
				IPV4_MTU_DEFAULT,
				rxq->direct_pool, rxq->indirect_pool);

			/* Free input packet */
			rte_pktmbuf_free(m);

			/* If we fail to fragment the packet */
			if (unlikely(len2 < 0))
				return;
		}
	}
	/* if this is an IPv6 packet */
	else if (m->ol_flags & PKT_RX_IPV6_HDR) {
		struct ipv6_hdr *ip_hdr;

		ipv6 = 1;

		/* Read the lookup key (i.e. ip_dst) from the input packet */
		ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);

		/* Find destination port */
		if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop) == 0 &&
				(enabled_port_mask & 1 << next_hop) != 0) {
			port_out = next_hop;

			/* Build transmission burst for new port */
			len = qconf->tx_mbufs[port_out].len;
		}

		/* if we don't need to do any fragmentation */
		if (likely(IPV6_MTU_DEFAULT >= m->pkt_len)) {
			qconf->tx_mbufs[port_out].m_table[len] = m;
			len2 = 1;
		} else {
			len2 = rte_ipv6_fragment_packet(m,
				&qconf->tx_mbufs[port_out].m_table[len],
				(uint16_t)(MBUF_TABLE_SIZE - len),
				IPV6_MTU_DEFAULT,
				rxq->direct_pool, rxq->indirect_pool);

			/* Free input packet */
			rte_pktmbuf_free(m);

			/* If we fail to fragment the packet */
			if (unlikely(len2 < 0))
				return;
		}
	}
	/* else, just forward the packet */
	else {
		qconf->tx_mbufs[port_out].m_table[len] = m;
		len2 = 1;
	}

	for (i = len; i < len + len2; i++) {
		void *d_addr_bytes;

		m = qconf->tx_mbufs[port_out].m_table[i];
		struct ether_hdr *eth_hdr = (struct ether_hdr *)
			rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
		if (eth_hdr == NULL) {
			rte_panic("No headroom in mbuf.\n");
		}

		m->l2_len = sizeof(struct ether_hdr);

		/* dst addr: 02:00:00:00:00:xx */
		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
		*((uint64_t *)d_addr_bytes) = 0x000000000002 + ((uint64_t)port_out << 40);
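		/*
		 * Editorial note: the 8-byte store above writes the 6-byte
		 * destination MAC (02:00:00:00:00:XX on a little-endian
		 * host, with XX = port_out); the two extra bytes spill into
		 * s_addr, which is fully rewritten by the ether_addr_copy()
		 * that follows.
		 */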
		/* src addr */
		ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr);

		if (ipv6)
			eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		else
			eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	}
	len += len2;

	if (likely(len < MAX_PKT_BURST)) {
		qconf->tx_mbufs[port_out].len = (uint16_t)len;
		return;
	}

	/* Transmit packets */
	send_burst(qconf, (uint16_t)len, port_out);
	qconf->tx_mbufs[port_out].len = 0;
}
/* main processing loop */
static int
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, j, nb_rx;
	uint8_t portid;
	struct lcore_queue_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;
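	/*
	 * Worked example (editorial, assuming a 2 GHz TSC): drain_tsc is
	 * roughly 2e9 / 1e6 * 100 = 200,000 cycles, so queued TX packets
	 * are flushed at least once every ~100 us even when RX is idle.
	 */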
	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];
	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IP_FRAG, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IP_FRAG, "entering main loop on lcore %u\n", lcore_id);
	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].portid;
		RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
				portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();
		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			/*
			 * This could be optimized (use queueid instead of
			 * portid), but it is not called often enough for
			 * the difference to matter.
			 */
			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(&lcore_queue_conf[lcore_id],
					   qconf->tx_mbufs[portid].len,
					   portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}
		/*
		 * Read packets from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i].portid;
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				l3fwd_simple_forward(pkts_burst[j], qconf, i, portid);
			}
		}
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	       "  -q NQ: number of queues (=ports) per lcore (default is 1)\n",
	       prgname);
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse decimal string */
	n = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
	if (n == 0)
		return -1;
	if (n >= MAX_RX_QUEUE_PER_LCORE)
		return -1;

	return n;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt = argv;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask < 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			print_usage(prgname);
			return -1;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 0; /* reset getopt lib */

	return ret;
}
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
	       eth_addr->addr_bytes[0],
	       eth_addr->addr_bytes[1],
	       eth_addr->addr_bytes[2],
	       eth_addr->addr_bytes[3],
	       eth_addr->addr_bytes[4],
	       eth_addr->addr_bytes[5]);
}
/* Check the link status of all ports for up to 9 s, then print the final status */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100 ms */
#define MAX_CHECK_TIME 90 /* 9 s (90 * 100 ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after the final status has been printed, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int
init_routing_table(void)
{
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket, ret;
	unsigned i;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (socket_lpm[socket]) {
			lpm = socket_lpm[socket];
			/* populate the LPM table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv4_route_array); i++) {
				ret = rte_lpm_add(lpm,
					l3fwd_ipv4_route_array[i].ip,
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
						"LPM table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv4_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv4_BYTES(l3fwd_ipv4_route_array[i].ip),
					l3fwd_ipv4_route_array[i].depth,
					l3fwd_ipv4_route_array[i].if_out);
			}
		}

		if (socket_lpm6[socket]) {
			lpm6 = socket_lpm6[socket];
			/* populate the LPM6 table */
			for (i = 0; i < RTE_DIM(l3fwd_ipv6_route_array); i++) {
				ret = rte_lpm6_add(lpm6,
					l3fwd_ipv6_route_array[i].ip,
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
				if (ret < 0) {
					RTE_LOG(ERR, IP_FRAG, "Unable to add entry %i to the l3fwd "
						"LPM6 table\n", i);
					return -1;
				}

				RTE_LOG(INFO, IP_FRAG, "Socket %i: adding route " IPv6_BYTES_FMT
						"/%d (port %d)\n",
					socket,
					IPv6_BYTES(l3fwd_ipv6_route_array[i].ip),
					l3fwd_ipv6_route_array[i].depth,
					l3fwd_ipv6_route_array[i].if_out);
			}
		}
	}

	return 0;
}
static int
init_mem(void)
{
	char buf[PATH_MAX];
	struct rte_mempool *mp;
	struct rte_lpm *lpm;
	struct rte_lpm6 *lpm6;
	int socket;
	unsigned lcore_id;

	/* traverse lcores and initialize structures on each socket */

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {

		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		socket = rte_lcore_to_socket_id(lcore_id);

		if (socket == SOCKET_ID_ANY)
			socket = 0;
		if (socket_direct_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating direct mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_direct_%i", socket);

			mp = rte_mempool_create(buf, NB_MBUF, MBUF_SIZE, 32,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_pktmbuf_pool_init, NULL,
				rte_pktmbuf_init, NULL,
				socket, 0);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create direct mempool\n");
				return -1;
			}
			socket_direct_pool[socket] = mp;
		}
		if (socket_indirect_pool[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating indirect mempool on socket %i\n",
					socket);
			snprintf(buf, sizeof(buf), "pool_indirect_%i", socket);

			mp = rte_mempool_create(buf, NB_MBUF,
				sizeof(struct rte_mbuf), 32,
				0, NULL, NULL,
				rte_pktmbuf_init, NULL,
				socket, 0);
			if (mp == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create indirect mempool\n");
				return -1;
			}
			socket_indirect_pool[socket] = mp;
		}
		if (socket_lpm[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);

			lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
			if (lpm == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
				return -1;
			}
			socket_lpm[socket] = lpm;
		}
		if (socket_lpm6[socket] == NULL) {
			RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
			snprintf(buf, sizeof(buf), "IP_FRAG_LPM6_%i", socket);

			lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
			if (lpm6 == NULL) {
				RTE_LOG(ERR, IP_FRAG, "Cannot create LPM6 table\n");
				return -1;
			}
			socket_lpm6[socket] = lpm6;
		}
	}

	return 0;
}
int
MAIN(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rx_queue *rxq;
	int socket, ret;
	unsigned nb_ports;
	uint16_t queueid = 0;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid;
	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_init failed");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid arguments");
	if (rte_eal_pci_probe() < 0)
		rte_panic("Cannot probe PCI\n");

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;
	else if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No ports found!\n");
	nb_lcores = rte_lcore_count();

	/* initialize structures (mempools, lpm, etc.) */
	if (init_mem() < 0)
		rte_panic("Cannot initialize memory structures!\n");
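	/*
	 * Editorial note: RTE_LEN2MASK(n, t) yields a value of type t with
	 * the low n bits set, e.g. RTE_LEN2MASK(4, unsigned) == 0xF, so the
	 * test below flags any portmask bit beyond the detected ports.
	 */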
	/* check if portmask has non-existent ports */
	if (enabled_port_mask & ~(RTE_LEN2MASK(nb_ports, unsigned)))
		rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

			qconf = &lcore_queue_conf[rx_lcore_id];
		}

		socket = (int) rte_lcore_to_socket_id(rx_lcore_id);
		if (socket == SOCKET_ID_ANY)
			socket = 0;

		rxq = &qconf->rx_queue_list[qconf->n_rx_queue];
		rxq->portid = portid;
		rxq->direct_pool = socket_direct_pool[socket];
		rxq->indirect_pool = socket_indirect_pool[socket];
		rxq->lpm = socket_lpm[socket];
		rxq->lpm6 = socket_lpm6[socket];
		qconf->n_rx_queue++;
		/* init port */
		printf("Initializing port %d on lcore %u...", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: "
				 "err=%d, port=%d\n", ret, portid);

		/* init one RX queue */
		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					     socket, &rx_conf,
					     socket_direct_pool[socket]);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				 "err=%d, port=%d\n", ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf("\n");
		/* init one TX queue per (lcore, port) pair */
		queueid = 0;
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;

			socket = (int) rte_lcore_to_socket_id(lcore_id);
			printf("txq=%u,%d ", lcore_id, queueid);
			fflush(stdout);

			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						     socket, &tx_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					 "err=%d, port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}

		printf("\n");
	}
	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				 ret, portid);

		rte_eth_promiscuous_enable(portid);
	}
	if (init_routing_table() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init routing table\n");

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}