1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
11 #include <sys/queue.h>
16 #include <rte_common.h>
17 #include <rte_byteorder.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
39 #include <rte_string_fns.h>
42 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
43 #define L3FWDACL_DEBUG
45 #define DO_RFC_1812_CHECKS
47 #define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
49 #define MAX_JUMBO_PKT_LEN 9600
51 #define MEMPOOL_CACHE_SIZE 256
54 * This expression is used to calculate the number of mbufs needed
55 * depending on user input, taking into account memory for rx and tx hardware
56 * rings, cache per lcore and mtable per port per lcore.
57 * RTE_MAX is used to ensure that NB_MBUF never goes below a
58 * minimum value of 8192
61 #define NB_MBUF RTE_MAX(\
62 (nb_ports * nb_rx_queue * nb_rxd + \
63 nb_ports * nb_lcores * MAX_PKT_BURST + \
64 nb_ports * n_tx_queue * nb_txd + \
65 nb_lcores * MEMPOOL_CACHE_SIZE), \
68 #define MAX_PKT_BURST 32
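/*
 * Illustrative sizing only (assumed values, not taken from the application):
 * with 2 ports, 1 RX queue per port, 1024 RX/TX descriptors, 2 lcores and
 * 2 TX queues per port, the NB_MBUF expression gives
 * 2*1*1024 + 2*2*32 + 2*2*1024 + 2*256 = 6784 mbufs,
 * which is below the floor, so NB_MBUF evaluates to the 8192 minimum.
 */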
69 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
73 /* Configure how many packets ahead to prefetch when reading packets */
74 #define PREFETCH_OFFSET 3
77 * Configurable number of RX/TX ring descriptors
79 #define RTE_TEST_RX_DESC_DEFAULT 1024
80 #define RTE_TEST_TX_DESC_DEFAULT 1024
81 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
82 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
84 /* ethernet addresses of ports */
85 static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
87 /* mask of enabled ports */
88 static uint32_t enabled_port_mask;
89 static int promiscuous_on; /**< Promiscuous mode is off by default. */
90 static int numa_on = 1; /**< NUMA is enabled by default. */
92 struct lcore_rx_queue {
95 } __rte_cache_aligned;
97 #define MAX_RX_QUEUE_PER_LCORE 16
98 #define MAX_TX_QUEUE_PER_PORT RTE_MAX_ETHPORTS
99 #define MAX_RX_QUEUE_PER_PORT 128
101 #define MAX_LCORE_PARAMS 1024
102 struct lcore_params {
106 } __rte_cache_aligned;
108 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
109 static struct lcore_params lcore_params_array_default[] = {
121 static struct lcore_params *lcore_params = lcore_params_array_default;
122 static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
123 sizeof(lcore_params_array_default[0]);
125 static struct rte_eth_conf port_conf = {
127 .mq_mode = ETH_MQ_RX_RSS,
128 .max_rx_pkt_len = ETHER_MAX_LEN,
130 .ignore_offload_bitfield = 1,
131 .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
132 DEV_RX_OFFLOAD_CHECKSUM),
137 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
138 ETH_RSS_TCP | ETH_RSS_SCTP,
142 .mq_mode = ETH_MQ_TX_NONE,
146 static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
148 /***********************start of ACL part******************************/
149 #ifdef DO_RFC_1812_CHECKS
151 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
154 send_single_packet(struct rte_mbuf *m, uint16_t port);
156 #define MAX_ACL_RULE_NUM 100000
157 #define DEFAULT_MAX_CATEGORIES 1
158 #define L3FWD_ACL_IPV4_NAME "l3fwd-acl-ipv4"
159 #define L3FWD_ACL_IPV6_NAME "l3fwd-acl-ipv6"
160 #define ACL_LEAD_CHAR ('@')
161 #define ROUTE_LEAD_CHAR ('R')
162 #define COMMENT_LEAD_CHAR ('#')
163 #define OPTION_CONFIG "config"
164 #define OPTION_NONUMA "no-numa"
165 #define OPTION_ENBJMO "enable-jumbo"
166 #define OPTION_RULE_IPV4 "rule_ipv4"
167 #define OPTION_RULE_IPV6 "rule_ipv6"
168 #define OPTION_SCALAR "scalar"
169 #define ACL_DENY_SIGNATURE 0xf0000000
170 #define RTE_LOGTYPE_L3FWDACL RTE_LOGTYPE_USER3
171 #define acl_log(format, ...) RTE_LOG(ERR, L3FWDACL, format, ##__VA_ARGS__)
172 #define uint32_t_to_char(ip, a, b, c, d) do {\
173 *a = (unsigned char)(ip >> 24 & 0xff);\
174 *b = (unsigned char)(ip >> 16 & 0xff);\
175 *c = (unsigned char)(ip >> 8 & 0xff);\
176 *d = (unsigned char)(ip & 0xff);\
178 #define OFF_ETHHEAD (sizeof(struct ether_hdr))
179 #define OFF_IPV42PROTO (offsetof(struct ipv4_hdr, next_proto_id))
180 #define OFF_IPV62PROTO (offsetof(struct ipv6_hdr, proto))
181 #define MBUF_IPV4_2PROTO(m) \
182 rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV42PROTO)
183 #define MBUF_IPV6_2PROTO(m) \
184 rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV62PROTO)
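/*
 * Both macros return a pointer to the L4-protocol byte of the packet
 * (next_proto_id for IPv4, proto for IPv6). That byte is the zero offset
 * against which the field offsets in ipv4_defs/ipv6_defs below are defined,
 * so the pointer can be handed directly to rte_acl_classify().
 */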
186 #define GET_CB_FIELD(in, fd, base, lim, dlm) do { \
190 val = strtoul((in), &end, (base)); \
191 if (errno != 0 || end[0] != (dlm) || val > (lim)) \
193 (fd) = (typeof(fd))val; \
198 * ACL rules should have higher priorities than route rules, so that an ACL
199 * rule is always returned when an input packet matches multiple entries in the database.
200 * The exception is performance measurement, where route rules can be defined with
201 * higher priority so that a route rule is always returned by each lookup.
202 * Reserve the range from ACL_RULE_PRIORITY_MAX + 1 to
203 * RTE_ACL_MAX_PRIORITY for route entries used in performance measurement.
205 #define ACL_RULE_PRIORITY_MAX 0x10000000
208 * Forwarding port numbers saved in the ACL library start from 1,
209 * since the ACL library treats a userdata value of 0 as invalid.
210 * So add 1 when saving a port and subtract 1 when forwarding a packet.
212 #define FWD_PORT_SHIFT 1
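/*
 * Example: a route rule that forwards to port 0 is stored with
 * userdata = 0 + FWD_PORT_SHIFT = 1; send_one_packet() undoes the shift
 * with (res - FWD_PORT_SHIFT) before transmitting.
 */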
215 * Rule and trace formats definitions.
228 * That effectively defines order of IPV4VLAN classifications:
230 * - VLAN (TAG and DOMAIN)
233 * - PORTS (SRC and DST)
236 RTE_ACL_IPV4VLAN_PROTO,
237 RTE_ACL_IPV4VLAN_VLAN,
238 RTE_ACL_IPV4VLAN_SRC,
239 RTE_ACL_IPV4VLAN_DST,
240 RTE_ACL_IPV4VLAN_PORTS,
244 struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
246 .type = RTE_ACL_FIELD_TYPE_BITMASK,
247 .size = sizeof(uint8_t),
248 .field_index = PROTO_FIELD_IPV4,
249 .input_index = RTE_ACL_IPV4VLAN_PROTO,
253 .type = RTE_ACL_FIELD_TYPE_MASK,
254 .size = sizeof(uint32_t),
255 .field_index = SRC_FIELD_IPV4,
256 .input_index = RTE_ACL_IPV4VLAN_SRC,
257 .offset = offsetof(struct ipv4_hdr, src_addr) -
258 offsetof(struct ipv4_hdr, next_proto_id),
261 .type = RTE_ACL_FIELD_TYPE_MASK,
262 .size = sizeof(uint32_t),
263 .field_index = DST_FIELD_IPV4,
264 .input_index = RTE_ACL_IPV4VLAN_DST,
265 .offset = offsetof(struct ipv4_hdr, dst_addr) -
266 offsetof(struct ipv4_hdr, next_proto_id),
269 .type = RTE_ACL_FIELD_TYPE_RANGE,
270 .size = sizeof(uint16_t),
271 .field_index = SRCP_FIELD_IPV4,
272 .input_index = RTE_ACL_IPV4VLAN_PORTS,
273 .offset = sizeof(struct ipv4_hdr) -
274 offsetof(struct ipv4_hdr, next_proto_id),
277 .type = RTE_ACL_FIELD_TYPE_RANGE,
278 .size = sizeof(uint16_t),
279 .field_index = DSTP_FIELD_IPV4,
280 .input_index = RTE_ACL_IPV4VLAN_PORTS,
281 .offset = sizeof(struct ipv4_hdr) -
282 offsetof(struct ipv4_hdr, next_proto_id) +
287 #define IPV6_ADDR_LEN 16
288 #define IPV6_ADDR_U16 (IPV6_ADDR_LEN / sizeof(uint16_t))
289 #define IPV6_ADDR_U32 (IPV6_ADDR_LEN / sizeof(uint32_t))
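/*
 * A 128-bit IPv6 address does not fit into a single ACL field, so it is
 * split into four consecutive 32-bit fields (SRC1..SRC4 / DST1..DST4)
 * in ipv6_defs below; IPV6_ADDR_U32 is that per-address field count.
 */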
306 struct rte_acl_field_def ipv6_defs[NUM_FIELDS_IPV6] = {
308 .type = RTE_ACL_FIELD_TYPE_BITMASK,
309 .size = sizeof(uint8_t),
310 .field_index = PROTO_FIELD_IPV6,
311 .input_index = PROTO_FIELD_IPV6,
315 .type = RTE_ACL_FIELD_TYPE_MASK,
316 .size = sizeof(uint32_t),
317 .field_index = SRC1_FIELD_IPV6,
318 .input_index = SRC1_FIELD_IPV6,
319 .offset = offsetof(struct ipv6_hdr, src_addr) -
320 offsetof(struct ipv6_hdr, proto),
323 .type = RTE_ACL_FIELD_TYPE_MASK,
324 .size = sizeof(uint32_t),
325 .field_index = SRC2_FIELD_IPV6,
326 .input_index = SRC2_FIELD_IPV6,
327 .offset = offsetof(struct ipv6_hdr, src_addr) -
328 offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
331 .type = RTE_ACL_FIELD_TYPE_MASK,
332 .size = sizeof(uint32_t),
333 .field_index = SRC3_FIELD_IPV6,
334 .input_index = SRC3_FIELD_IPV6,
335 .offset = offsetof(struct ipv6_hdr, src_addr) -
336 offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
339 .type = RTE_ACL_FIELD_TYPE_MASK,
340 .size = sizeof(uint32_t),
341 .field_index = SRC4_FIELD_IPV6,
342 .input_index = SRC4_FIELD_IPV6,
343 .offset = offsetof(struct ipv6_hdr, src_addr) -
344 offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
347 .type = RTE_ACL_FIELD_TYPE_MASK,
348 .size = sizeof(uint32_t),
349 .field_index = DST1_FIELD_IPV6,
350 .input_index = DST1_FIELD_IPV6,
351 .offset = offsetof(struct ipv6_hdr, dst_addr)
352 - offsetof(struct ipv6_hdr, proto),
355 .type = RTE_ACL_FIELD_TYPE_MASK,
356 .size = sizeof(uint32_t),
357 .field_index = DST2_FIELD_IPV6,
358 .input_index = DST2_FIELD_IPV6,
359 .offset = offsetof(struct ipv6_hdr, dst_addr) -
360 offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
363 .type = RTE_ACL_FIELD_TYPE_MASK,
364 .size = sizeof(uint32_t),
365 .field_index = DST3_FIELD_IPV6,
366 .input_index = DST3_FIELD_IPV6,
367 .offset = offsetof(struct ipv6_hdr, dst_addr) -
368 offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
371 .type = RTE_ACL_FIELD_TYPE_MASK,
372 .size = sizeof(uint32_t),
373 .field_index = DST4_FIELD_IPV6,
374 .input_index = DST4_FIELD_IPV6,
375 .offset = offsetof(struct ipv6_hdr, dst_addr) -
376 offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
379 .type = RTE_ACL_FIELD_TYPE_RANGE,
380 .size = sizeof(uint16_t),
381 .field_index = SRCP_FIELD_IPV6,
382 .input_index = SRCP_FIELD_IPV6,
383 .offset = sizeof(struct ipv6_hdr) -
384 offsetof(struct ipv6_hdr, proto),
387 .type = RTE_ACL_FIELD_TYPE_RANGE,
388 .size = sizeof(uint16_t),
389 .field_index = DSTP_FIELD_IPV6,
390 .input_index = SRCP_FIELD_IPV6,
391 .offset = sizeof(struct ipv6_hdr) -
392 offsetof(struct ipv6_hdr, proto) + sizeof(uint16_t),
401 CB_FLD_SRC_PORT_HIGH,
404 CB_FLD_DST_PORT_HIGH,
410 RTE_ACL_RULE_DEF(acl4_rule, RTE_DIM(ipv4_defs));
411 RTE_ACL_RULE_DEF(acl6_rule, RTE_DIM(ipv6_defs));
413 struct acl_search_t {
414 const uint8_t *data_ipv4[MAX_PKT_BURST];
415 struct rte_mbuf *m_ipv4[MAX_PKT_BURST];
416 uint32_t res_ipv4[MAX_PKT_BURST];
419 const uint8_t *data_ipv6[MAX_PKT_BURST];
420 struct rte_mbuf *m_ipv6[MAX_PKT_BURST];
421 uint32_t res_ipv6[MAX_PKT_BURST];
426 char mapped[NB_SOCKETS];
427 struct rte_acl_ctx *acx_ipv4[NB_SOCKETS];
428 struct rte_acl_ctx *acx_ipv6[NB_SOCKETS];
429 #ifdef L3FWDACL_DEBUG
430 struct acl4_rule *rule_ipv4;
431 struct acl6_rule *rule_ipv6;
436 const char *rule_ipv4_name;
437 const char *rule_ipv6_name;
441 const char cb_port_delim[] = ":";
444 print_one_ipv4_rule(struct acl4_rule *rule, int extra)
446 unsigned char a, b, c, d;
448 uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32,
450 printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d,
451 rule->field[SRC_FIELD_IPV4].mask_range.u32);
452 uint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32,
454 printf("%hhu.%hhu.%hhu.%hhu/%u ", a, b, c, d,
455 rule->field[DST_FIELD_IPV4].mask_range.u32);
456 printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ",
457 rule->field[SRCP_FIELD_IPV4].value.u16,
458 rule->field[SRCP_FIELD_IPV4].mask_range.u16,
459 rule->field[DSTP_FIELD_IPV4].value.u16,
460 rule->field[DSTP_FIELD_IPV4].mask_range.u16,
461 rule->field[PROTO_FIELD_IPV4].value.u8,
462 rule->field[PROTO_FIELD_IPV4].mask_range.u8);
464 printf("0x%x-0x%x-0x%x ",
465 rule->data.category_mask,
467 rule->data.userdata);
471 print_one_ipv6_rule(struct acl6_rule *rule, int extra)
473 unsigned char a, b, c, d;
475 uint32_t_to_char(rule->field[SRC1_FIELD_IPV6].value.u32,
477 printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
478 uint32_t_to_char(rule->field[SRC2_FIELD_IPV6].value.u32,
480 printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
481 uint32_t_to_char(rule->field[SRC3_FIELD_IPV6].value.u32,
483 printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
484 uint32_t_to_char(rule->field[SRC4_FIELD_IPV6].value.u32,
486 printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
487 rule->field[SRC1_FIELD_IPV6].mask_range.u32
488 + rule->field[SRC2_FIELD_IPV6].mask_range.u32
489 + rule->field[SRC3_FIELD_IPV6].mask_range.u32
490 + rule->field[SRC4_FIELD_IPV6].mask_range.u32);
492 uint32_t_to_char(rule->field[DST1_FIELD_IPV6].value.u32,
494 printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
495 uint32_t_to_char(rule->field[DST2_FIELD_IPV6].value.u32,
497 printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
498 uint32_t_to_char(rule->field[DST3_FIELD_IPV6].value.u32,
500 printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
501 uint32_t_to_char(rule->field[DST4_FIELD_IPV6].value.u32,
503 printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
504 rule->field[DST1_FIELD_IPV6].mask_range.u32
505 + rule->field[DST2_FIELD_IPV6].mask_range.u32
506 + rule->field[DST3_FIELD_IPV6].mask_range.u32
507 + rule->field[DST4_FIELD_IPV6].mask_range.u32);
509 printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ",
510 rule->field[SRCP_FIELD_IPV6].value.u16,
511 rule->field[SRCP_FIELD_IPV6].mask_range.u16,
512 rule->field[DSTP_FIELD_IPV6].value.u16,
513 rule->field[DSTP_FIELD_IPV6].mask_range.u16,
514 rule->field[PROTO_FIELD_IPV6].value.u8,
515 rule->field[PROTO_FIELD_IPV6].mask_range.u8);
517 printf("0x%x-0x%x-0x%x ",
518 rule->data.category_mask,
520 rule->data.userdata);
523 /* Bypass comment and empty lines */
525 is_bypass_line(char *buff)
530 if (buff[0] == COMMENT_LEAD_CHAR)
533 while (buff[i] != '\0') {
534 if (!isspace(buff[i]))
541 #ifdef L3FWDACL_DEBUG
543 dump_acl4_rule(struct rte_mbuf *m, uint32_t sig)
545 uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
546 unsigned char a, b, c, d;
547 struct ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
549 sizeof(struct ether_hdr));
551 uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
552 printf("Packet Src:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);
553 uint32_t_to_char(rte_bswap32(ipv4_hdr->dst_addr), &a, &b, &c, &d);
554 printf("Dst:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);
556 printf("Src port:%hu,Dst port:%hu ",
557 rte_bswap16(*(uint16_t *)(ipv4_hdr + 1)),
558 rte_bswap16(*((uint16_t *)(ipv4_hdr + 1) + 1)));
559 printf("hit ACL %d - ", offset);
561 print_one_ipv4_rule(acl_config.rule_ipv4 + offset, 1);
567 dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
570 uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
571 struct ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
573 sizeof(struct ether_hdr));
575 printf("Packet Src");
576 for (i = 0; i < RTE_DIM(ipv6_hdr->src_addr); i += sizeof(uint16_t))
578 ipv6_hdr->src_addr[i], ipv6_hdr->src_addr[i + 1]);
581 for (i = 0; i < RTE_DIM(ipv6_hdr->dst_addr); i += sizeof(uint16_t))
583 ipv6_hdr->dst_addr[i], ipv6_hdr->dst_addr[i + 1]);
585 printf("\nSrc port:%hu,Dst port:%hu ",
586 rte_bswap16(*(uint16_t *)(ipv6_hdr + 1)),
587 rte_bswap16(*((uint16_t *)(ipv6_hdr + 1) + 1)));
588 printf("hit ACL %d - ", offset);
590 print_one_ipv6_rule(acl_config.rule_ipv6 + offset, 1);
594 #endif /* L3FWDACL_DEBUG */
597 dump_ipv4_rules(struct acl4_rule *rule, int num, int extra)
601 for (i = 0; i < num; i++, rule++) {
602 printf("\t%d:", i + 1);
603 print_one_ipv4_rule(rule, extra);
609 dump_ipv6_rules(struct acl6_rule *rule, int num, int extra)
613 for (i = 0; i < num; i++, rule++) {
614 printf("\t%d:", i + 1);
615 print_one_ipv6_rule(rule, extra);
620 #ifdef DO_RFC_1812_CHECKS
622 prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
625 struct ipv4_hdr *ipv4_hdr;
626 struct rte_mbuf *pkt = pkts_in[index];
628 if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
629 ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
630 sizeof(struct ether_hdr));
632 /* Check to make sure the packet is valid (RFC1812) */
633 if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt_len) >= 0) {
635 /* Update time to live and header checksum */
636 --(ipv4_hdr->time_to_live);
637 ++(ipv4_hdr->hdr_checksum);
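/*
 * Cheap incremental checksum fix-up: decrementing the TTL lowers the
 * big-endian header sum by 0x0100, and on a little-endian host
 * incrementing the stored 16-bit checksum adds 0x0100 back in network
 * order. This skips the one's-complement carry handling of a full
 * RFC 1624 update, so it is an approximation rather than an exact update.
 */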
639 /* Fill acl structure */
640 acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
641 acl->m_ipv4[(acl->num_ipv4)++] = pkt;
644 /* Not a valid IPv4 packet */
645 rte_pktmbuf_free(pkt);
647 } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
648 /* Fill acl structure */
649 acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
650 acl->m_ipv6[(acl->num_ipv6)++] = pkt;
653 /* Unknown type, drop the packet */
654 rte_pktmbuf_free(pkt);
660 prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
663 struct rte_mbuf *pkt = pkts_in[index];
665 if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
666 /* Fill acl structure */
667 acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
668 acl->m_ipv4[(acl->num_ipv4)++] = pkt;
670 } else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
671 /* Fill acl structure */
672 acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
673 acl->m_ipv6[(acl->num_ipv6)++] = pkt;
675 /* Unknown type, drop the packet */
676 rte_pktmbuf_free(pkt);
679 #endif /* DO_RFC_1812_CHECKS */
682 prepare_acl_parameter(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
690 /* Prefetch first packets */
691 for (i = 0; i < PREFETCH_OFFSET && i < nb_rx; i++) {
692 rte_prefetch0(rte_pktmbuf_mtod(
693 pkts_in[i], void *));
696 for (i = 0; i < (nb_rx - PREFETCH_OFFSET); i++) {
697 rte_prefetch0(rte_pktmbuf_mtod(pkts_in[
698 i + PREFETCH_OFFSET], void *));
699 prepare_one_packet(pkts_in, acl, i);
702 /* Process remaining packets */
703 for (; i < nb_rx; i++)
704 prepare_one_packet(pkts_in, acl, i);
708 send_one_packet(struct rte_mbuf *m, uint32_t res)
710 if (likely((res & ACL_DENY_SIGNATURE) == 0 && res != 0)) {
711 /* forward packets */
712 send_single_packet(m,
713 (uint8_t)(res - FWD_PORT_SHIFT));
715 /* in the ACL list, drop it */
716 #ifdef L3FWDACL_DEBUG
717 if ((res & ACL_DENY_SIGNATURE) != 0) {
718 if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
719 dump_acl4_rule(m, res);
720 else if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
721 dump_acl6_rule(m, res);
731 send_packets(struct rte_mbuf **m, uint32_t *res, int num)
735 /* Prefetch first packets */
736 for (i = 0; i < PREFETCH_OFFSET && i < num; i++) {
737 rte_prefetch0(rte_pktmbuf_mtod(
741 for (i = 0; i < (num - PREFETCH_OFFSET); i++) {
742 rte_prefetch0(rte_pktmbuf_mtod(m[
743 i + PREFETCH_OFFSET], void *));
744 send_one_packet(m[i], res[i]);
747 /* Process remaining packets */
749 send_one_packet(m[i], res[i]);
753 * Parses an IPv6 address, expects the following format:
754 * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
757 parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
760 uint32_t addr[IPV6_ADDR_U16];
762 GET_CB_FIELD(in, addr[0], 16, UINT16_MAX, ':');
763 GET_CB_FIELD(in, addr[1], 16, UINT16_MAX, ':');
764 GET_CB_FIELD(in, addr[2], 16, UINT16_MAX, ':');
765 GET_CB_FIELD(in, addr[3], 16, UINT16_MAX, ':');
766 GET_CB_FIELD(in, addr[4], 16, UINT16_MAX, ':');
767 GET_CB_FIELD(in, addr[5], 16, UINT16_MAX, ':');
768 GET_CB_FIELD(in, addr[6], 16, UINT16_MAX, ':');
769 GET_CB_FIELD(in, addr[7], 16, UINT16_MAX, dlm);
773 v[0] = (addr[0] << 16) + addr[1];
774 v[1] = (addr[2] << 16) + addr[3];
775 v[2] = (addr[4] << 16) + addr[5];
776 v[3] = (addr[6] << 16) + addr[7];
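/*
 * Example (illustrative): "2001:0db8:0000:0000:0000:0000:0000:0001" yields
 * v[0] = 0x20010db8, v[1] = 0, v[2] = 0, v[3] = 0x00000001.
 * The parser requires all eight 16-bit groups to be written out;
 * "::" compression is not expanded here.
 */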
782 parse_ipv6_net(const char *in, struct rte_acl_field field[4])
787 const uint32_t nbu32 = sizeof(uint32_t) * CHAR_BIT;
790 rc = parse_ipv6_addr(in, &mp, v, '/');
795 GET_CB_FIELD(mp, m, 0, CHAR_BIT * sizeof(v), 0);
797 /* put all together. */
798 for (i = 0; i != RTE_DIM(v); i++) {
799 if (m >= (i + 1) * nbu32)
800 field[i].mask_range.u32 = nbu32;
802 field[i].mask_range.u32 = m > (i * nbu32) ?
805 field[i].value.u32 = v[i];
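/*
 * Example (illustrative): a /48 prefix yields per-field mask lengths of
 * 32, 16, 0 and 0 bits across field[0..3].
 */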
812 parse_cb_ipv6_rule(char *str, struct rte_acl_rule *v, int has_userdata)
815 char *s, *sp, *in[CB_FLD_NUM];
816 static const char *dlm = " \t\n";
817 int dim = has_userdata ? CB_FLD_NUM : CB_FLD_USERDATA;
820 for (i = 0; i != dim; i++, s = NULL) {
821 in[i] = strtok_r(s, dlm, &sp);
826 rc = parse_ipv6_net(in[CB_FLD_SRC_ADDR], v->field + SRC1_FIELD_IPV6);
828 acl_log("failed to read source address/mask: %s\n",
829 in[CB_FLD_SRC_ADDR]);
833 rc = parse_ipv6_net(in[CB_FLD_DST_ADDR], v->field + DST1_FIELD_IPV6);
835 acl_log("failed to read destination address/mask: %s\n",
836 in[CB_FLD_DST_ADDR]);
841 GET_CB_FIELD(in[CB_FLD_SRC_PORT_LOW],
842 v->field[SRCP_FIELD_IPV6].value.u16,
844 GET_CB_FIELD(in[CB_FLD_SRC_PORT_HIGH],
845 v->field[SRCP_FIELD_IPV6].mask_range.u16,
848 if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
849 sizeof(cb_port_delim)) != 0)
852 /* destination port. */
853 GET_CB_FIELD(in[CB_FLD_DST_PORT_LOW],
854 v->field[DSTP_FIELD_IPV6].value.u16,
856 GET_CB_FIELD(in[CB_FLD_DST_PORT_HIGH],
857 v->field[DSTP_FIELD_IPV6].mask_range.u16,
860 if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
861 sizeof(cb_port_delim)) != 0)
864 if (v->field[SRCP_FIELD_IPV6].mask_range.u16
865 < v->field[SRCP_FIELD_IPV6].value.u16
866 || v->field[DSTP_FIELD_IPV6].mask_range.u16
867 < v->field[DSTP_FIELD_IPV6].value.u16)
870 GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV6].value.u8,
872 GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV6].mask_range.u8,
876 GET_CB_FIELD(in[CB_FLD_USERDATA], v->data.userdata,
883 * Parse ClassBench rules file.
885 * '@'<src_ipv4_addr>'/'<masklen> <space> \
886 * <dst_ipv4_addr>'/'<masklen> <space> \
887 * <src_port_low> <space> ":" <src_port_high> <space> \
888 * <dst_port_low> <space> ":" <dst_port_high> <space> \
892 parse_ipv4_net(const char *in, uint32_t *addr, uint32_t *mask_len)
894 uint8_t a, b, c, d, m;
896 GET_CB_FIELD(in, a, 0, UINT8_MAX, '.');
897 GET_CB_FIELD(in, b, 0, UINT8_MAX, '.');
898 GET_CB_FIELD(in, c, 0, UINT8_MAX, '.');
899 GET_CB_FIELD(in, d, 0, UINT8_MAX, '/');
900 GET_CB_FIELD(in, m, 0, sizeof(uint32_t) * CHAR_BIT, 0);
902 addr[0] = IPv4(a, b, c, d);
909 parse_cb_ipv4vlan_rule(char *str, struct rte_acl_rule *v, int has_userdata)
912 char *s, *sp, *in[CB_FLD_NUM];
913 static const char *dlm = " \t\n";
914 int dim = has_userdata ? CB_FLD_NUM : CB_FLD_USERDATA;
917 for (i = 0; i != dim; i++, s = NULL) {
918 in[i] = strtok_r(s, dlm, &sp);
923 rc = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
924 &v->field[SRC_FIELD_IPV4].value.u32,
925 &v->field[SRC_FIELD_IPV4].mask_range.u32);
927 acl_log("failed to read source address/mask: %s\n",
928 in[CB_FLD_SRC_ADDR]);
932 rc = parse_ipv4_net(in[CB_FLD_DST_ADDR],
933 &v->field[DST_FIELD_IPV4].value.u32,
934 &v->field[DST_FIELD_IPV4].mask_range.u32);
936 acl_log("failed to read destination address/mask: %s\n",
937 in[CB_FLD_DST_ADDR]);
941 GET_CB_FIELD(in[CB_FLD_SRC_PORT_LOW],
942 v->field[SRCP_FIELD_IPV4].value.u16,
944 GET_CB_FIELD(in[CB_FLD_SRC_PORT_HIGH],
945 v->field[SRCP_FIELD_IPV4].mask_range.u16,
948 if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
949 sizeof(cb_port_delim)) != 0)
952 GET_CB_FIELD(in[CB_FLD_DST_PORT_LOW],
953 v->field[DSTP_FIELD_IPV4].value.u16,
955 GET_CB_FIELD(in[CB_FLD_DST_PORT_HIGH],
956 v->field[DSTP_FIELD_IPV4].mask_range.u16,
959 if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
960 sizeof(cb_port_delim)) != 0)
963 if (v->field[SRCP_FIELD_IPV4].mask_range.u16
964 < v->field[SRCP_FIELD_IPV4].value.u16
965 || v->field[DSTP_FIELD_IPV4].mask_range.u16
966 < v->field[DSTP_FIELD_IPV4].value.u16)
969 GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV4].value.u8,
971 GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV4].mask_range.u8,
975 GET_CB_FIELD(in[CB_FLD_USERDATA], v->data.userdata, 0,
982 add_rules(const char *rule_path,
983 struct rte_acl_rule **proute_base,
984 unsigned int *proute_num,
985 struct rte_acl_rule **pacl_base,
986 unsigned int *pacl_num, uint32_t rule_size,
987 int (*parser)(char *, struct rte_acl_rule*, int))
989 uint8_t *acl_rules, *route_rules;
990 struct rte_acl_rule *next;
991 unsigned int acl_num = 0, route_num = 0, total_num = 0;
992 unsigned int acl_cnt = 0, route_cnt = 0;
994 FILE *fh = fopen(rule_path, "rb");
999 rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
1002 while ((fgets(buff, LINE_MAX, fh) != NULL)) {
1003 if (buff[0] == ROUTE_LEAD_CHAR)
1005 else if (buff[0] == ACL_LEAD_CHAR)
1010 rte_exit(EXIT_FAILURE, "Not find any route entries in %s!\n",
1013 val = fseek(fh, 0, SEEK_SET);
1015 rte_exit(EXIT_FAILURE, "%s: File seek operation failed\n",
1019 acl_rules = calloc(acl_num, rule_size);
1021 if (NULL == acl_rules)
1022 rte_exit(EXIT_FAILURE, "%s: failed to allocate memory\n",
1025 route_rules = calloc(route_num, rule_size);
1027 if (NULL == route_rules)
1028 rte_exit(EXIT_FAILURE, "%s: failed to allocate memory\n",
1032 while (fgets(buff, LINE_MAX, fh) != NULL) {
1035 if (is_bypass_line(buff))
1041 if (s == ROUTE_LEAD_CHAR)
1042 next = (struct rte_acl_rule *)(route_rules +
1043 route_cnt * rule_size);
1046 else if (s == ACL_LEAD_CHAR)
1047 next = (struct rte_acl_rule *)(acl_rules +
1048 acl_cnt * rule_size);
1052 rte_exit(EXIT_FAILURE,
1053 "%s Line %u: should start with leading "
1055 rule_path, i, ROUTE_LEAD_CHAR, ACL_LEAD_CHAR);
1057 if (parser(buff + 1, next, s == ROUTE_LEAD_CHAR) != 0)
1058 rte_exit(EXIT_FAILURE,
1059 "%s Line %u: parse rules error\n",
1062 if (s == ROUTE_LEAD_CHAR) {
1063 /* Check the forwarding port number */
1064 if ((enabled_port_mask & (1 << next->data.userdata)) ==
1066 rte_exit(EXIT_FAILURE,
1067 "%s Line %u: fwd number illegal:%u\n",
1068 rule_path, i, next->data.userdata);
1069 next->data.userdata += FWD_PORT_SHIFT;
1072 next->data.userdata = ACL_DENY_SIGNATURE + acl_cnt;
1076 next->data.priority = RTE_ACL_MAX_PRIORITY - total_num;
1077 next->data.category_mask = -1;
1083 *pacl_base = (struct rte_acl_rule *)acl_rules;
1084 *pacl_num = acl_num;
1085 *proute_base = (struct rte_acl_rule *)route_rules;
1086 *proute_num = route_cnt;
1092 dump_acl_config(void)
1094 printf("ACL option are:\n");
1095 printf(OPTION_RULE_IPV4": %s\n", parm_config.rule_ipv4_name);
1096 printf(OPTION_RULE_IPV6": %s\n", parm_config.rule_ipv6_name);
1097 printf(OPTION_SCALAR": %d\n", parm_config.scalar);
1101 check_acl_config(void)
1103 if (parm_config.rule_ipv4_name == NULL) {
1104 acl_log("ACL IPv4 rule file not specified\n");
1106 } else if (parm_config.rule_ipv6_name == NULL) {
1107 acl_log("ACL IPv6 rule file not specified\n");
1114 static struct rte_acl_ctx*
1115 setup_acl(struct rte_acl_rule *route_base,
1116 struct rte_acl_rule *acl_base, unsigned int route_num,
1117 unsigned int acl_num, int ipv6, int socketid)
1119 char name[PATH_MAX];
1120 struct rte_acl_param acl_param;
1121 struct rte_acl_config acl_build_param;
1122 struct rte_acl_ctx *context;
1123 int dim = ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs);
1125 /* Create ACL contexts */
1126 snprintf(name, sizeof(name), "%s%d",
1127 ipv6 ? L3FWD_ACL_IPV6_NAME : L3FWD_ACL_IPV4_NAME,
1130 acl_param.name = name;
1131 acl_param.socket_id = socketid;
1132 acl_param.rule_size = RTE_ACL_RULE_SZ(dim);
1133 acl_param.max_rule_num = MAX_ACL_RULE_NUM;
1135 if ((context = rte_acl_create(&acl_param)) == NULL)
1136 rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");
1138 if (parm_config.scalar && rte_acl_set_ctx_classify(context,
1139 RTE_ACL_CLASSIFY_SCALAR) != 0)
1140 rte_exit(EXIT_FAILURE,
1141 "Failed to setup classify method for ACL context\n");
1143 if (rte_acl_add_rules(context, route_base, route_num) < 0)
1144 rte_exit(EXIT_FAILURE, "add rules failed\n");
1146 if (rte_acl_add_rules(context, acl_base, acl_num) < 0)
1147 rte_exit(EXIT_FAILURE, "add rules failed\n");
1149 /* Perform builds */
1150 memset(&acl_build_param, 0, sizeof(acl_build_param));
1152 acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
1153 acl_build_param.num_fields = dim;
1154 memcpy(&acl_build_param.defs, ipv6 ? ipv6_defs : ipv4_defs,
1155 ipv6 ? sizeof(ipv6_defs) : sizeof(ipv4_defs));
1157 if (rte_acl_build(context, &acl_build_param) != 0)
1158 rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
1160 rte_acl_dump(context);
1171 struct rte_acl_rule *acl_base_ipv4, *route_base_ipv4,
1172 *acl_base_ipv6, *route_base_ipv6;
1173 unsigned int acl_num_ipv4 = 0, route_num_ipv4 = 0,
1174 acl_num_ipv6 = 0, route_num_ipv6 = 0;
1176 if (check_acl_config() != 0)
1177 rte_exit(EXIT_FAILURE, "Failed to get valid ACL options\n");
1181 /* Load rules from the input file */
1182 if (add_rules(parm_config.rule_ipv4_name, &route_base_ipv4,
1183 &route_num_ipv4, &acl_base_ipv4, &acl_num_ipv4,
1184 sizeof(struct acl4_rule), &parse_cb_ipv4vlan_rule) < 0)
1185 rte_exit(EXIT_FAILURE, "Failed to add rules\n");
1187 acl_log("IPv4 Route entries %u:\n", route_num_ipv4);
1188 dump_ipv4_rules((struct acl4_rule *)route_base_ipv4, route_num_ipv4, 1);
1190 acl_log("IPv4 ACL entries %u:\n", acl_num_ipv4);
1191 dump_ipv4_rules((struct acl4_rule *)acl_base_ipv4, acl_num_ipv4, 1);
1193 if (add_rules(parm_config.rule_ipv6_name, &route_base_ipv6,
1195 &acl_base_ipv6, &acl_num_ipv6,
1196 sizeof(struct acl6_rule), &parse_cb_ipv6_rule) < 0)
1197 rte_exit(EXIT_FAILURE, "Failed to add rules\n");
1199 acl_log("IPv6 Route entries %u:\n", route_num_ipv6);
1200 dump_ipv6_rules((struct acl6_rule *)route_base_ipv6, route_num_ipv6, 1);
1202 acl_log("IPv6 ACL entries %u:\n", acl_num_ipv6);
1203 dump_ipv6_rules((struct acl6_rule *)acl_base_ipv6, acl_num_ipv6, 1);
1205 memset(&acl_config, 0, sizeof(acl_config));
1207 /* Check sockets a context should be created on */
1209 acl_config.mapped[0] = 1;
1211 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1212 if (rte_lcore_is_enabled(lcore_id) == 0)
1215 socketid = rte_lcore_to_socket_id(lcore_id);
1216 if (socketid >= NB_SOCKETS) {
1217 acl_log("Socket %d of lcore %u is out "
1219 socketid, lcore_id, NB_SOCKETS);
1220 free(route_base_ipv4);
1221 free(route_base_ipv6);
1222 free(acl_base_ipv4);
1223 free(acl_base_ipv6);
1227 acl_config.mapped[socketid] = 1;
1231 for (i = 0; i < NB_SOCKETS; i++) {
1232 if (acl_config.mapped[i]) {
1233 acl_config.acx_ipv4[i] = setup_acl(route_base_ipv4,
1234 acl_base_ipv4, route_num_ipv4, acl_num_ipv4,
1237 acl_config.acx_ipv6[i] = setup_acl(route_base_ipv6,
1238 acl_base_ipv6, route_num_ipv6, acl_num_ipv6,
1243 free(route_base_ipv4);
1244 free(route_base_ipv6);
1246 #ifdef L3FWDACL_DEBUG
1247 acl_config.rule_ipv4 = (struct acl4_rule *)acl_base_ipv4;
1248 acl_config.rule_ipv6 = (struct acl6_rule *)acl_base_ipv6;
1250 free(acl_base_ipv4);
1251 free(acl_base_ipv6);
1257 /***********************end of ACL part******************************/
1260 uint16_t n_rx_queue;
1261 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
1263 uint16_t tx_port_id[RTE_MAX_ETHPORTS];
1264 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
1265 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
1266 } __rte_cache_aligned;
1268 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
1270 /* Enqueue a single packet, and send burst if queue is filled */
1272 send_single_packet(struct rte_mbuf *m, uint16_t port)
1275 struct lcore_conf *qconf;
1277 lcore_id = rte_lcore_id();
1279 qconf = &lcore_conf[lcore_id];
1280 rte_eth_tx_buffer(port, qconf->tx_queue_id[port],
1281 qconf->tx_buffer[port], m);
1284 #ifdef DO_RFC_1812_CHECKS
1286 is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
1288 /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
1290 * 1. The packet length reported by the Link Layer must be large
1291 * enough to hold the minimum length legal IP datagram (20 bytes).
1293 if (link_len < sizeof(struct ipv4_hdr))
1296 /* 2. The IP checksum must be correct. */
1297 /* this is checked in H/W */
1300 * 3. The IP version number must be 4. If the version number is not 4
1301 * then the packet may be another version of IP, such as IPng or
1304 if (((pkt->version_ihl) >> 4) != 4)
1307 * 4. The IP header length field must be large enough to hold the
1308 * minimum length legal IP datagram (20 bytes = 5 words).
1310 if ((pkt->version_ihl & 0xf) < 5)
1314 * 5. The IP total length field must be large enough to hold the IP
1315 * datagram header, whose length is specified in the IP header length
1318 if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
1325 /* main processing loop */
1327 main_loop(__attribute__((unused)) void *dummy)
1329 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1331 uint64_t prev_tsc, diff_tsc, cur_tsc;
1335 struct lcore_conf *qconf;
1337 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1338 / US_PER_S * BURST_TX_DRAIN_US;
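/*
 * Worked example (assumed clock): with a 2 GHz TSC, drain_tsc is roughly
 * 2e9 / 1e6 * 100 = 200,000 cycles, i.e. one forced TX flush every ~100 us.
 */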
1341 lcore_id = rte_lcore_id();
1342 qconf = &lcore_conf[lcore_id];
1343 socketid = rte_lcore_to_socket_id(lcore_id);
1345 if (qconf->n_rx_queue == 0) {
1346 RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
1350 RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
1352 for (i = 0; i < qconf->n_rx_queue; i++) {
1354 portid = qconf->rx_queue_list[i].port_id;
1355 queueid = qconf->rx_queue_list[i].queue_id;
1356 RTE_LOG(INFO, L3FWD,
1357 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1358 lcore_id, portid, queueid);
1363 cur_tsc = rte_rdtsc();
1366 * TX burst queue drain
1368 diff_tsc = cur_tsc - prev_tsc;
1369 if (unlikely(diff_tsc > drain_tsc)) {
1370 for (i = 0; i < qconf->n_tx_port; ++i) {
1371 portid = qconf->tx_port_id[i];
1372 rte_eth_tx_buffer_flush(portid,
1373 qconf->tx_queue_id[portid],
1374 qconf->tx_buffer[portid]);
1380 * Read packet from RX queues
1382 for (i = 0; i < qconf->n_rx_queue; ++i) {
1384 portid = qconf->rx_queue_list[i].port_id;
1385 queueid = qconf->rx_queue_list[i].queue_id;
1386 nb_rx = rte_eth_rx_burst(portid, queueid,
1387 pkts_burst, MAX_PKT_BURST);
1390 struct acl_search_t acl_search;
1392 prepare_acl_parameter(pkts_burst, &acl_search,
1395 if (acl_search.num_ipv4) {
1397 acl_config.acx_ipv4[socketid],
1398 acl_search.data_ipv4,
1399 acl_search.res_ipv4,
1400 acl_search.num_ipv4,
1401 DEFAULT_MAX_CATEGORIES);
1403 send_packets(acl_search.m_ipv4,
1404 acl_search.res_ipv4,
1405 acl_search.num_ipv4);
1408 if (acl_search.num_ipv6) {
1410 acl_config.acx_ipv6[socketid],
1411 acl_search.data_ipv6,
1412 acl_search.res_ipv6,
1413 acl_search.num_ipv6,
1414 DEFAULT_MAX_CATEGORIES);
1416 send_packets(acl_search.m_ipv6,
1417 acl_search.res_ipv6,
1418 acl_search.num_ipv6);
1426 check_lcore_params(void)
1428 uint8_t queue, lcore;
1432 for (i = 0; i < nb_lcore_params; ++i) {
1433 queue = lcore_params[i].queue_id;
1434 if (queue >= MAX_RX_QUEUE_PER_PORT) {
1435 printf("invalid queue number: %hhu\n", queue);
1438 lcore = lcore_params[i].lcore_id;
1439 if (!rte_lcore_is_enabled(lcore)) {
1440 printf("error: lcore %hhu is not enabled in "
1441 "lcore mask\n", lcore);
1444 socketid = rte_lcore_to_socket_id(lcore);
1445 if (socketid != 0 && numa_on == 0) {
1446 printf("warning: lcore %hhu is on socket %d "
1455 check_port_config(void)
1460 for (i = 0; i < nb_lcore_params; ++i) {
1461 portid = lcore_params[i].port_id;
1463 if ((enabled_port_mask & (1 << portid)) == 0) {
1464 printf("port %u is not enabled in port mask\n", portid);
1467 if (!rte_eth_dev_is_valid_port(portid)) {
1468 printf("port %u is not present on the board\n", portid);
1476 get_port_n_rx_queues(const uint16_t port)
1481 for (i = 0; i < nb_lcore_params; ++i) {
1482 if (lcore_params[i].port_id == port &&
1483 lcore_params[i].queue_id > queue)
1484 queue = lcore_params[i].queue_id;
1486 return (uint8_t)(++queue);
1490 init_lcore_rx_queues(void)
1492 uint16_t i, nb_rx_queue;
1495 for (i = 0; i < nb_lcore_params; ++i) {
1496 lcore = lcore_params[i].lcore_id;
1497 nb_rx_queue = lcore_conf[lcore].n_rx_queue;
1498 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1499 printf("error: too many queues (%u) for lcore: %u\n",
1500 (unsigned)nb_rx_queue + 1, (unsigned)lcore);
1503 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1504 lcore_params[i].port_id;
1505 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1506 lcore_params[i].queue_id;
1507 lcore_conf[lcore].n_rx_queue++;
1515 print_usage(const char *prgname)
1517 printf("%s [EAL options] -- -p PORTMASK -P"
1518 "--"OPTION_RULE_IPV4"=FILE"
1519 "--"OPTION_RULE_IPV6"=FILE"
1520 " [--"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore]]"
1521 " [--"OPTION_ENBJMO" [--max-pkt-len PKTLEN]]\n"
1522 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
1523 " -P : enable promiscuous mode\n"
1524 " --"OPTION_CONFIG": (port,queue,lcore): "
1525 "rx queues configuration\n"
1526 " --"OPTION_NONUMA": optional, disable numa awareness\n"
1527 " --"OPTION_ENBJMO": enable jumbo frame"
1528 " which max packet len is PKTLEN in decimal (64-9600)\n"
1529 " --"OPTION_RULE_IPV4"=FILE: specify the ipv4 rules entries "
1531 "Each rule occupy one line. "
1532 "2 kinds of rules are supported. "
1533 "One is ACL entry at while line leads with character '%c', "
1534 "another is route entry at while line leads with "
1536 " --"OPTION_RULE_IPV6"=FILE: specify the ipv6 rules "
1538 " --"OPTION_SCALAR": Use scalar function to do lookup\n",
1539 prgname, ACL_LEAD_CHAR, ROUTE_LEAD_CHAR);
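/*
 * Illustrative invocation (assumed core/port layout and rule file paths):
 *   ./l3fwd-acl -l 1,2 -n 4 -- -p 0x3 -P \
 *       --config="(0,0,1),(1,0,2)" \
 *       --rule_ipv4="./rule_ipv4.db" --rule_ipv6="./rule_ipv6.db" --scalar
 */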
1543 parse_max_pkt_len(const char *pktlen)
1548 /* parse decimal string */
1549 len = strtoul(pktlen, &end, 10);
1550 if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0'))
1560 parse_portmask(const char *portmask)
1565 /* parse hexadecimal string */
1566 pm = strtoul(portmask, &end, 16);
1567 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1577 parse_config(const char *q_arg)
1580 const char *p, *p0 = q_arg;
1588 unsigned long int_fld[_NUM_FLD];
1589 char *str_fld[_NUM_FLD];
1593 nb_lcore_params = 0;
1595 while ((p = strchr(p0, '(')) != NULL) {
1597 if ((p0 = strchr(p, ')')) == NULL)
1601 if (size >= sizeof(s))
1604 snprintf(s, sizeof(s), "%.*s", size, p);
1605 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1608 for (i = 0; i < _NUM_FLD; i++) {
1610 int_fld[i] = strtoul(str_fld[i], &end, 0);
1611 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1614 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1615 printf("exceeded max number of lcore params: %hu\n",
1619 lcore_params_array[nb_lcore_params].port_id =
1620 (uint8_t)int_fld[FLD_PORT];
1621 lcore_params_array[nb_lcore_params].queue_id =
1622 (uint8_t)int_fld[FLD_QUEUE];
1623 lcore_params_array[nb_lcore_params].lcore_id =
1624 (uint8_t)int_fld[FLD_LCORE];
1627 lcore_params = lcore_params_array;
1631 /* Parse the argument given in the command line of the application */
1633 parse_args(int argc, char **argv)
1638 char *prgname = argv[0];
1639 static struct option lgopts[] = {
1640 {OPTION_CONFIG, 1, 0, 0},
1641 {OPTION_NONUMA, 0, 0, 0},
1642 {OPTION_ENBJMO, 0, 0, 0},
1643 {OPTION_RULE_IPV4, 1, 0, 0},
1644 {OPTION_RULE_IPV6, 1, 0, 0},
1645 {OPTION_SCALAR, 0, 0, 0},
1651 while ((opt = getopt_long(argc, argvopt, "p:P",
1652 lgopts, &option_index)) != EOF) {
1657 enabled_port_mask = parse_portmask(optarg);
1658 if (enabled_port_mask == 0) {
1659 printf("invalid portmask\n");
1660 print_usage(prgname);
1665 printf("Promiscuous mode selected\n");
1671 if (!strncmp(lgopts[option_index].name,
1673 sizeof(OPTION_CONFIG))) {
1674 ret = parse_config(optarg);
1676 printf("invalid config\n");
1677 print_usage(prgname);
1682 if (!strncmp(lgopts[option_index].name,
1684 sizeof(OPTION_NONUMA))) {
1685 printf("numa is disabled\n");
1689 if (!strncmp(lgopts[option_index].name,
1690 OPTION_ENBJMO, sizeof(OPTION_ENBJMO))) {
1691 struct option lenopts = {
1698 printf("jumbo frame is enabled\n");
1699 port_conf.rxmode.offloads |=
1700 DEV_RX_OFFLOAD_JUMBO_FRAME;
1701 port_conf.txmode.offloads |=
1702 DEV_TX_OFFLOAD_MULTI_SEGS;
1705 * if no max-pkt-len set, then use the
1706 * default value ETHER_MAX_LEN
1708 if (0 == getopt_long(argc, argvopt, "",
1709 &lenopts, &option_index)) {
1710 ret = parse_max_pkt_len(optarg);
1712 (ret > MAX_JUMBO_PKT_LEN)) {
1713 printf("invalid packet "
1715 print_usage(prgname);
1718 port_conf.rxmode.max_rx_pkt_len = ret;
1720 printf("set jumbo frame max packet length "
1723 port_conf.rxmode.max_rx_pkt_len);
1726 if (!strncmp(lgopts[option_index].name,
1728 sizeof(OPTION_RULE_IPV4)))
1729 parm_config.rule_ipv4_name = optarg;
1731 if (!strncmp(lgopts[option_index].name,
1733 sizeof(OPTION_RULE_IPV6))) {
1734 parm_config.rule_ipv6_name = optarg;
1737 if (!strncmp(lgopts[option_index].name,
1738 OPTION_SCALAR, sizeof(OPTION_SCALAR)))
1739 parm_config.scalar = 1;
1745 print_usage(prgname);
1751 argv[optind-1] = prgname;
1754 optind = 1; /* reset getopt lib */
1759 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1761 char buf[ETHER_ADDR_FMT_SIZE];
1762 ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1763 printf("%s%s", name, buf);
1767 init_mem(unsigned nb_mbuf)
1773 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1774 if (rte_lcore_is_enabled(lcore_id) == 0)
1778 socketid = rte_lcore_to_socket_id(lcore_id);
1782 if (socketid >= NB_SOCKETS) {
1783 rte_exit(EXIT_FAILURE,
1784 "Socket %d of lcore %u is out of range %d\n",
1785 socketid, lcore_id, NB_SOCKETS);
1787 if (pktmbuf_pool[socketid] == NULL) {
1788 snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
1789 pktmbuf_pool[socketid] =
1790 rte_pktmbuf_pool_create(s, nb_mbuf,
1791 MEMPOOL_CACHE_SIZE, 0,
1792 RTE_MBUF_DEFAULT_BUF_SIZE,
1794 if (pktmbuf_pool[socketid] == NULL)
1795 rte_exit(EXIT_FAILURE,
1796 "Cannot init mbuf pool on socket %d\n",
1799 printf("Allocated mbuf pool on socket %d\n",
1806 /* Check the link status of all ports for up to 9 s, and print the final status */
1808 check_all_ports_link_status(uint32_t port_mask)
1810 #define CHECK_INTERVAL 100 /* 100ms */
1811 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1813 uint8_t count, all_ports_up, print_flag = 0;
1814 struct rte_eth_link link;
1816 printf("\nChecking link status");
1818 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1820 RTE_ETH_FOREACH_DEV(portid) {
1821 if ((port_mask & (1 << portid)) == 0)
1823 memset(&link, 0, sizeof(link));
1824 rte_eth_link_get_nowait(portid, &link);
1825 /* print link status if flag set */
1826 if (print_flag == 1) {
1827 if (link.link_status)
1829 "Port%d Link Up. Speed %u Mbps %s\n",
1830 portid, link.link_speed,
1831 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1832 ("full-duplex") : ("half-duplex\n"));
1834 printf("Port %d Link Down\n", portid);
1837 /* clear all_ports_up flag if any link down */
1838 if (link.link_status == ETH_LINK_DOWN) {
1843 /* after finally printing all link status, get out */
1844 if (print_flag == 1)
1847 if (all_ports_up == 0) {
1850 rte_delay_ms(CHECK_INTERVAL);
1853 /* set the print_flag if all ports up or timeout */
1854 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1862 main(int argc, char **argv)
1864 struct lcore_conf *qconf;
1865 struct rte_eth_dev_info dev_info;
1866 struct rte_eth_txconf *txconf;
1871 uint32_t n_tx_queue, nb_lcores;
1873 uint8_t nb_rx_queue, queue, socketid;
1876 ret = rte_eal_init(argc, argv);
1878 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1882 /* parse application arguments (after the EAL ones) */
1883 ret = parse_args(argc, argv);
1885 rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
1887 if (check_lcore_params() < 0)
1888 rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
1890 ret = init_lcore_rx_queues();
1892 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1894 nb_ports = rte_eth_dev_count_avail();
1896 if (check_port_config() < 0)
1897 rte_exit(EXIT_FAILURE, "check_port_config failed\n");
1899 /* Add ACL rules and route entries, build trie */
1900 if (app_acl_init() < 0)
1901 rte_exit(EXIT_FAILURE, "app_acl_init failed\n");
1903 nb_lcores = rte_lcore_count();
1905 /* initialize all ports */
1906 RTE_ETH_FOREACH_DEV(portid) {
1907 struct rte_eth_conf local_port_conf = port_conf;
1909 /* skip ports that are not enabled */
1910 if ((enabled_port_mask & (1 << portid)) == 0) {
1911 printf("\nSkipping disabled port %d\n", portid);
1916 printf("Initializing port %d ... ", portid);
1919 nb_rx_queue = get_port_n_rx_queues(portid);
1920 n_tx_queue = nb_lcores;
1921 if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
1922 n_tx_queue = MAX_TX_QUEUE_PER_PORT;
1923 printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
1924 nb_rx_queue, (unsigned)n_tx_queue);
1925 rte_eth_dev_info_get(portid, &dev_info);
1926 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1927 local_port_conf.txmode.offloads |=
1928 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1929 ret = rte_eth_dev_configure(portid, nb_rx_queue,
1930 (uint16_t)n_tx_queue, &local_port_conf);
1932 rte_exit(EXIT_FAILURE,
1933 "Cannot configure device: err=%d, port=%d\n",
1936 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
1939 rte_exit(EXIT_FAILURE,
1940 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%d\n",
1943 rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
1944 print_ethaddr(" Address:", &ports_eth_addr[portid]);
1948 ret = init_mem(NB_MBUF);
1950 rte_exit(EXIT_FAILURE, "init_mem failed\n");
1952 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1953 if (rte_lcore_is_enabled(lcore_id) == 0)
1956 /* Initialize TX buffers */
1957 qconf = &lcore_conf[lcore_id];
1958 qconf->tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
1959 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
1960 rte_eth_dev_socket_id(portid));
1961 if (qconf->tx_buffer[portid] == NULL)
1962 rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
1965 rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
1968 /* init one TX queue per (lcore, port) pair */
1970 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1971 if (rte_lcore_is_enabled(lcore_id) == 0)
1975 socketid = (uint8_t)
1976 rte_lcore_to_socket_id(lcore_id);
1980 printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
1983 rte_eth_dev_info_get(portid, &dev_info);
1984 txconf = &dev_info.default_txconf;
1985 txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
1986 txconf->offloads = local_port_conf.txmode.offloads;
1987 ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
1990 rte_exit(EXIT_FAILURE,
1991 "rte_eth_tx_queue_setup: err=%d, "
1992 "port=%d\n", ret, portid);
1994 qconf = &lcore_conf[lcore_id];
1995 qconf->tx_queue_id[portid] = queueid;
1998 qconf->tx_port_id[qconf->n_tx_port] = portid;
2004 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2005 if (rte_lcore_is_enabled(lcore_id) == 0)
2007 qconf = &lcore_conf[lcore_id];
2008 printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
2010 /* init RX queues */
2011 for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
2012 struct rte_eth_dev *dev;
2013 struct rte_eth_conf *conf;
2014 struct rte_eth_rxconf rxq_conf;
2016 portid = qconf->rx_queue_list[queue].port_id;
2017 queueid = qconf->rx_queue_list[queue].queue_id;
2018 dev = &rte_eth_devices[portid];
2019 conf = &dev->data->dev_conf;
2022 socketid = (uint8_t)
2023 rte_lcore_to_socket_id(lcore_id);
2027 printf("rxq=%d,%d,%d ", portid, queueid, socketid);
2030 rte_eth_dev_info_get(portid, &dev_info);
2031 rxq_conf = dev_info.default_rxconf;
2032 rxq_conf.offloads = conf->rxmode.offloads;
2033 ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
2034 socketid, &rxq_conf,
2035 pktmbuf_pool[socketid]);
2037 rte_exit(EXIT_FAILURE,
2038 "rte_eth_rx_queue_setup: err=%d,"
2039 "port=%d\n", ret, portid);
2046 RTE_ETH_FOREACH_DEV(portid) {
2047 if ((enabled_port_mask & (1 << portid)) == 0)
2051 ret = rte_eth_dev_start(portid);
2053 rte_exit(EXIT_FAILURE,
2054 "rte_eth_dev_start: err=%d, port=%d\n",
2058 * If enabled, put device in promiscuous mode.
2059 * This allows IO forwarding mode to forward packets
2060 * to itself through 2 cross-connected ports of the
2064 rte_eth_promiscuous_enable(portid);
2067 check_all_ports_link_status(enabled_port_mask);
2069 /* launch per-lcore init on every lcore */
2070 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2071 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2072 if (rte_eal_wait_lcore(lcore_id) < 0)