1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
14 #include <sys/queue.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
45 #include <rte_ip_frag.h>
51 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
53 #define MAX_JUMBO_PKT_LEN 9600
55 #define MEMPOOL_CACHE_SIZE 256
57 #define NB_MBUF (32000)
59 #define CDEV_QUEUE_DESC 2048
60 #define CDEV_MAP_ENTRIES 16384
61 #define CDEV_MP_NB_OBJS 1024
62 #define CDEV_MP_CACHE_SZ 64
63 #define MAX_QUEUE_PAIRS 1
65 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
69 /* Configure how many packets ahead to prefetch when reading packets */
70 #define PREFETCH_OFFSET 3
72 #define MAX_RX_QUEUE_PER_LCORE 16
74 #define MAX_LCORE_PARAMS 1024
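/* true when the given port is part of the unprotected port mask */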
76 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
79 * Configurable number of RX/TX ring descriptors
81 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
82 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
83 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
84 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
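/*
 * Pack eight bytes into a uint64_t so that the first argument ends up at the
 * lowest memory address regardless of host endianness; used below to store
 * Ethernet addresses as 64-bit values.
 */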
86 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
87 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
88 (((uint64_t)((a) & 0xff) << 56) | \
89 ((uint64_t)((b) & 0xff) << 48) | \
90 ((uint64_t)((c) & 0xff) << 40) | \
91 ((uint64_t)((d) & 0xff) << 32) | \
92 ((uint64_t)((e) & 0xff) << 24) | \
93 ((uint64_t)((f) & 0xff) << 16) | \
94 ((uint64_t)((g) & 0xff) << 8) | \
95 ((uint64_t)(h) & 0xff))
97 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
98 (((uint64_t)((h) & 0xff) << 56) | \
99 ((uint64_t)((g) & 0xff) << 48) | \
100 ((uint64_t)((f) & 0xff) << 40) | \
101 ((uint64_t)((e) & 0xff) << 32) | \
102 ((uint64_t)((d) & 0xff) << 24) | \
103 ((uint64_t)((c) & 0xff) << 16) | \
104 ((uint64_t)((b) & 0xff) << 8) | \
105 ((uint64_t)(a) & 0xff))
107 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
109 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
110 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
111 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
112 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
115 #define FRAG_TBL_BUCKET_ENTRIES 4
116 #define MAX_FRAG_TTL_NS (10LL * NS_PER_S)
118 #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
120 /* port/source ethernet addr and destination ethernet addr */
121 struct ethaddr_info {
125 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
126 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
127 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
128 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
129 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
132 #define CMD_LINE_OPT_CONFIG "config"
133 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
134 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
135 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
136 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
137 #define CMD_LINE_OPT_REASSEMBLE "reassemble"
138 #define CMD_LINE_OPT_MTU "mtu"
139 #define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
142 /* long options mapped to a short option */
144 /* first long-only option value must be >= 256, so that it won't
145  * conflict with short options
147 CMD_LINE_OPT_MIN_NUM = 256,
148 CMD_LINE_OPT_CONFIG_NUM,
149 CMD_LINE_OPT_SINGLE_SA_NUM,
150 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
151 CMD_LINE_OPT_RX_OFFLOAD_NUM,
152 CMD_LINE_OPT_TX_OFFLOAD_NUM,
153 CMD_LINE_OPT_REASSEMBLE_NUM,
154 CMD_LINE_OPT_MTU_NUM,
155 CMD_LINE_OPT_FRAG_TTL_NUM,
158 static const struct option lgopts[] = {
159 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
160 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
161 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
162 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
163 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
164 {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
165 {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
166 {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
170 /* mask of enabled ports */
171 static uint32_t enabled_port_mask;
172 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
173 static uint32_t unprotected_port_mask;
174 static int32_t promiscuous_on = 1;
175 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
176 static uint32_t nb_lcores;
177 static uint32_t single_sa;
178 static uint32_t single_sa_idx;
181 * RX/TX HW offload capabilities to enable/use on ethernet ports.
182 * By default all capabilities are enabled.
184 static uint64_t dev_rx_offload = UINT64_MAX;
185 static uint64_t dev_tx_offload = UINT64_MAX;
188 * global values that determine multi-seg policy
190 static uint32_t frag_tbl_sz;
191 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
192 static uint32_t mtu_size = RTE_ETHER_MTU;
193 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
195 /* application wide librte_ipsec/SA parameters */
196 struct app_sa_prm app_sa_prm = {
198 .cache_sz = SA_CACHE_SZ
200 static const char *cfgfile;
202 struct lcore_rx_queue {
205 } __rte_cache_aligned;
207 struct lcore_params {
211 } __rte_cache_aligned;
213 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
215 static struct lcore_params *lcore_params;
216 static uint16_t nb_lcore_params;
218 static struct rte_hash *cdev_map_in;
219 static struct rte_hash *cdev_map_out;
223 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
227 uint16_t nb_rx_queue;
228 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
229 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
230 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
231 struct ipsec_ctx inbound;
232 struct ipsec_ctx outbound;
233 struct rt_ctx *rt4_ctx;
234 struct rt_ctx *rt6_ctx;
236 struct rte_ip_frag_tbl *tbl;
237 struct rte_mempool *pool_dir;
238 struct rte_mempool *pool_indir;
239 struct rte_ip_frag_death_row dr;
241 } __rte_cache_aligned;
243 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
245 static struct rte_eth_conf port_conf = {
247 .mq_mode = ETH_MQ_RX_RSS,
248 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
250 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
255 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
256 ETH_RSS_TCP | ETH_RSS_SCTP,
260 .mq_mode = ETH_MQ_TX_NONE,
264 static struct socket_ctx socket_ctx[NB_SOCKETS];
267  * Determine if multi-segment support is required:
268  * - either the frame buffer size is smaller than the MTU
269  * - or reassembly support is requested
272 multi_seg_required(void)
274 return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
275 frame_buf_size || frag_tbl_sz != 0);
279 adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
284 plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
285 if (plen < m->pkt_len) {
286 trim = m->pkt_len - plen;
287 rte_pktmbuf_trim(m, trim);
292 adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
297 plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
298 if (plen < m->pkt_len) {
299 trim = m->pkt_len - plen;
300 rte_pktmbuf_trim(m, trim);
305 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
307 const struct rte_ether_hdr *eth;
308 const struct rte_ipv4_hdr *iph4;
309 const struct rte_ipv6_hdr *iph6;
311 eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
312 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
314 iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
316 adjust_ipv4_pktlen(pkt, iph4, 0);
318 if (iph4->next_proto_id == IPPROTO_ESP)
319 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
321 t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
322 t->ip4.pkts[(t->ip4.num)++] = pkt;
325 pkt->l3_len = sizeof(*iph4);
326 pkt->packet_type |= RTE_PTYPE_L3_IPV4;
327 } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
329 size_t l3len, ext_len;
332 /* get protocol type */
333 iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
335 adjust_ipv6_pktlen(pkt, iph6, 0);
337 next_proto = iph6->proto;
339 /* determine l3 header size up to ESP extension */
340 l3len = sizeof(struct ip6_hdr);
341 p = rte_pktmbuf_mtod(pkt, uint8_t *);
342 while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
343 (next_proto = rte_ipv6_get_next_ext(p + l3len,
344 next_proto, &ext_len)) >= 0)
347 /* drop packet when IPv6 header exceeds first segment length */
348 if (unlikely(l3len > pkt->data_len)) {
349 rte_pktmbuf_free(pkt);
353 if (next_proto == IPPROTO_ESP)
354 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
356 t->ip6.data[t->ip6.num] = &iph6->proto;
357 t->ip6.pkts[(t->ip6.num)++] = pkt;
361 pkt->packet_type |= RTE_PTYPE_L3_IPV6;
363 /* Unknown/Unsupported type, drop the packet */
364 RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
365 rte_be_to_cpu_16(eth->ether_type));
366 rte_pktmbuf_free(pkt);
370 /* Check if the packet has been processed inline. For inline protocol
371 * processed packets, the metadata in the mbuf can be used to identify
372 * the security processing done on the packet. The metadata will be
373 * used to retrieve the application registered userdata associated
374 * with the security session.
377 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
379 struct ipsec_mbuf_metadata *priv;
380 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
381 rte_eth_dev_get_sec_ctx(
384 /* Retrieve the userdata registered. Here, the userdata
385 * registered is the SA pointer.
388 sa = (struct ipsec_sa *)
389 rte_security_get_userdata(ctx, pkt->udata64);
392 /* userdata could not be retrieved */
396 /* Save SA as priv member in mbuf. This will be used in the
397 * IPsec selector(SP-SA) check.
400 priv = get_priv(pkt);
406 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
415 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
416 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
418 prepare_one_packet(pkts[i], t);
420 /* Process the remaining packets */
421 for (; i < nb_pkts; i++)
422 prepare_one_packet(pkts[i], t);
426 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
427 const struct lcore_conf *qconf)
430 struct rte_ether_hdr *ethhdr;
432 ip = rte_pktmbuf_mtod(pkt, struct ip *);
434 ethhdr = (struct rte_ether_hdr *)
435 rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
437 if (ip->ip_v == IPVERSION) {
438 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
439 pkt->l3_len = sizeof(struct ip);
440 pkt->l2_len = RTE_ETHER_HDR_LEN;
444 /* calculate IPv4 cksum in SW */
445 if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
446 ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
448 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
450 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
451 pkt->l3_len = sizeof(struct ip6_hdr);
452 pkt->l2_len = RTE_ETHER_HDR_LEN;
454 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
457 memcpy(ðhdr->s_addr, ðaddr_tbl[port].src,
458 sizeof(struct rte_ether_addr));
459 memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst,
460 sizeof(struct rte_ether_addr));
464 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
465 const struct lcore_conf *qconf)
468 const int32_t prefetch_offset = 2;
470 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
471 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
472 prepare_tx_pkt(pkts[i], port, qconf);
474 /* Process the remaining packets */
475 for (; i < nb_pkts; i++)
476 prepare_tx_pkt(pkts[i], port, qconf);
479 /* Send burst of packets on an output interface */
480 static inline int32_t
481 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
483 struct rte_mbuf **m_table;
487 queueid = qconf->tx_queue_id[port];
488 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
490 prepare_tx_burst(m_table, n, port, qconf);
492 ret = rte_eth_tx_burst(port, queueid, m_table, n);
493 if (unlikely(ret < n)) {
495 rte_pktmbuf_free(m_table[ret]);
503 * Helper function to fragment and queue for TX one packet.
505 static inline uint32_t
506 send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
507 uint16_t port, uint8_t proto)
513 tbl = qconf->tx_mbufs + port;
516 /* if there is not enough room for new fragments, flush the queue to free space */
517 if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
518 send_burst(qconf, len, port);
522 n = RTE_DIM(tbl->m_table) - len;
524 if (proto == IPPROTO_IP)
525 rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
526 n, mtu_size, qconf->frag.pool_dir,
527 qconf->frag.pool_indir);
529 rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
530 n, mtu_size, qconf->frag.pool_dir,
531 qconf->frag.pool_indir);
537 "%s: failed to fragment packet with size %u, "
539 __func__, m->pkt_len, rte_errno);
545 /* Enqueue a single packet, and send burst if queue is filled */
546 static inline int32_t
547 send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
551 struct lcore_conf *qconf;
553 lcore_id = rte_lcore_id();
555 qconf = &lcore_conf[lcore_id];
556 len = qconf->tx_mbufs[port].len;
558 if (m->pkt_len <= mtu_size) {
559 qconf->tx_mbufs[port].m_table[len] = m;
562 /* need to fragment the packet */
563 } else if (frag_tbl_sz > 0)
564 len = send_fragment_packet(qconf, m, port, proto);
568 /* enough pkts to be sent */
569 if (unlikely(len == MAX_PKT_BURST)) {
570 send_burst(qconf, MAX_PKT_BURST, port);
574 qconf->tx_mbufs[port].len = len;
579 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
583 uint32_t i, j, res, sa_idx;
585 if (ip->num == 0 || sp == NULL)
588 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
589 ip->num, DEFAULT_MAX_CATEGORIES);
592 for (i = 0; i < ip->num; i++) {
599 if (res == DISCARD) {
604 /* Only check SPI match for processed IPSec packets */
605 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
611 if (!inbound_sa_check(sa, m, sa_idx)) {
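/* split a burst of mbufs into the ip4/ip6 traffic types based on the IP version field */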
621 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
630 for (i = 0; i < num; i++) {
633 ip = rte_pktmbuf_mtod(m, struct ip *);
635 if (ip->ip_v == IPVERSION) {
636 trf->ip4.pkts[n4] = m;
637 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
638 uint8_t *, offsetof(struct ip, ip_p));
640 } else if (ip->ip_v == IP6_VERSION) {
641 trf->ip6.pkts[n6] = m;
642 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
644 offsetof(struct ip6_hdr, ip6_nxt));
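/*
 * Inbound path: run received ESP packets through IPsec processing,
 * split the results into IPv4/IPv6 and apply the inbound SP (ACL) checks.
 */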
656 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
657 struct ipsec_traffic *traffic)
659 uint16_t nb_pkts_in, n_ip4, n_ip6;
661 n_ip4 = traffic->ip4.num;
662 n_ip6 = traffic->ip6.num;
664 if (app_sa_prm.enable == 0) {
665 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
666 traffic->ipsec.num, MAX_PKT_BURST);
667 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
669 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
670 traffic->ipsec.saptr, traffic->ipsec.num);
671 ipsec_process(ipsec_ctx, traffic);
674 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
677 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
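/*
 * Classify plain traffic against the outbound SP (ACL): DISCARD drops the
 * packet, BYPASS keeps it in the plain-IP list, any other match queues it
 * for IPsec processing with the matched SA index.
 */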
682 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
683 struct traffic_type *ipsec)
686 uint32_t i, j, sa_idx;
688 if (ip->num == 0 || sp == NULL)
691 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
692 ip->num, DEFAULT_MAX_CATEGORIES);
695 for (i = 0; i < ip->num; i++) {
697 sa_idx = ip->res[i] - 1;
698 if (ip->res[i] == DISCARD)
700 else if (ip->res[i] == BYPASS)
703 ipsec->res[ipsec->num] = sa_idx;
704 ipsec->pkts[ipsec->num++] = m;
711 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
712 struct ipsec_traffic *traffic)
715 uint16_t idx, nb_pkts_out, i;
717 /* Drop any IPsec traffic from protected ports */
718 for (i = 0; i < traffic->ipsec.num; i++)
719 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
721 traffic->ipsec.num = 0;
723 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
725 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
727 if (app_sa_prm.enable == 0) {
729 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
730 traffic->ipsec.res, traffic->ipsec.num,
733 for (i = 0; i < nb_pkts_out; i++) {
734 m = traffic->ipsec.pkts[i];
735 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
736 if (ip->ip_v == IPVERSION) {
737 idx = traffic->ip4.num++;
738 traffic->ip4.pkts[idx] = m;
740 idx = traffic->ip6.num++;
741 traffic->ip6.pkts[idx] = m;
745 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
746 traffic->ipsec.saptr, traffic->ipsec.num);
747 ipsec_process(ipsec_ctx, traffic);
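/*
 * Single-SA inbound path: plain IPv4/IPv6 traffic is dropped and only IPsec
 * packets are processed, without consulting the SP database.
 */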
752 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
753 struct ipsec_traffic *traffic)
756 uint32_t nb_pkts_in, i, idx;
758 /* Drop any IPv4 traffic from unprotected ports */
759 for (i = 0; i < traffic->ip4.num; i++)
760 rte_pktmbuf_free(traffic->ip4.pkts[i]);
762 traffic->ip4.num = 0;
764 /* Drop any IPv6 traffic from unprotected ports */
765 for (i = 0; i < traffic->ip6.num; i++)
766 rte_pktmbuf_free(traffic->ip6.pkts[i]);
768 traffic->ip6.num = 0;
770 if (app_sa_prm.enable == 0) {
772 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
773 traffic->ipsec.num, MAX_PKT_BURST);
775 for (i = 0; i < nb_pkts_in; i++) {
776 m = traffic->ipsec.pkts[i];
777 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
778 if (ip->ip_v == IPVERSION) {
779 idx = traffic->ip4.num++;
780 traffic->ip4.pkts[idx] = m;
782 idx = traffic->ip6.num++;
783 traffic->ip6.pkts[idx] = m;
787 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
788 traffic->ipsec.saptr, traffic->ipsec.num);
789 ipsec_process(ipsec_ctx, traffic);
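/*
 * Single-SA outbound path: all IPv4/IPv6 traffic is protected with the single
 * configured SA (single_sa_idx), without consulting the SP database.
 */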
794 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
795 struct ipsec_traffic *traffic)
798 uint32_t nb_pkts_out, i, n;
801 /* Drop any IPsec traffic from protected ports */
802 for (i = 0; i < traffic->ipsec.num; i++)
803 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
807 for (i = 0; i < traffic->ip4.num; i++) {
808 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
809 traffic->ipsec.res[n++] = single_sa_idx;
812 for (i = 0; i < traffic->ip6.num; i++) {
813 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
814 traffic->ipsec.res[n++] = single_sa_idx;
817 traffic->ip4.num = 0;
818 traffic->ip6.num = 0;
819 traffic->ipsec.num = n;
821 if (app_sa_prm.enable == 0) {
823 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
824 traffic->ipsec.res, traffic->ipsec.num,
827 /* They all use the same SA (ip4 or ip6 tunnel) */
828 m = traffic->ipsec.pkts[0];
829 ip = rte_pktmbuf_mtod(m, struct ip *);
830 if (ip->ip_v == IPVERSION) {
831 traffic->ip4.num = nb_pkts_out;
832 for (i = 0; i < nb_pkts_out; i++)
833 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
835 traffic->ip6.num = nb_pkts_out;
836 for (i = 0; i < nb_pkts_out; i++)
837 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
840 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
841 traffic->ipsec.saptr, traffic->ipsec.num);
842 ipsec_process(ipsec_ctx, traffic);
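/*
 * For inline-offloaded packets the egress port comes from the SA saved in the
 * mbuf private area; it is returned with RTE_LPM_LOOKUP_SUCCESS set so it can
 * be handled like a regular LPM lookup result.
 */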
846 static inline int32_t
847 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
849 struct ipsec_mbuf_metadata *priv;
852 priv = get_priv(pkt);
855 if (unlikely(sa == NULL)) {
856 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
864 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
875 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
877 uint32_t hop[MAX_PKT_BURST * 2];
878 uint32_t dst_ip[MAX_PKT_BURST * 2];
881 uint16_t lpm_pkts = 0;
886 /* Need to do an LPM lookup for non-inline packets. Inline packets will
887 * have port ID in the SA
890 for (i = 0; i < nb_pkts; i++) {
891 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
892 /* Security offload not enabled. So an LPM lookup is
893 * required to get the hop
895 offset = offsetof(struct ip, ip_dst);
896 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
898 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
903 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
907 for (i = 0; i < nb_pkts; i++) {
908 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
909 /* Read hop from the SA */
910 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
912 /* Need to use hop returned by lookup */
913 pkt_hop = hop[lpm_pkts++];
916 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
917 rte_pktmbuf_free(pkts[i]);
920 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
925 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
927 int32_t hop[MAX_PKT_BURST * 2];
928 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
932 uint16_t lpm_pkts = 0;
937 /* Need to do an LPM lookup for non-inline packets. Inline packets will
938 * have port ID in the SA
941 for (i = 0; i < nb_pkts; i++) {
942 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
943 /* Security offload not enabled. So an LPM lookup is
944 * required to get the hop
946 offset = offsetof(struct ip6_hdr, ip6_dst);
947 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
949 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
954 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
959 for (i = 0; i < nb_pkts; i++) {
960 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
961 /* Read hop from the SA */
962 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
964 /* Need to use hop returned by lookup */
965 pkt_hop = hop[lpm_pkts++];
969 rte_pktmbuf_free(pkts[i]);
972 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
977 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
978 uint8_t nb_pkts, uint16_t portid)
980 struct ipsec_traffic traffic;
982 prepare_traffic(pkts, &traffic, nb_pkts);
984 if (unlikely(single_sa)) {
985 if (UNPROTECTED_PORT(portid))
986 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
988 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
990 if (UNPROTECTED_PORT(portid))
991 process_pkts_inbound(&qconf->inbound, &traffic);
993 process_pkts_outbound(&qconf->outbound, &traffic);
996 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
997 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
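/* flush any partially filled per-port TX buffers */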
1001 drain_tx_buffers(struct lcore_conf *qconf)
1006 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
1007 buf = &qconf->tx_mbufs[portid];
1010 send_burst(qconf, buf->len, portid);
1016 drain_crypto_buffers(struct lcore_conf *qconf)
1019 struct ipsec_ctx *ctx;
1021 /* drain inbound buffers */
1022 ctx = &qconf->inbound;
1023 for (i = 0; i != ctx->nb_qps; i++) {
1024 if (ctx->tbl[i].len != 0)
1025 enqueue_cop_burst(ctx->tbl + i);
1028 /* drain outbound buffers */
1029 ctx = &qconf->outbound;
1030 for (i = 0; i != ctx->nb_qps; i++) {
1031 if (ctx->tbl[i].len != 0)
1032 enqueue_cop_burst(ctx->tbl + i);
1037 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
1038 struct ipsec_ctx *ctx)
1041 struct ipsec_traffic trf;
1043 if (app_sa_prm.enable == 0) {
1045 /* dequeue packets from crypto-queue */
1046 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1047 RTE_DIM(trf.ipsec.pkts));
1052 /* split traffic by ipv4-ipv6 */
1053 split46_traffic(&trf, trf.ipsec.pkts, n);
1055 ipsec_cqp_process(ctx, &trf);
1057 /* process ipv4 packets */
1058 if (trf.ip4.num != 0) {
1059 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
1060 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1063 /* process ipv6 packets */
1064 if (trf.ip6.num != 0) {
1065 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
1066 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1071 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
1072 struct ipsec_ctx *ctx)
1075 struct ipsec_traffic trf;
1077 if (app_sa_prm.enable == 0) {
1079 /* dequeue packets from crypto-queue */
1080 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1081 RTE_DIM(trf.ipsec.pkts));
1086 /* split traffic by ipv4-ipv6 */
1087 split46_traffic(&trf, trf.ipsec.pkts, n);
1089 ipsec_cqp_process(ctx, &trf);
1091 /* process ipv4 packets */
1092 if (trf.ip4.num != 0)
1093 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1095 /* process ipv6 packets */
1096 if (trf.ip6.num != 0)
1097 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1100 /* main processing loop */
1102 main_loop(__attribute__((unused)) void *dummy)
1104 struct rte_mbuf *pkts[MAX_PKT_BURST];
1106 uint64_t prev_tsc, diff_tsc, cur_tsc;
1110 struct lcore_conf *qconf;
1111 int32_t rc, socket_id;
1112 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1113 / US_PER_S * BURST_TX_DRAIN_US;
1114 struct lcore_rx_queue *rxql;
1117 lcore_id = rte_lcore_id();
1118 qconf = &lcore_conf[lcore_id];
1119 rxql = qconf->rx_queue_list;
1120 socket_id = rte_lcore_to_socket_id(lcore_id);
1122 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
1123 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
1124 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
1125 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
1126 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
1127 qconf->inbound.cdev_map = cdev_map_in;
1128 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1129 qconf->inbound.session_priv_pool =
1130 socket_ctx[socket_id].session_priv_pool;
1131 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1132 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1133 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1134 qconf->outbound.cdev_map = cdev_map_out;
1135 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1136 qconf->outbound.session_priv_pool =
1137 socket_ctx[socket_id].session_priv_pool;
1138 qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
1139 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1141 rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
1144 "SAD cache init on lcore %u, failed with code: %d\n",
1149 if (qconf->nb_rx_queue == 0) {
1150 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1155 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1157 for (i = 0; i < qconf->nb_rx_queue; i++) {
1158 portid = rxql[i].port_id;
1159 queueid = rxql[i].queue_id;
1160 RTE_LOG(INFO, IPSEC,
1161 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1162 lcore_id, portid, queueid);
1166 cur_tsc = rte_rdtsc();
1168 /* TX queue buffer drain */
1169 diff_tsc = cur_tsc - prev_tsc;
1171 if (unlikely(diff_tsc > drain_tsc)) {
1172 drain_tx_buffers(qconf);
1173 drain_crypto_buffers(qconf);
1177 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1179 /* Read packets from RX queues */
1180 portid = rxql[i].port_id;
1181 queueid = rxql[i].queue_id;
1182 nb_rx = rte_eth_rx_burst(portid, queueid,
1183 pkts, MAX_PKT_BURST);
1186 process_pkts(qconf, pkts, nb_rx, portid);
1188 /* dequeue and process completed crypto-ops */
1189 if (UNPROTECTED_PORT(portid))
1190 drain_inbound_crypto_queues(qconf,
1193 drain_outbound_crypto_queues(qconf,
1207 if (lcore_params == NULL) {
1208 printf("Error: No port/queue/core mappings\n");
1212 for (i = 0; i < nb_lcore_params; ++i) {
1213 lcore = lcore_params[i].lcore_id;
1214 if (!rte_lcore_is_enabled(lcore)) {
1215 printf("error: lcore %hhu is not enabled in "
1216 "lcore mask\n", lcore);
1219 socket_id = rte_lcore_to_socket_id(lcore);
1220 if (socket_id != 0 && numa_on == 0) {
1221 printf("warning: lcore %hhu is on socket %d "
1225 portid = lcore_params[i].port_id;
1226 if ((enabled_port_mask & (1 << portid)) == 0) {
1227 printf("port %u is not enabled in port mask\n", portid);
1230 if (!rte_eth_dev_is_valid_port(portid)) {
1231 printf("port %u is not present on the board\n", portid);
1239 get_port_nb_rx_queues(const uint16_t port)
1244 for (i = 0; i < nb_lcore_params; ++i) {
1245 if (lcore_params[i].port_id == port &&
1246 lcore_params[i].queue_id > queue)
1247 queue = lcore_params[i].queue_id;
1249 return (uint8_t)(++queue);
1253 init_lcore_rx_queues(void)
1255 uint16_t i, nb_rx_queue;
1258 for (i = 0; i < nb_lcore_params; ++i) {
1259 lcore = lcore_params[i].lcore_id;
1260 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1261 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1262 printf("error: too many queues (%u) for lcore: %u\n",
1263 nb_rx_queue + 1, lcore);
1266 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1267 lcore_params[i].port_id;
1268 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1269 lcore_params[i].queue_id;
1270 lcore_conf[lcore].nb_rx_queue++;
1277 print_usage(const char *prgname)
1279 fprintf(stderr, "%s [EAL options] --"
1285 " [-w REPLAY_WINDOW_SIZE]"
1290 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1291 " [--single-sa SAIDX]"
1292 " [--cryptodev_mask MASK]"
1293 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1294 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1295 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
1296 " [--" CMD_LINE_OPT_MTU " MTU]"
1298 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1299 " -P : Enable promiscuous mode\n"
1300 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1301 " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
1302 " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
1303 " -l enables code-path that uses librte_ipsec\n"
1304 " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1305 " size for each SA\n"
1307 " -a enables SA SQN atomic behaviour\n"
1308 " -c specifies inbound SAD cache size,\n"
1309 " zero value disables the cache (default value: 128)\n"
1310 " -f CONFIG_FILE: Configuration file\n"
1311 " --config (port,queue,lcore): Rx queue configuration\n"
1312 " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1313 " bypassing the SP\n"
1314 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1315 " devices to configure\n"
1316 " --" CMD_LINE_OPT_RX_OFFLOAD
1317 ": bitmask of the RX HW offload capabilities to enable/use\n"
1318 " (DEV_RX_OFFLOAD_*)\n"
1319 " --" CMD_LINE_OPT_TX_OFFLOAD
1320 ": bitmask of the TX HW offload capabilities to enable/use\n"
1321 " (DEV_TX_OFFLOAD_*)\n"
1322 " --" CMD_LINE_OPT_REASSEMBLE " NUM"
1323 ": max number of entries in reassemble(fragment) table\n"
1324 " (zero (default value) disables reassembly)\n"
1325 " --" CMD_LINE_OPT_MTU " MTU"
1326 ": MTU value on all ports (default value: 1500)\n"
1327 " outgoing packets with bigger size will be fragmented\n"
1328 " incoming packets with bigger size will be discarded\n"
1329 " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
1330 ": fragments lifetime in nanoseconds, default\n"
1331 " and maximum value is 10.000.000.000 ns (10 s)\n"
1337 parse_mask(const char *str, uint64_t *val)
1343 t = strtoul(str, &end, 0);
1344 if (errno != 0 || end[0] != 0)
1352 parse_portmask(const char *portmask)
1357 /* parse hexadecimal string */
1358 pm = strtoul(portmask, &end, 16);
1359 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1362 if ((pm == 0) && errno)
1369 parse_decimal(const char *str)
1374 num = strtoull(str, &end, 10);
1375 if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
1383 parse_config(const char *q_arg)
1386 const char *p, *p0 = q_arg;
1394 unsigned long int_fld[_NUM_FLD];
1395 char *str_fld[_NUM_FLD];
1399 nb_lcore_params = 0;
1401 while ((p = strchr(p0, '(')) != NULL) {
1403 p0 = strchr(p, ')');
1408 if (size >= sizeof(s))
1411 snprintf(s, sizeof(s), "%.*s", size, p);
1412 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1415 for (i = 0; i < _NUM_FLD; i++) {
1417 int_fld[i] = strtoul(str_fld[i], &end, 0);
1418 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1421 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1422 printf("exceeded max number of lcore params: %hu\n",
1426 lcore_params_array[nb_lcore_params].port_id =
1427 (uint8_t)int_fld[FLD_PORT];
1428 lcore_params_array[nb_lcore_params].queue_id =
1429 (uint8_t)int_fld[FLD_QUEUE];
1430 lcore_params_array[nb_lcore_params].lcore_id =
1431 (uint8_t)int_fld[FLD_LCORE];
1434 lcore_params = lcore_params_array;
1439 print_app_sa_prm(const struct app_sa_prm *prm)
1441 printf("librte_ipsec usage: %s\n",
1442 (prm->enable == 0) ? "disabled" : "enabled");
1444 printf("replay window size: %u\n", prm->window_size);
1445 printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1446 printf("SA flags: %#" PRIx64 "\n", prm->flags);
1447 printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
1451 parse_args(int32_t argc, char **argv)
1456 int32_t option_index;
1457 char *prgname = argv[0];
1458 int32_t f_present = 0;
1462 while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
1463 lgopts, &option_index)) != EOF) {
1467 enabled_port_mask = parse_portmask(optarg);
1468 if (enabled_port_mask == 0) {
1469 printf("invalid portmask\n");
1470 print_usage(prgname);
1475 printf("Promiscuous mode selected\n");
1479 unprotected_port_mask = parse_portmask(optarg);
1480 if (unprotected_port_mask == 0) {
1481 printf("invalid unprotected portmask\n");
1482 print_usage(prgname);
1487 if (f_present == 1) {
1488 printf("\"-f\" option present more than "
1490 print_usage(prgname);
1497 ret = parse_decimal(optarg);
1498 if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1500 printf("Invalid frame buffer size value: %s\n",
1502 print_usage(prgname);
1505 frame_buf_size = ret;
1506 printf("Custom frame buffer size %u\n", frame_buf_size);
1509 app_sa_prm.enable = 1;
1512 app_sa_prm.window_size = parse_decimal(optarg);
1515 app_sa_prm.enable_esn = 1;
1518 app_sa_prm.enable = 1;
1519 app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1522 ret = parse_decimal(optarg);
1524 printf("Invalid SA cache size: %s\n", optarg);
1525 print_usage(prgname);
1528 app_sa_prm.cache_sz = ret;
1530 case CMD_LINE_OPT_CONFIG_NUM:
1531 ret = parse_config(optarg);
1533 printf("Invalid config\n");
1534 print_usage(prgname);
1538 case CMD_LINE_OPT_SINGLE_SA_NUM:
1539 ret = parse_decimal(optarg);
1540 if (ret == -1 || ret > UINT32_MAX) {
1541 printf("Invalid argument[sa_idx]\n");
1542 print_usage(prgname);
1548 single_sa_idx = ret;
1549 printf("Configured with single SA index %u\n",
1552 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1553 ret = parse_portmask(optarg);
1555 printf("Invalid argument[portmask]\n");
1556 print_usage(prgname);
1561 enabled_cryptodev_mask = ret;
1563 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1564 ret = parse_mask(optarg, &dev_rx_offload);
1566 printf("Invalid argument for \'%s\': %s\n",
1567 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1568 print_usage(prgname);
1572 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1573 ret = parse_mask(optarg, &dev_tx_offload);
1575 printf("Invalid argument for \'%s\': %s\n",
1576 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1577 print_usage(prgname);
1581 case CMD_LINE_OPT_REASSEMBLE_NUM:
1582 ret = parse_decimal(optarg);
1583 if (ret < 0 || ret > UINT32_MAX) {
1584 printf("Invalid argument for \'%s\': %s\n",
1585 CMD_LINE_OPT_REASSEMBLE, optarg);
1586 print_usage(prgname);
1591 case CMD_LINE_OPT_MTU_NUM:
1592 ret = parse_decimal(optarg);
1593 if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1594 printf("Invalid argument for \'%s\': %s\n",
1595 CMD_LINE_OPT_MTU, optarg);
1596 print_usage(prgname);
1601 case CMD_LINE_OPT_FRAG_TTL_NUM:
1602 ret = parse_decimal(optarg);
1603 if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
1604 printf("Invalid argument for \'%s\': %s\n",
1605 CMD_LINE_OPT_MTU, optarg);
1606 print_usage(prgname);
1612 print_usage(prgname);
1617 if (f_present == 0) {
1618 printf("Mandatory option \"-f\" not present\n");
1622 /* check whether we need to enable multi-seg support */
1623 if (multi_seg_required()) {
1624 /* legacy mode doesn't support multi-seg */
1625 app_sa_prm.enable = 1;
1626 printf("frame buf size: %u, mtu: %u, "
1627 "number of reassemble entries: %u\n"
1628 "multi-segment support is required\n",
1629 frame_buf_size, mtu_size, frag_tbl_sz);
1632 print_app_sa_prm(&app_sa_prm);
1635 argv[optind-1] = prgname;
1638 optind = 1; /* reset getopt lib */
1643 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1645 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1646 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1647 printf("%s%s", name, buf);
1651 * Update destination ethaddr for the port.
1654 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1656 if (port >= RTE_DIM(ethaddr_tbl))
1659 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1663 /* Check the link status of all ports for up to 9 s, then print the final status */
1665 check_all_ports_link_status(uint32_t port_mask)
1667 #define CHECK_INTERVAL 100 /* 100ms */
1668 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1670 uint8_t count, all_ports_up, print_flag = 0;
1671 struct rte_eth_link link;
1674 printf("\nChecking link status");
1676 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1678 RTE_ETH_FOREACH_DEV(portid) {
1679 if ((port_mask & (1 << portid)) == 0)
1681 memset(&link, 0, sizeof(link));
1682 ret = rte_eth_link_get_nowait(portid, &link);
1685 if (print_flag == 1)
1686 printf("Port %u link get failed: %s\n",
1687 portid, rte_strerror(-ret));
1690 /* print link status if flag set */
1691 if (print_flag == 1) {
1692 if (link.link_status)
1694 "Port%d Link Up - speed %u Mbps -%s\n",
1695 portid, link.link_speed,
1696 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1697 ("full-duplex") : ("half-duplex\n"));
1699 printf("Port %d Link Down\n", portid);
1702 /* clear all_ports_up flag if any link down */
1703 if (link.link_status == ETH_LINK_DOWN) {
1708 /* after finally printing all link status, get out */
1709 if (print_flag == 1)
1712 if (all_ports_up == 0) {
1715 rte_delay_ms(CHECK_INTERVAL);
1718 /* set the print_flag if all ports up or timeout */
1719 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
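/* record an (lcore, algorithms) -> (cdev, qp) mapping in the given hash table */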
1727 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1728 uint16_t qp, struct lcore_params *params,
1729 struct ipsec_ctx *ipsec_ctx,
1730 const struct rte_cryptodev_capabilities *cipher,
1731 const struct rte_cryptodev_capabilities *auth,
1732 const struct rte_cryptodev_capabilities *aead)
1736 struct cdev_key key = { 0 };
1738 key.lcore_id = params->lcore_id;
1740 key.cipher_algo = cipher->sym.cipher.algo;
1742 key.auth_algo = auth->sym.auth.algo;
1744 key.aead_algo = aead->sym.aead.algo;
1746 ret = rte_hash_lookup(map, &key);
1750 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1751 if (ipsec_ctx->tbl[i].id == cdev_id)
1754 if (i == ipsec_ctx->nb_qps) {
1755 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1756 printf("Maximum number of crypto devices assigned to "
1757 "a core, increase MAX_QP_PER_LCORE value\n");
1760 ipsec_ctx->tbl[i].id = cdev_id;
1761 ipsec_ctx->tbl[i].qp = qp;
1762 ipsec_ctx->nb_qps++;
1763 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1764 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1768 ret = rte_hash_add_key_data(map, &key, (void *)i);
1770 printf("Faled to insert cdev mapping for (lcore %u, "
1771 "cdev %u, qp %u), errno %d\n",
1772 key.lcore_id, ipsec_ctx->tbl[i].id,
1773 ipsec_ctx->tbl[i].qp, ret);
1781 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1782 uint16_t qp, struct lcore_params *params)
1785 const struct rte_cryptodev_capabilities *i, *j;
1786 struct rte_hash *map;
1787 struct lcore_conf *qconf;
1788 struct ipsec_ctx *ipsec_ctx;
1791 qconf = &lcore_conf[params->lcore_id];
1793 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1795 ipsec_ctx = &qconf->outbound;
1799 ipsec_ctx = &qconf->inbound;
1803 /* Require cryptodevs with operation chaining */
1804 if (!(dev_info->feature_flags &
1805 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1808 for (i = dev_info->capabilities;
1809 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1810 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1813 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1814 ret |= add_mapping(map, str, cdev_id, qp, params,
1815 ipsec_ctx, NULL, NULL, i);
1819 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1822 for (j = dev_info->capabilities;
1823 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1824 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1827 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1830 ret |= add_mapping(map, str, cdev_id, qp, params,
1831 ipsec_ctx, i, j, NULL);
1838 /* Check if the device is enabled by cryptodev_mask */
1840 check_cryptodev_mask(uint8_t cdev_id)
1842 if (enabled_cryptodev_mask & (1 << cdev_id))
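/*
 * Create the in/out cdev mapping tables, then configure queue pairs and start
 * every crypto device enabled by the cryptodev mask.
 */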
1849 cryptodevs_init(void)
1851 struct rte_cryptodev_config dev_conf;
1852 struct rte_cryptodev_qp_conf qp_conf;
1853 uint16_t idx, max_nb_qps, qp, i;
1855 struct rte_hash_parameters params = { 0 };
1857 const uint64_t mseg_flag = multi_seg_required() ?
1858 RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1860 params.entries = CDEV_MAP_ENTRIES;
1861 params.key_len = sizeof(struct cdev_key);
1862 params.hash_func = rte_jhash;
1863 params.hash_func_init_val = 0;
1864 params.socket_id = rte_socket_id();
1866 params.name = "cdev_map_in";
1867 cdev_map_in = rte_hash_create(¶ms);
1868 if (cdev_map_in == NULL)
1869 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1872 params.name = "cdev_map_out";
1873 cdev_map_out = rte_hash_create(¶ms);
1874 if (cdev_map_out == NULL)
1875 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1878 printf("lcore/cryptodev/qp mappings:\n");
1881 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1882 struct rte_cryptodev_info cdev_info;
1884 if (check_cryptodev_mask((uint8_t)cdev_id))
1887 rte_cryptodev_info_get(cdev_id, &cdev_info);
1889 if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1890 rte_exit(EXIT_FAILURE,
1891 "Device %hd does not support \'%s\' feature\n",
1893 rte_cryptodev_get_feature_name(mseg_flag));
1895 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1896 max_nb_qps = cdev_info.max_nb_queue_pairs;
1898 max_nb_qps = nb_lcore_params;
1902 while (qp < max_nb_qps && i < nb_lcore_params) {
1903 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1904 &lcore_params[idx]))
1907 idx = idx % nb_lcore_params;
1914 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1915 dev_conf.nb_queue_pairs = qp;
1916 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1918 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1919 if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
1920 rte_exit(EXIT_FAILURE,
1921 "Device does not support at least %u "
1922 "sessions", CDEV_MP_NB_OBJS);
1924 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1925 rte_panic("Failed to initialize cryptodev %u\n",
1928 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1929 qp_conf.mp_session =
1930 socket_ctx[dev_conf.socket_id].session_pool;
1931 qp_conf.mp_session_private =
1932 socket_ctx[dev_conf.socket_id].session_priv_pool;
1933 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1934 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1935 &qp_conf, dev_conf.socket_id))
1936 rte_panic("Failed to setup queue %u for "
1937 "cdev_id %u\n", 0, cdev_id);
1939 if (rte_cryptodev_start(cdev_id))
1940 rte_panic("Failed to start cryptodev %u\n",
1950 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1952 uint32_t frame_size;
1953 struct rte_eth_dev_info dev_info;
1954 struct rte_eth_txconf *txconf;
1955 uint16_t nb_tx_queue, nb_rx_queue;
1956 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1957 int32_t ret, socket_id;
1958 struct lcore_conf *qconf;
1959 struct rte_ether_addr ethaddr;
1960 struct rte_eth_conf local_port_conf = port_conf;
1962 ret = rte_eth_dev_info_get(portid, &dev_info);
1964 rte_exit(EXIT_FAILURE,
1965 "Error during getting device (port %u) info: %s\n",
1966 portid, strerror(-ret));
1968 /* limit allowed HW offloads, as the user requested */
1969 dev_info.rx_offload_capa &= dev_rx_offload;
1970 dev_info.tx_offload_capa &= dev_tx_offload;
1972 printf("Configuring device port %u:\n", portid);
1974 ret = rte_eth_macaddr_get(portid, ðaddr);
1976 rte_exit(EXIT_FAILURE,
1977 "Error getting MAC address (port %u): %s\n",
1978 portid, rte_strerror(-ret));
1980 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr);
1981 print_ethaddr("Address: ", ðaddr);
1984 nb_rx_queue = get_port_nb_rx_queues(portid);
1985 nb_tx_queue = nb_lcores;
1987 if (nb_rx_queue > dev_info.max_rx_queues)
1988 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1989 "(max rx queue is %u)\n",
1990 nb_rx_queue, dev_info.max_rx_queues);
1992 if (nb_tx_queue > dev_info.max_tx_queues)
1993 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1994 "(max tx queue is %u)\n",
1995 nb_tx_queue, dev_info.max_tx_queues);
1997 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1998 nb_rx_queue, nb_tx_queue);
2000 frame_size = MTU_TO_FRAMELEN(mtu_size);
2001 if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
2002 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2003 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
2005 if (multi_seg_required()) {
2006 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
2007 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
2010 local_port_conf.rxmode.offloads |= req_rx_offloads;
2011 local_port_conf.txmode.offloads |= req_tx_offloads;
2013 /* Check that all required capabilities are supported */
2014 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
2015 local_port_conf.rxmode.offloads)
2016 rte_exit(EXIT_FAILURE,
2017 "Error: port %u required RX offloads: 0x%" PRIx64
2018 ", avaialbe RX offloads: 0x%" PRIx64 "\n",
2019 portid, local_port_conf.rxmode.offloads,
2020 dev_info.rx_offload_capa);
2022 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
2023 local_port_conf.txmode.offloads)
2024 rte_exit(EXIT_FAILURE,
2025 "Error: port %u required TX offloads: 0x%" PRIx64
2026 ", avaialbe TX offloads: 0x%" PRIx64 "\n",
2027 portid, local_port_conf.txmode.offloads,
2028 dev_info.tx_offload_capa);
2030 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
2031 local_port_conf.txmode.offloads |=
2032 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2034 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
2035 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
2037 printf("port %u configurng rx_offloads=0x%" PRIx64
2038 ", tx_offloads=0x%" PRIx64 "\n",
2039 portid, local_port_conf.rxmode.offloads,
2040 local_port_conf.txmode.offloads);
2042 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2043 dev_info.flow_type_rss_offloads;
2044 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2045 port_conf.rx_adv_conf.rss_conf.rss_hf) {
2046 printf("Port %u modified RSS hash function based on hardware support,"
2047 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
2049 port_conf.rx_adv_conf.rss_conf.rss_hf,
2050 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2053 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2056 rte_exit(EXIT_FAILURE, "Cannot configure device: "
2057 "err=%d, port=%d\n", ret, portid);
2059 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2061 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2062 "err=%d, port=%d\n", ret, portid);
2064 /* init one TX queue per lcore */
2066 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2067 if (rte_lcore_is_enabled(lcore_id) == 0)
2071 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2076 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2078 txconf = &dev_info.default_txconf;
2079 txconf->offloads = local_port_conf.txmode.offloads;
2081 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2084 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2085 "err=%d, port=%d\n", ret, portid);
2087 qconf = &lcore_conf[lcore_id];
2088 qconf->tx_queue_id[portid] = tx_queueid;
2090 /* Pre-populate pkt offloads based on capabilities */
2091 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
2092 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
2093 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
2094 qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
2098 /* init RX queues */
2099 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2100 struct rte_eth_rxconf rxq_conf;
2102 if (portid != qconf->rx_queue_list[queue].port_id)
2105 rx_queueid = qconf->rx_queue_list[queue].queue_id;
2107 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2110 rxq_conf = dev_info.default_rxconf;
2111 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2112 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2113 nb_rxd, socket_id, &rxq_conf,
2114 socket_ctx[socket_id].mbuf_pool);
2116 rte_exit(EXIT_FAILURE,
2117 "rte_eth_rx_queue_setup: err=%d, "
2118 "port=%d\n", ret, portid);
2125 max_session_size(void)
2129 int16_t cdev_id, port_id, n;
2132 n = rte_cryptodev_count();
2133 for (cdev_id = 0; cdev_id != n; cdev_id++) {
2134 sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2138 * If crypto device is security capable, need to check the
2139 * size of security session as well.
2142 /* Get security context of the crypto device */
2143 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2144 if (sec_ctx == NULL)
2147 /* Get size of security session */
2148 sz = rte_security_session_get_size(sec_ctx);
2153 RTE_ETH_FOREACH_DEV(port_id) {
2154 if ((enabled_port_mask & (1 << port_id)) == 0)
2157 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2158 if (sec_ctx == NULL)
2161 sz = rte_security_session_get_size(sec_ctx);
2170 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2172 char mp_name[RTE_MEMPOOL_NAMESIZE];
2173 struct rte_mempool *sess_mp;
2175 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2176 "sess_mp_%u", socket_id);
2177 sess_mp = rte_cryptodev_sym_session_pool_create(
2178 mp_name, CDEV_MP_NB_OBJS,
2179 sess_sz, CDEV_MP_CACHE_SZ, 0,
2181 ctx->session_pool = sess_mp;
2183 if (ctx->session_pool == NULL)
2184 rte_exit(EXIT_FAILURE,
2185 "Cannot init session pool on socket %d\n", socket_id);
2187 printf("Allocated session pool on socket %d\n", socket_id);
2191 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2194 char mp_name[RTE_MEMPOOL_NAMESIZE];
2195 struct rte_mempool *sess_mp;
2197 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2198 "sess_mp_priv_%u", socket_id);
2199 sess_mp = rte_mempool_create(mp_name,
2203 0, NULL, NULL, NULL,
2206 ctx->session_priv_pool = sess_mp;
2208 if (ctx->session_priv_pool == NULL)
2209 rte_exit(EXIT_FAILURE,
2210 "Cannot init session priv pool on socket %d\n",
2213 printf("Allocated session priv pool on socket %d\n",
2218 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2223 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2224 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2225 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2226 frame_buf_size, socket_id);
2229 * if multi-segment support is enabled, then create a pool
2230 * for indirect mbufs.
2232 ms = multi_seg_required();
2234 snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2235 ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2236 MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2239 if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
2240 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2243 printf("Allocated mbuf pool on socket %d\n", socket_id);
2247 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2249 struct ipsec_sa *sa;
2251 /* For inline protocol processing, the metadata in the event will
2252 * uniquely identify the security session which raised the event.
2253 * Application would then need the userdata it had registered with the
2254 * security session to process the event.
2257 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2260 /* userdata could not be retrieved */
2264 /* Sequence number overflow. The SA needs to be re-established */
2270 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2271 void *param, void *ret_param)
2274 struct rte_eth_event_ipsec_desc *event_desc = NULL;
2275 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2276 rte_eth_dev_get_sec_ctx(port_id);
2278 RTE_SET_USED(param);
2280 if (type != RTE_ETH_EVENT_IPSEC)
2283 event_desc = ret_param;
2284 if (event_desc == NULL) {
2285 printf("Event descriptor not set\n");
2289 md = event_desc->metadata;
2291 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2292 return inline_ipsec_event_esn_overflow(ctx, md);
2293 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2294 printf("Invalid IPsec event reported\n");
2302 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2303 struct rte_mbuf *pkt[], uint16_t nb_pkts,
2304 __rte_unused uint16_t max_pkts, void *user_param)
2308 struct lcore_conf *lc;
2309 struct rte_mbuf *mb;
2310 struct rte_ether_hdr *eth;
2316 for (i = 0; i != nb_pkts; i++) {
2319 eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2320 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2322 struct rte_ipv4_hdr *iph;
2324 iph = (struct rte_ipv4_hdr *)(eth + 1);
2325 if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2327 mb->l2_len = sizeof(*eth);
2328 mb->l3_len = sizeof(*iph);
2329 tm = (tm != 0) ? tm : rte_rdtsc();
2330 mb = rte_ipv4_frag_reassemble_packet(
2331 lc->frag.tbl, &lc->frag.dr,
2335 /* fix IP cksum after reassembly. */
2336 iph = rte_pktmbuf_mtod_offset(mb,
2337 struct rte_ipv4_hdr *,
2339 iph->hdr_checksum = 0;
2340 iph->hdr_checksum = rte_ipv4_cksum(iph);
2343 } else if (eth->ether_type ==
2344 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2346 struct rte_ipv6_hdr *iph;
2347 struct ipv6_extension_fragment *fh;
2349 iph = (struct rte_ipv6_hdr *)(eth + 1);
2350 fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2352 mb->l2_len = sizeof(*eth);
2353 mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2355 tm = (tm != 0) ? tm : rte_rdtsc();
2356 mb = rte_ipv6_frag_reassemble_packet(
2357 lc->frag.tbl, &lc->frag.dr,
2360 /* fix l3_len after reassembly. */
2361 mb->l3_len = mb->l3_len - sizeof(*fh);
2369 /* some fragments were encountered, drain death row */
2371 rte_ip_frag_free_death_row(&lc->frag.dr, 0);
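/*
 * Per-lcore reassembly setup: create the fragment table and install the
 * reassembly RX callback on each of the lcore's RX queues.
 */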
2378 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2382 uint64_t frag_cycles;
2383 const struct lcore_rx_queue *rxq;
2384 const struct rte_eth_rxtx_callback *cb;
2386 /* create fragment table */
2387 sid = rte_lcore_to_socket_id(cid);
2388 frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
2389 NS_PER_S * frag_ttl_ns;
2391 lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2392 FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2393 if (lc->frag.tbl == NULL) {
2394 printf("%s(%u): failed to create fragment table of size: %u, "
2396 __func__, cid, frag_tbl_sz, rte_errno);
2400 /* set up reassembly RX callbacks for all queues */
2401 for (i = 0; i != lc->nb_rx_queue; i++) {
2403 rxq = lc->rx_queue_list + i;
2404 cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2407 printf("%s(%u): failed to install RX callback for "
2408 "portid=%u, queueid=%u, error code: %d\n",
2410 rxq->port_id, rxq->queue_id, rte_errno);
2419 reassemble_init(void)
2425 for (i = 0; i != nb_lcore_params; i++) {
2426 lc = lcore_params[i].lcore_id;
2427 rc = reassemble_lcore_init(lcore_conf + lc, lc);
2436 main(int32_t argc, char **argv)
2443 uint64_t req_rx_offloads, req_tx_offloads;
2447 ret = rte_eal_init(argc, argv);
2449 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2453 /* parse application arguments (after the EAL ones) */
2454 ret = parse_args(argc, argv);
2456 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2458 /* parse configuration file */
2459 if (parse_cfg_file(cfgfile) < 0) {
2460 printf("parsing file \"%s\" failed\n",
2462 print_usage(argv[0]);
2466 if ((unprotected_port_mask & enabled_port_mask) !=
2467 unprotected_port_mask)
2468 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2469 unprotected_port_mask);
2471 if (check_params() < 0)
2472 rte_exit(EXIT_FAILURE, "check_params failed\n");
2474 ret = init_lcore_rx_queues();
2476 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2478 nb_lcores = rte_lcore_count();
2480 sess_sz = max_session_size();
2482 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2483 if (rte_lcore_is_enabled(lcore_id) == 0)
2487 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2491 /* mbuf_pool is initialised by the pool_init() function */
2492 if (socket_ctx[socket_id].mbuf_pool)
2495 pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
2496 session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
2497 session_priv_pool_init(&socket_ctx[socket_id], socket_id,
2501 RTE_ETH_FOREACH_DEV(portid) {
2502 if ((enabled_port_mask & (1 << portid)) == 0)
2505 sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
2506 port_init(portid, req_rx_offloads, req_tx_offloads);
2512 RTE_ETH_FOREACH_DEV(portid) {
2513 if ((enabled_port_mask & (1 << portid)) == 0)
2518 * note: device must be started before a flow rule
2521 ret = rte_eth_dev_start(portid);
2523 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2524 "err=%d, port=%d\n", ret, portid);
2526 * If enabled, put device in promiscuous mode.
2527 * This allows IO forwarding mode to forward packets
2528 * to itself through 2 cross-connected ports of the
2531 if (promiscuous_on) {
2532 ret = rte_eth_promiscuous_enable(portid);
2534 rte_exit(EXIT_FAILURE,
2535 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
2536 rte_strerror(-ret), portid);
2539 rte_eth_dev_callback_register(portid,
2540 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
2543 /* fragment reassembly is enabled */
2544 if (frag_tbl_sz != 0) {
2545 ret = reassemble_init();
2547 rte_exit(EXIT_FAILURE, "failed at reassemble init");
2550 /* Replicate each context per socket */
2551 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
2552 socket_id = rte_socket_id_by_idx(i);
2553 if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
2554 (socket_ctx[socket_id].sa_in == NULL) &&
2555 (socket_ctx[socket_id].sa_out == NULL)) {
2556 sa_init(&socket_ctx[socket_id], socket_id);
2557 sp4_init(&socket_ctx[socket_id], socket_id);
2558 sp6_init(&socket_ctx[socket_id], socket_id);
2559 rt_init(&socket_ctx[socket_id], socket_id);
2563 check_all_ports_link_status(enabled_port_mask);
2565 /* launch per-lcore init on every lcore */
2566 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2567 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2568 if (rte_eal_wait_lcore(lcore_id) < 0)