1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
14 #include <sys/queue.h>
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
45 #include <rte_ip_frag.h>
50 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
52 #define MAX_JUMBO_PKT_LEN 9600
54 #define MEMPOOL_CACHE_SIZE 256
56 #define NB_MBUF (32000)
58 #define CDEV_QUEUE_DESC 2048
59 #define CDEV_MAP_ENTRIES 16384
60 #define CDEV_MP_NB_OBJS 1024
61 #define CDEV_MP_CACHE_SZ 64
62 #define MAX_QUEUE_PAIRS 1
64 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
68 /* Configure how many packets ahead to prefetch, when reading packets */
69 #define PREFETCH_OFFSET 3
71 #define MAX_RX_QUEUE_PER_LCORE 16
73 #define MAX_LCORE_PARAMS 1024
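/* A port is "unprotected" when its bit is set in unprotected_port_mask
 * (-u option): traffic received on it is treated as inbound IPsec,
 * while the remaining (protected) ports carry plaintext traffic that goes
 * through outbound processing (see process_pkts()).
 */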
75 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
78 * Configurable number of RX/TX ring descriptors
80 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
81 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
82 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
83 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
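/* Pack bytes a..h into a uint64_t so that they appear in that order in
 * memory on both big- and little-endian CPUs; used by ETHADDR() and
 * ETHADDR_TO_UINT64() to keep Ethernet addresses in wire order.
 */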
85 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
86 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
87 (((uint64_t)((a) & 0xff) << 56) | \
88 ((uint64_t)((b) & 0xff) << 48) | \
89 ((uint64_t)((c) & 0xff) << 40) | \
90 ((uint64_t)((d) & 0xff) << 32) | \
91 ((uint64_t)((e) & 0xff) << 24) | \
92 ((uint64_t)((f) & 0xff) << 16) | \
93 ((uint64_t)((g) & 0xff) << 8) | \
94 ((uint64_t)(h) & 0xff))
96 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
97 (((uint64_t)((h) & 0xff) << 56) | \
98 ((uint64_t)((g) & 0xff) << 48) | \
99 ((uint64_t)((f) & 0xff) << 40) | \
100 ((uint64_t)((e) & 0xff) << 32) | \
101 ((uint64_t)((d) & 0xff) << 24) | \
102 ((uint64_t)((c) & 0xff) << 16) | \
103 ((uint64_t)((b) & 0xff) << 8) | \
104 ((uint64_t)(a) & 0xff))
106 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
108 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
109 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
110 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
111 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
114 #define FRAG_TBL_BUCKET_ENTRIES 4
115 #define FRAG_TTL_MS (10 * MS_PER_S)
117 #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
119 /* port/source ethernet addr and destination ethernet addr */
120 struct ethaddr_info {
124 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
125 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
126 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
127 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
128 { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
131 #define CMD_LINE_OPT_CONFIG "config"
132 #define CMD_LINE_OPT_SINGLE_SA "single-sa"
133 #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
134 #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
135 #define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
136 #define CMD_LINE_OPT_REASSEMBLE "reassemble"
137 #define CMD_LINE_OPT_MTU "mtu"
140 /* long options mapped to a short option */
142 /* first long only option value must be >= 256, so that we won't
143 * conflict with short options
145 CMD_LINE_OPT_MIN_NUM = 256,
146 CMD_LINE_OPT_CONFIG_NUM,
147 CMD_LINE_OPT_SINGLE_SA_NUM,
148 CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
149 CMD_LINE_OPT_RX_OFFLOAD_NUM,
150 CMD_LINE_OPT_TX_OFFLOAD_NUM,
151 CMD_LINE_OPT_REASSEMBLE_NUM,
152 CMD_LINE_OPT_MTU_NUM,
155 static const struct option lgopts[] = {
156 {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
157 {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
158 {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
159 {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
160 {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
161 {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
162 {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
166 /* mask of enabled ports */
167 static uint32_t enabled_port_mask;
168 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
169 static uint32_t unprotected_port_mask;
170 static int32_t promiscuous_on = 1;
171 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
172 static uint32_t nb_lcores;
173 static uint32_t single_sa;
174 static uint32_t single_sa_idx;
177 * RX/TX HW offload capabilities to enable/use on ethernet ports.
178 * By default all capabilities are enabled.
180 static uint64_t dev_rx_offload = UINT64_MAX;
181 static uint64_t dev_tx_offload = UINT64_MAX;
184 * global values that determine multi-seg policy
186 static uint32_t frag_tbl_sz;
187 static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
188 static uint32_t mtu_size = RTE_ETHER_MTU;
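/* Multi-segment (scatter + fragmentation) support is needed whenever the
 * configured MTU (plus headroom) no longer fits into a single mbuf data
 * buffer, or when reassembly is requested; see multi_seg_required() below.
 */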
190 /* application wide librte_ipsec/SA parameters */
191 struct app_sa_prm app_sa_prm = {.enable = 0};
192 static const char *cfgfile;
194 struct lcore_rx_queue {
197 } __rte_cache_aligned;
199 struct lcore_params {
203 } __rte_cache_aligned;
205 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
207 static struct lcore_params *lcore_params;
208 static uint16_t nb_lcore_params;
210 static struct rte_hash *cdev_map_in;
211 static struct rte_hash *cdev_map_out;
215 struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
219 uint16_t nb_rx_queue;
220 struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
221 uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
222 struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
223 struct ipsec_ctx inbound;
224 struct ipsec_ctx outbound;
225 struct rt_ctx *rt4_ctx;
226 struct rt_ctx *rt6_ctx;
228 struct rte_ip_frag_tbl *tbl;
229 struct rte_mempool *pool_dir;
230 struct rte_mempool *pool_indir;
231 struct rte_ip_frag_death_row dr;
233 } __rte_cache_aligned;
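/* per-lcore run-time state, indexed by lcore id */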
235 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
237 static struct rte_eth_conf port_conf = {
239 .mq_mode = ETH_MQ_RX_RSS,
240 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
242 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
247 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
248 ETH_RSS_TCP | ETH_RSS_SCTP,
252 .mq_mode = ETH_MQ_TX_NONE,
256 static struct socket_ctx socket_ctx[NB_SOCKETS];
259 * Determine if multi-segment support is required:
260 * - either frame buffer size is smaller than mtu
261 * - or reassemble support is requested
264 multi_seg_required(void)
266 return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
267 frame_buf_size || frag_tbl_sz != 0);
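/* Trim L2 padding: if the mbuf holds more data than the length announced
 * by the IP header (plus the L2 header), drop the excess bytes.
 */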
271 adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
276 plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
277 if (plen < m->pkt_len) {
278 trim = m->pkt_len - plen;
279 rte_pktmbuf_trim(m, trim);
284 adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
289 plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
290 if (plen < m->pkt_len) {
291 trim = m->pkt_len - plen;
292 rte_pktmbuf_trim(m, trim);
297 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
299 const struct rte_ether_hdr *eth;
300 const struct rte_ipv4_hdr *iph4;
301 const struct rte_ipv6_hdr *iph6;
303 eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
304 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
306 iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
308 adjust_ipv4_pktlen(pkt, iph4, 0);
310 if (iph4->next_proto_id == IPPROTO_ESP)
311 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
313 t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
314 t->ip4.pkts[(t->ip4.num)++] = pkt;
317 pkt->l3_len = sizeof(*iph4);
318 } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
320 size_t l3len, ext_len;
323 /* get protocol type */
324 iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
326 adjust_ipv6_pktlen(pkt, iph6, 0);
328 next_proto = iph6->proto;
330 /* determine l3 header size up to ESP extension */
331 l3len = sizeof(struct ip6_hdr);
332 p = rte_pktmbuf_mtod(pkt, uint8_t *);
333 while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
334 (next_proto = rte_ipv6_get_next_ext(p + l3len,
335 next_proto, &ext_len)) >= 0)
338 /* drop packet when IPv6 header exceeds first segment length */
339 if (unlikely(l3len > pkt->data_len)) {
340 rte_pktmbuf_free(pkt);
344 if (next_proto == IPPROTO_ESP)
345 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
347 t->ip6.data[t->ip6.num] = &iph6->proto;
348 t->ip6.pkts[(t->ip6.num)++] = pkt;
353 /* Unknown/Unsupported type, drop the packet */
354 RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
355 rte_be_to_cpu_16(eth->ether_type));
356 rte_pktmbuf_free(pkt);
360 /* Check if the packet has been processed inline. For inline protocol
361 * processed packets, the metadata in the mbuf can be used to identify
362 * the security processing done on the packet. The metadata will be
363 * used to retrieve the application registered userdata associated
364 * with the security session.
367 if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
369 struct ipsec_mbuf_metadata *priv;
370 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
371 rte_eth_dev_get_sec_ctx(
374 /* Retrieve the userdata registered. Here, the userdata
375 * registered is the SA pointer.
378 sa = (struct ipsec_sa *)
379 rte_security_get_userdata(ctx, pkt->udata64);
382 /* userdata could not be retrieved */
386 /* Save SA as priv member in mbuf. This will be used in the
387 * IPsec selector(SP-SA) check.
390 priv = get_priv(pkt);
396 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
405 for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
406 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
408 prepare_one_packet(pkts[i], t);
410 /* Process left packets */
411 for (; i < nb_pkts; i++)
412 prepare_one_packet(pkts[i], t);
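/* Build the outgoing frame: prepend the Ethernet header, set l2/l3 lengths
 * and checksum offload flags (falling back to a software IPv4 checksum when
 * the offload is not enabled), and fill in the per-port source/destination
 * MAC addresses.
 */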
416 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
417 const struct lcore_conf *qconf)
420 struct rte_ether_hdr *ethhdr;
422 ip = rte_pktmbuf_mtod(pkt, struct ip *);
424 ethhdr = (struct rte_ether_hdr *)
425 rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
427 if (ip->ip_v == IPVERSION) {
428 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
429 pkt->l3_len = sizeof(struct ip);
430 pkt->l2_len = RTE_ETHER_HDR_LEN;
434 /* calculate IPv4 cksum in SW */
435 if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
436 ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
438 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
440 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
441 pkt->l3_len = sizeof(struct ip6_hdr);
442 pkt->l2_len = RTE_ETHER_HDR_LEN;
444 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
447 memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
448 sizeof(struct rte_ether_addr));
449 memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
450 sizeof(struct rte_ether_addr));
454 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
455 const struct lcore_conf *qconf)
458 const int32_t prefetch_offset = 2;
460 for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
461 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
462 prepare_tx_pkt(pkts[i], port, qconf);
464 /* Process left packets */
465 for (; i < nb_pkts; i++)
466 prepare_tx_pkt(pkts[i], port, qconf);
469 /* Send burst of packets on an output interface */
470 static inline int32_t
471 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
473 struct rte_mbuf **m_table;
477 queueid = qconf->tx_queue_id[port];
478 m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
480 prepare_tx_burst(m_table, n, port, qconf);
482 ret = rte_eth_tx_burst(port, queueid, m_table, n);
483 if (unlikely(ret < n)) {
485 rte_pktmbuf_free(m_table[ret]);
493 * Helper function to fragment and queue for TX one packet.
495 static inline uint32_t
496 send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
497 uint16_t port, uint8_t proto)
503 tbl = qconf->tx_mbufs + port;
506 /* free space for new fragments */
507 if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
508 send_burst(qconf, len, port);
512 n = RTE_DIM(tbl->m_table) - len;
514 if (proto == IPPROTO_IP)
515 rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
516 n, mtu_size, qconf->frag.pool_dir,
517 qconf->frag.pool_indir);
519 rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
520 n, mtu_size, qconf->frag.pool_dir,
521 qconf->frag.pool_indir);
527 "%s: failed to fragment packet with size %u, "
529 __func__, m->pkt_len, rte_errno);
535 /* Enqueue a single packet, and send burst if queue is filled */
536 static inline int32_t
537 send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
541 struct lcore_conf *qconf;
543 lcore_id = rte_lcore_id();
545 qconf = &lcore_conf[lcore_id];
546 len = qconf->tx_mbufs[port].len;
548 if (m->pkt_len <= mtu_size) {
549 qconf->tx_mbufs[port].m_table[len] = m;
552 /* need to fragment the packet */
553 } else if (frag_tbl_sz > 0)
554 len = send_fragment_packet(qconf, m, port, proto);
558 /* enough pkts to be sent */
559 if (unlikely(len == MAX_PKT_BURST)) {
560 send_burst(qconf, MAX_PKT_BURST, port);
564 qconf->tx_mbufs[port].len = len;
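/* Inbound security policy check: classify packets against the SP ACL and,
 * for packets that went through IPsec processing, verify that they match
 * the SA referenced by the policy (inbound_sa_check()).
 */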
569 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
573 uint32_t i, j, res, sa_idx;
575 if (ip->num == 0 || sp == NULL)
578 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
579 ip->num, DEFAULT_MAX_CATEGORIES);
582 for (i = 0; i < ip->num; i++) {
589 if (res == DISCARD) {
594 /* Only check SPI match for processed IPSec packets */
595 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
600 sa_idx = SPI2IDX(res);
601 if (!inbound_sa_check(sa, m, sa_idx)) {
611 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
620 for (i = 0; i < num; i++) {
623 ip = rte_pktmbuf_mtod(m, struct ip *);
625 if (ip->ip_v == IPVERSION) {
626 trf->ip4.pkts[n4] = m;
627 trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
628 uint8_t *, offsetof(struct ip, ip_p));
630 } else if (ip->ip_v == IP6_VERSION) {
631 trf->ip6.pkts[n6] = m;
632 trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
634 offsetof(struct ip6_hdr, ip6_nxt));
646 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
647 struct ipsec_traffic *traffic)
649 uint16_t nb_pkts_in, n_ip4, n_ip6;
651 n_ip4 = traffic->ip4.num;
652 n_ip6 = traffic->ip6.num;
654 if (app_sa_prm.enable == 0) {
655 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
656 traffic->ipsec.num, MAX_PKT_BURST);
657 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
659 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
660 traffic->ipsec.saptr, traffic->ipsec.num);
661 ipsec_process(ipsec_ctx, traffic);
664 inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
667 inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
672 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
673 struct traffic_type *ipsec)
676 uint32_t i, j, sa_idx;
678 if (ip->num == 0 || sp == NULL)
681 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
682 ip->num, DEFAULT_MAX_CATEGORIES);
685 for (i = 0; i < ip->num; i++) {
687 sa_idx = SPI2IDX(ip->res[i]);
688 if (ip->res[i] == DISCARD)
690 else if (ip->res[i] == BYPASS)
693 ipsec->res[ipsec->num] = sa_idx;
694 ipsec->pkts[ipsec->num++] = m;
701 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
702 struct ipsec_traffic *traffic)
705 uint16_t idx, nb_pkts_out, i;
707 /* Drop any IPsec traffic from protected ports */
708 for (i = 0; i < traffic->ipsec.num; i++)
709 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
711 traffic->ipsec.num = 0;
713 outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
715 outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
717 if (app_sa_prm.enable == 0) {
719 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
720 traffic->ipsec.res, traffic->ipsec.num,
723 for (i = 0; i < nb_pkts_out; i++) {
724 m = traffic->ipsec.pkts[i];
725 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
726 if (ip->ip_v == IPVERSION) {
727 idx = traffic->ip4.num++;
728 traffic->ip4.pkts[idx] = m;
730 idx = traffic->ip6.num++;
731 traffic->ip6.pkts[idx] = m;
735 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
736 traffic->ipsec.saptr, traffic->ipsec.num);
737 ipsec_process(ipsec_ctx, traffic);
742 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
743 struct ipsec_traffic *traffic)
746 uint32_t nb_pkts_in, i, idx;
748 /* Drop any IPv4 traffic from unprotected ports */
749 for (i = 0; i < traffic->ip4.num; i++)
750 rte_pktmbuf_free(traffic->ip4.pkts[i]);
752 traffic->ip4.num = 0;
754 /* Drop any IPv6 traffic from unprotected ports */
755 for (i = 0; i < traffic->ip6.num; i++)
756 rte_pktmbuf_free(traffic->ip6.pkts[i]);
758 traffic->ip6.num = 0;
760 if (app_sa_prm.enable == 0) {
762 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
763 traffic->ipsec.num, MAX_PKT_BURST);
765 for (i = 0; i < nb_pkts_in; i++) {
766 m = traffic->ipsec.pkts[i];
767 struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
768 if (ip->ip_v == IPVERSION) {
769 idx = traffic->ip4.num++;
770 traffic->ip4.pkts[idx] = m;
772 idx = traffic->ip6.num++;
773 traffic->ip6.pkts[idx] = m;
777 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
778 traffic->ipsec.saptr, traffic->ipsec.num);
779 ipsec_process(ipsec_ctx, traffic);
784 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
785 struct ipsec_traffic *traffic)
788 uint32_t nb_pkts_out, i, n;
791 /* Drop any IPsec traffic from protected ports */
792 for (i = 0; i < traffic->ipsec.num; i++)
793 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
797 for (i = 0; i < traffic->ip4.num; i++) {
798 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
799 traffic->ipsec.res[n++] = single_sa_idx;
802 for (i = 0; i < traffic->ip6.num; i++) {
803 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
804 traffic->ipsec.res[n++] = single_sa_idx;
807 traffic->ip4.num = 0;
808 traffic->ip6.num = 0;
809 traffic->ipsec.num = n;
811 if (app_sa_prm.enable == 0) {
813 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
814 traffic->ipsec.res, traffic->ipsec.num,
817 /* They all use the same SA (ip4 or ip6 tunnel) */
818 m = traffic->ipsec.pkts[0];
819 ip = rte_pktmbuf_mtod(m, struct ip *);
820 if (ip->ip_v == IPVERSION) {
821 traffic->ip4.num = nb_pkts_out;
822 for (i = 0; i < nb_pkts_out; i++)
823 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
825 traffic->ip6.num = nb_pkts_out;
826 for (i = 0; i < nb_pkts_out; i++)
827 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
830 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
831 traffic->ipsec.saptr, traffic->ipsec.num);
832 ipsec_process(ipsec_ctx, traffic);
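/* For inline-offloaded packets the output port is taken from the SA saved
 * in the mbuf private data, so no LPM lookup is needed; the value is
 * returned in the same format as an LPM lookup result.
 */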
836 static inline int32_t
837 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
839 struct ipsec_mbuf_metadata *priv;
842 priv = get_priv(pkt);
845 if (unlikely(sa == NULL)) {
846 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
854 return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
865 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
867 uint32_t hop[MAX_PKT_BURST * 2];
868 uint32_t dst_ip[MAX_PKT_BURST * 2];
871 uint16_t lpm_pkts = 0;
876 /* Need to do an LPM lookup for non-inline packets. Inline packets will
877 * have port ID in the SA
880 for (i = 0; i < nb_pkts; i++) {
881 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
882 /* Security offload not enabled. So an LPM lookup is
883 * required to get the hop
885 offset = offsetof(struct ip, ip_dst);
886 dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
888 dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
893 rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
897 for (i = 0; i < nb_pkts; i++) {
898 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
899 /* Read hop from the SA */
900 pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
902 /* Need to use hop returned by lookup */
903 pkt_hop = hop[lpm_pkts++];
906 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
907 rte_pktmbuf_free(pkts[i]);
910 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
915 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
917 int32_t hop[MAX_PKT_BURST * 2];
918 uint8_t dst_ip[MAX_PKT_BURST * 2][16];
922 uint16_t lpm_pkts = 0;
927 /* Need to do an LPM lookup for non-inline packets. Inline packets will
928 * have port ID in the SA
931 for (i = 0; i < nb_pkts; i++) {
932 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
933 /* Security offload not enabled. So an LPM lookup is
934 * required to get the hop
936 offset = offsetof(struct ip6_hdr, ip6_dst);
937 ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
939 memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
944 rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
949 for (i = 0; i < nb_pkts; i++) {
950 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
951 /* Read hop from the SA */
952 pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
954 /* Need to use hop returned by lookup */
955 pkt_hop = hop[lpm_pkts++];
959 rte_pktmbuf_free(pkts[i]);
962 send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
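/* Per-burst entry point: classify the received burst, run inbound or
 * outbound IPsec processing depending on whether the receiving port is
 * unprotected, then route the resulting IPv4/IPv6 packets.
 */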
967 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
968 uint8_t nb_pkts, uint16_t portid)
970 struct ipsec_traffic traffic;
972 prepare_traffic(pkts, &traffic, nb_pkts);
974 if (unlikely(single_sa)) {
975 if (UNPROTECTED_PORT(portid))
976 process_pkts_inbound_nosp(&qconf->inbound, &traffic);
978 process_pkts_outbound_nosp(&qconf->outbound, &traffic);
980 if (UNPROTECTED_PORT(portid))
981 process_pkts_inbound(&qconf->inbound, &traffic);
983 process_pkts_outbound(&qconf->outbound, &traffic);
986 route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
987 route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
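/* Flush any packets still buffered in the per-port TX tables (called from
 * the main loop when the drain timeout expires).
 */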
991 drain_tx_buffers(struct lcore_conf *qconf)
996 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
997 buf = &qconf->tx_mbufs[portid];
1000 send_burst(qconf, buf->len, portid);
1006 drain_crypto_buffers(struct lcore_conf *qconf)
1009 struct ipsec_ctx *ctx;
1011 /* drain inbound buffers*/
1012 ctx = &qconf->inbound;
1013 for (i = 0; i != ctx->nb_qps; i++) {
1014 if (ctx->tbl[i].len != 0)
1015 enqueue_cop_burst(ctx->tbl + i);
1018 /* drain outbound buffers*/
1019 ctx = &qconf->outbound;
1020 for (i = 0; i != ctx->nb_qps; i++) {
1021 if (ctx->tbl[i].len != 0)
1022 enqueue_cop_burst(ctx->tbl + i);
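/* Dequeue completed inbound crypto operations and finish their processing:
 * SP/SA checks followed by routing of the decrypted packets.
 */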
1027 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
1028 struct ipsec_ctx *ctx)
1031 struct ipsec_traffic trf;
1033 if (app_sa_prm.enable == 0) {
1035 /* dequeue packets from crypto-queue */
1036 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1037 RTE_DIM(trf.ipsec.pkts));
1042 /* split traffic by ipv4-ipv6 */
1043 split46_traffic(&trf, trf.ipsec.pkts, n);
1045 ipsec_cqp_process(ctx, &trf);
1047 /* process ipv4 packets */
1048 if (trf.ip4.num != 0) {
1049 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
1050 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1053 /* process ipv6 packets */
1054 if (trf.ip6.num != 0) {
1055 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
1056 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1061 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
1062 struct ipsec_ctx *ctx)
1065 struct ipsec_traffic trf;
1067 if (app_sa_prm.enable == 0) {
1069 /* dequeue packets from crypto-queue */
1070 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
1071 RTE_DIM(trf.ipsec.pkts));
1076 /* split traffic by ipv4-ipv6 */
1077 split46_traffic(&trf, trf.ipsec.pkts, n);
1079 ipsec_cqp_process(ctx, &trf);
1081 /* process ipv4 packets */
1082 if (trf.ip4.num != 0)
1083 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
1085 /* process ipv6 packets */
1086 if (trf.ip6.num != 0)
1087 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
1090 /* main processing loop */
1092 main_loop(__attribute__((unused)) void *dummy)
1094 struct rte_mbuf *pkts[MAX_PKT_BURST];
1096 uint64_t prev_tsc, diff_tsc, cur_tsc;
1100 struct lcore_conf *qconf;
1102 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1103 / US_PER_S * BURST_TX_DRAIN_US;
1104 struct lcore_rx_queue *rxql;
1107 lcore_id = rte_lcore_id();
1108 qconf = &lcore_conf[lcore_id];
1109 rxql = qconf->rx_queue_list;
1110 socket_id = rte_lcore_to_socket_id(lcore_id);
1112 qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
1113 qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
1114 qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
1115 qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
1116 qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
1117 qconf->inbound.cdev_map = cdev_map_in;
1118 qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1119 qconf->inbound.session_priv_pool =
1120 socket_ctx[socket_id].session_priv_pool;
1121 qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1122 qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1123 qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1124 qconf->outbound.cdev_map = cdev_map_out;
1125 qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1126 qconf->outbound.session_priv_pool =
1127 socket_ctx[socket_id].session_priv_pool;
1128 qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
1129 qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1131 if (qconf->nb_rx_queue == 0) {
1132 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1137 RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1139 for (i = 0; i < qconf->nb_rx_queue; i++) {
1140 portid = rxql[i].port_id;
1141 queueid = rxql[i].queue_id;
1142 RTE_LOG(INFO, IPSEC,
1143 " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1144 lcore_id, portid, queueid);
1148 cur_tsc = rte_rdtsc();
1150 /* TX queue buffer drain */
1151 diff_tsc = cur_tsc - prev_tsc;
1153 if (unlikely(diff_tsc > drain_tsc)) {
1154 drain_tx_buffers(qconf);
1155 drain_crypto_buffers(qconf);
1159 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1161 /* Read packets from RX queues */
1162 portid = rxql[i].port_id;
1163 queueid = rxql[i].queue_id;
1164 nb_rx = rte_eth_rx_burst(portid, queueid,
1165 pkts, MAX_PKT_BURST);
1168 process_pkts(qconf, pkts, nb_rx, portid);
1170 /* dequeue and process completed crypto-ops */
1171 if (UNPROTECTED_PORT(portid))
1172 drain_inbound_crypto_queues(qconf,
1175 drain_outbound_crypto_queues(qconf,
1189 if (lcore_params == NULL) {
1190 printf("Error: No port/queue/core mappings\n");
1194 for (i = 0; i < nb_lcore_params; ++i) {
1195 lcore = lcore_params[i].lcore_id;
1196 if (!rte_lcore_is_enabled(lcore)) {
1197 printf("error: lcore %hhu is not enabled in "
1198 "lcore mask\n", lcore);
1201 socket_id = rte_lcore_to_socket_id(lcore);
1202 if (socket_id != 0 && numa_on == 0) {
1203 printf("warning: lcore %hhu is on socket %d "
1207 portid = lcore_params[i].port_id;
1208 if ((enabled_port_mask & (1 << portid)) == 0) {
1209 printf("port %u is not enabled in port mask\n", portid);
1212 if (!rte_eth_dev_is_valid_port(portid)) {
1213 printf("port %u is not present on the board\n", portid);
1221 get_port_nb_rx_queues(const uint16_t port)
1226 for (i = 0; i < nb_lcore_params; ++i) {
1227 if (lcore_params[i].port_id == port &&
1228 lcore_params[i].queue_id > queue)
1229 queue = lcore_params[i].queue_id;
1231 return (uint8_t)(++queue);
1235 init_lcore_rx_queues(void)
1237 uint16_t i, nb_rx_queue;
1240 for (i = 0; i < nb_lcore_params; ++i) {
1241 lcore = lcore_params[i].lcore_id;
1242 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1243 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1244 printf("error: too many queues (%u) for lcore: %u\n",
1245 nb_rx_queue + 1, lcore);
1248 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1249 lcore_params[i].port_id;
1250 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1251 lcore_params[i].queue_id;
1252 lcore_conf[lcore].nb_rx_queue++;
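/* Illustrative invocation (the EAL core list, port masks and configuration
 * file name below are placeholders, not defaults):
 *   ./ipsec-secgw -l 1,2 -n 4 -- -p 0x3 -P -u 0x1 -f ep0.cfg \
 *        --config="(0,0,1),(1,0,2)"
 */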
1259 print_usage(const char *prgname)
1261 fprintf(stderr, "%s [EAL options] --"
1267 " [-w REPLAY_WINDOW_SIZE]"
1271 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1272 " [--single-sa SAIDX]"
1273 " [--cryptodev_mask MASK]"
1274 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1275 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1276 " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
1277 " [--" CMD_LINE_OPT_MTU " MTU]"
1279 " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1280 " -P : Enable promiscuous mode\n"
1281 " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1282 " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
1283 " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
1284 " -l enables code-path that uses librte_ipsec\n"
1285 " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1286 " size for each SA\n"
1288 " -a enables SA SQN atomic behaviour\n"
1289 " -f CONFIG_FILE: Configuration file\n"
1290 " --config (port,queue,lcore): Rx queue configuration\n"
1291 " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1292 " bypassing the SP\n"
1293 " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1294 " devices to configure\n"
1295 " --" CMD_LINE_OPT_RX_OFFLOAD
1296 ": bitmask of the RX HW offload capabilities to enable/use\n"
1297 " (DEV_RX_OFFLOAD_*)\n"
1298 " --" CMD_LINE_OPT_TX_OFFLOAD
1299 ": bitmask of the TX HW offload capabilities to enable/use\n"
1300 " (DEV_TX_OFFLOAD_*)\n"
1301 " --" CMD_LINE_OPT_REASSEMBLE " NUM"
1302 ": max number of entries in reassemble(fragment) table\n"
1303 " (zero (default value) disables reassembly)\n"
1304 " --" CMD_LINE_OPT_MTU " MTU"
1305 ": MTU value on all ports (default value: 1500)\n"
1306 " outgoing packets with bigger size will be fragmented\n"
1307 " incoming packets with bigger size will be discarded\n"
1313 parse_mask(const char *str, uint64_t *val)
1319 t = strtoul(str, &end, 0);
1320 if (errno != 0 || end[0] != 0)
1328 parse_portmask(const char *portmask)
1333 /* parse hexadecimal string */
1334 pm = strtoul(portmask, &end, 16);
1335 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1338 if ((pm == 0) && errno)
1345 parse_decimal(const char *str)
1350 num = strtoul(str, &end, 10);
1351 if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
1358 parse_config(const char *q_arg)
1361 const char *p, *p0 = q_arg;
1369 unsigned long int_fld[_NUM_FLD];
1370 char *str_fld[_NUM_FLD];
1374 nb_lcore_params = 0;
1376 while ((p = strchr(p0, '(')) != NULL) {
1378 p0 = strchr(p, ')');
1383 if (size >= sizeof(s))
1386 snprintf(s, sizeof(s), "%.*s", size, p);
1387 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1390 for (i = 0; i < _NUM_FLD; i++) {
1392 int_fld[i] = strtoul(str_fld[i], &end, 0);
1393 if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1396 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1397 printf("exceeded max number of lcore params: %hu\n",
1401 lcore_params_array[nb_lcore_params].port_id =
1402 (uint8_t)int_fld[FLD_PORT];
1403 lcore_params_array[nb_lcore_params].queue_id =
1404 (uint8_t)int_fld[FLD_QUEUE];
1405 lcore_params_array[nb_lcore_params].lcore_id =
1406 (uint8_t)int_fld[FLD_LCORE];
1409 lcore_params = lcore_params_array;
1414 print_app_sa_prm(const struct app_sa_prm *prm)
1416 printf("librte_ipsec usage: %s\n",
1417 (prm->enable == 0) ? "disabled" : "enabled");
1419 if (prm->enable == 0)
1422 printf("replay window size: %u\n", prm->window_size);
1423 printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1424 printf("SA flags: %#" PRIx64 "\n", prm->flags);
1428 parse_args(int32_t argc, char **argv)
1432 int32_t option_index;
1433 char *prgname = argv[0];
1434 int32_t f_present = 0;
1438 while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
1439 lgopts, &option_index)) != EOF) {
1443 enabled_port_mask = parse_portmask(optarg);
1444 if (enabled_port_mask == 0) {
1445 printf("invalid portmask\n");
1446 print_usage(prgname);
1451 printf("Promiscuous mode selected\n");
1455 unprotected_port_mask = parse_portmask(optarg);
1456 if (unprotected_port_mask == 0) {
1457 printf("invalid unprotected portmask\n");
1458 print_usage(prgname);
1463 if (f_present == 1) {
1464 printf("\"-f\" option present more than "
1466 print_usage(prgname);
1473 ret = parse_decimal(optarg);
1474 if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
1476 printf("Invalid frame buffer size value: %s\n",
1478 print_usage(prgname);
1481 frame_buf_size = ret;
1482 printf("Custom frame buffer size %u\n", frame_buf_size);
1485 app_sa_prm.enable = 1;
1488 app_sa_prm.enable = 1;
1489 app_sa_prm.window_size = parse_decimal(optarg);
1492 app_sa_prm.enable = 1;
1493 app_sa_prm.enable_esn = 1;
1496 app_sa_prm.enable = 1;
1497 app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1499 case CMD_LINE_OPT_CONFIG_NUM:
1500 ret = parse_config(optarg);
1502 printf("Invalid config\n");
1503 print_usage(prgname);
1507 case CMD_LINE_OPT_SINGLE_SA_NUM:
1508 ret = parse_decimal(optarg);
1510 printf("Invalid argument[sa_idx]\n");
1511 print_usage(prgname);
1517 single_sa_idx = ret;
1518 printf("Configured with single SA index %u\n",
1521 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1522 ret = parse_portmask(optarg);
1524 printf("Invalid argument[portmask]\n");
1525 print_usage(prgname);
1530 enabled_cryptodev_mask = ret;
1532 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1533 ret = parse_mask(optarg, &dev_rx_offload);
1535 printf("Invalid argument for \'%s\': %s\n",
1536 CMD_LINE_OPT_RX_OFFLOAD, optarg);
1537 print_usage(prgname);
1541 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1542 ret = parse_mask(optarg, &dev_tx_offload);
1544 printf("Invalid argument for \'%s\': %s\n",
1545 CMD_LINE_OPT_TX_OFFLOAD, optarg);
1546 print_usage(prgname);
1550 case CMD_LINE_OPT_REASSEMBLE_NUM:
1551 ret = parse_decimal(optarg);
1553 printf("Invalid argument for \'%s\': %s\n",
1554 CMD_LINE_OPT_REASSEMBLE, optarg);
1555 print_usage(prgname);
1560 case CMD_LINE_OPT_MTU_NUM:
1561 ret = parse_decimal(optarg);
1562 if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
1563 printf("Invalid argument for \'%s\': %s\n",
1564 CMD_LINE_OPT_MTU, optarg);
1565 print_usage(prgname);
1571 print_usage(prgname);
1576 if (f_present == 0) {
1577 printf("Mandatory option \"-f\" not present\n");
1581 /* check whether we need to enable multi-seg support */
1582 if (multi_seg_required()) {
1583 /* legacy mode doesn't support multi-seg */
1584 app_sa_prm.enable = 1;
1585 printf("frame buf size: %u, mtu: %u, "
1586 "number of reassemble entries: %u\n"
1587 "multi-segment support is required\n",
1588 frame_buf_size, mtu_size, frag_tbl_sz);
1591 print_app_sa_prm(&app_sa_prm);
1594 argv[optind-1] = prgname;
1597 optind = 1; /* reset getopt lib */
1602 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1604 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1605 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1606 printf("%s%s", name, buf);
1610 * Update destination ethaddr for the port.
1613 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1615 if (port >= RTE_DIM(ethaddr_tbl))
1618 ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1622 /* Check the link status of all ports for up to 9s, then print the final status */
1624 check_all_ports_link_status(uint32_t port_mask)
1626 #define CHECK_INTERVAL 100 /* 100ms */
1627 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1629 uint8_t count, all_ports_up, print_flag = 0;
1630 struct rte_eth_link link;
1633 printf("\nChecking link status");
1635 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1637 RTE_ETH_FOREACH_DEV(portid) {
1638 if ((port_mask & (1 << portid)) == 0)
1640 memset(&link, 0, sizeof(link));
1641 ret = rte_eth_link_get_nowait(portid, &link);
1644 if (print_flag == 1)
1645 printf("Port %u link get failed: %s\n",
1646 portid, rte_strerror(-ret));
1649 /* print link status if flag set */
1650 if (print_flag == 1) {
1651 if (link.link_status)
1653 "Port%d Link Up - speed %u Mbps -%s\n",
1654 portid, link.link_speed,
1655 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1656 ("full-duplex") : ("half-duplex\n"));
1658 printf("Port %d Link Down\n", portid);
1661 /* clear all_ports_up flag if any link down */
1662 if (link.link_status == ETH_LINK_DOWN) {
1667 /* after finally printing all link status, get out */
1668 if (print_flag == 1)
1671 if (all_ports_up == 0) {
1674 rte_delay_ms(CHECK_INTERVAL);
1677 /* set the print_flag if all ports up or timeout */
1678 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1686 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1687 uint16_t qp, struct lcore_params *params,
1688 struct ipsec_ctx *ipsec_ctx,
1689 const struct rte_cryptodev_capabilities *cipher,
1690 const struct rte_cryptodev_capabilities *auth,
1691 const struct rte_cryptodev_capabilities *aead)
1695 struct cdev_key key = { 0 };
1697 key.lcore_id = params->lcore_id;
1699 key.cipher_algo = cipher->sym.cipher.algo;
1701 key.auth_algo = auth->sym.auth.algo;
1703 key.aead_algo = aead->sym.aead.algo;
1705 ret = rte_hash_lookup(map, &key);
1709 for (i = 0; i < ipsec_ctx->nb_qps; i++)
1710 if (ipsec_ctx->tbl[i].id == cdev_id)
1713 if (i == ipsec_ctx->nb_qps) {
1714 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1715 printf("Maximum number of crypto devices assigned to "
1716 "a core, increase MAX_QP_PER_LCORE value\n");
1719 ipsec_ctx->tbl[i].id = cdev_id;
1720 ipsec_ctx->tbl[i].qp = qp;
1721 ipsec_ctx->nb_qps++;
1722 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1723 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1727 ret = rte_hash_add_key_data(map, &key, (void *)i);
1729 printf("Faled to insert cdev mapping for (lcore %u, "
1730 "cdev %u, qp %u), errno %d\n",
1731 key.lcore_id, ipsec_ctx->tbl[i].id,
1732 ipsec_ctx->tbl[i].qp, ret);
1740 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1741 uint16_t qp, struct lcore_params *params)
1744 const struct rte_cryptodev_capabilities *i, *j;
1745 struct rte_hash *map;
1746 struct lcore_conf *qconf;
1747 struct ipsec_ctx *ipsec_ctx;
1750 qconf = &lcore_conf[params->lcore_id];
1752 if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1754 ipsec_ctx = &qconf->outbound;
1758 ipsec_ctx = &qconf->inbound;
1762 /* Require cryptodevs with operation chaining */
1763 if (!(dev_info->feature_flags &
1764 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1767 for (i = dev_info->capabilities;
1768 i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1769 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1772 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1773 ret |= add_mapping(map, str, cdev_id, qp, params,
1774 ipsec_ctx, NULL, NULL, i);
1778 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1781 for (j = dev_info->capabilities;
1782 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1783 if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1786 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1789 ret |= add_mapping(map, str, cdev_id, qp, params,
1790 ipsec_ctx, i, j, NULL);
1797 /* Check if the device is enabled by cryptodev_mask */
1799 check_cryptodev_mask(uint8_t cdev_id)
1801 if (enabled_cryptodev_mask & (1 << cdev_id))
1808 cryptodevs_init(void)
1810 struct rte_cryptodev_config dev_conf;
1811 struct rte_cryptodev_qp_conf qp_conf;
1812 uint16_t idx, max_nb_qps, qp, i;
1814 struct rte_hash_parameters params = { 0 };
1816 const uint64_t mseg_flag = multi_seg_required() ?
1817 RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
1819 params.entries = CDEV_MAP_ENTRIES;
1820 params.key_len = sizeof(struct cdev_key);
1821 params.hash_func = rte_jhash;
1822 params.hash_func_init_val = 0;
1823 params.socket_id = rte_socket_id();
1825 params.name = "cdev_map_in";
1826 cdev_map_in = rte_hash_create(&params);
1827 if (cdev_map_in == NULL)
1828 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1831 params.name = "cdev_map_out";
1832 cdev_map_out = rte_hash_create(&params);
1833 if (cdev_map_out == NULL)
1834 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1837 printf("lcore/cryptodev/qp mappings:\n");
1840 for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1841 struct rte_cryptodev_info cdev_info;
1843 if (check_cryptodev_mask((uint8_t)cdev_id))
1846 rte_cryptodev_info_get(cdev_id, &cdev_info);
1848 if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
1849 rte_exit(EXIT_FAILURE,
1850 "Device %hd does not support \'%s\' feature\n",
1852 rte_cryptodev_get_feature_name(mseg_flag));
1854 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1855 max_nb_qps = cdev_info.max_nb_queue_pairs;
1857 max_nb_qps = nb_lcore_params;
1861 while (qp < max_nb_qps && i < nb_lcore_params) {
1862 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1863 &lcore_params[idx]))
1866 idx = idx % nb_lcore_params;
1873 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1874 dev_conf.nb_queue_pairs = qp;
1875 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1877 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1878 if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
1879 rte_exit(EXIT_FAILURE,
1880 "Device does not support at least %u "
1881 "sessions", CDEV_MP_NB_OBJS);
1883 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1884 rte_panic("Failed to initialize cryptodev %u\n",
1887 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1888 qp_conf.mp_session =
1889 socket_ctx[dev_conf.socket_id].session_pool;
1890 qp_conf.mp_session_private =
1891 socket_ctx[dev_conf.socket_id].session_priv_pool;
1892 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1893 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1894 &qp_conf, dev_conf.socket_id))
1895 rte_panic("Failed to setup queue %u for "
1896 "cdev_id %u\n", 0, cdev_id);
1898 if (rte_cryptodev_start(cdev_id))
1899 rte_panic("Failed to start cryptodev %u\n",
1909 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1911 uint32_t frame_size;
1912 struct rte_eth_dev_info dev_info;
1913 struct rte_eth_txconf *txconf;
1914 uint16_t nb_tx_queue, nb_rx_queue;
1915 uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1916 int32_t ret, socket_id;
1917 struct lcore_conf *qconf;
1918 struct rte_ether_addr ethaddr;
1919 struct rte_eth_conf local_port_conf = port_conf;
1921 ret = rte_eth_dev_info_get(portid, &dev_info);
1923 rte_exit(EXIT_FAILURE,
1924 "Error during getting device (port %u) info: %s\n",
1925 portid, strerror(-ret));
1927 /* limit allowed HW offloads, as user requested */
1928 dev_info.rx_offload_capa &= dev_rx_offload;
1929 dev_info.tx_offload_capa &= dev_tx_offload;
1931 printf("Configuring device port %u:\n", portid);
1933 ret = rte_eth_macaddr_get(portid, &ethaddr);
1935 rte_exit(EXIT_FAILURE,
1936 "Error getting MAC address (port %u): %s\n",
1937 portid, rte_strerror(-ret));
1939 ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
1940 print_ethaddr("Address: ", &ethaddr);
1943 nb_rx_queue = get_port_nb_rx_queues(portid);
1944 nb_tx_queue = nb_lcores;
1946 if (nb_rx_queue > dev_info.max_rx_queues)
1947 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1948 "(max rx queue is %u)\n",
1949 nb_rx_queue, dev_info.max_rx_queues);
1951 if (nb_tx_queue > dev_info.max_tx_queues)
1952 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1953 "(max tx queue is %u)\n",
1954 nb_tx_queue, dev_info.max_tx_queues);
1956 printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1957 nb_rx_queue, nb_tx_queue);
1959 frame_size = MTU_TO_FRAMELEN(mtu_size);
1960 if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
1961 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1962 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1964 if (multi_seg_required()) {
1965 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
1966 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1969 local_port_conf.rxmode.offloads |= req_rx_offloads;
1970 local_port_conf.txmode.offloads |= req_tx_offloads;
1972 /* Check that all required capabilities are supported */
1973 if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1974 local_port_conf.rxmode.offloads)
1975 rte_exit(EXIT_FAILURE,
1976 "Error: port %u required RX offloads: 0x%" PRIx64
1977 ", avaialbe RX offloads: 0x%" PRIx64 "\n",
1978 portid, local_port_conf.rxmode.offloads,
1979 dev_info.rx_offload_capa);
1981 if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1982 local_port_conf.txmode.offloads)
1983 rte_exit(EXIT_FAILURE,
1984 "Error: port %u required TX offloads: 0x%" PRIx64
1985 ", avaialbe TX offloads: 0x%" PRIx64 "\n",
1986 portid, local_port_conf.txmode.offloads,
1987 dev_info.tx_offload_capa);
1989 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1990 local_port_conf.txmode.offloads |=
1991 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1993 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
1994 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
1996 printf("port %u configurng rx_offloads=0x%" PRIx64
1997 ", tx_offloads=0x%" PRIx64 "\n",
1998 portid, local_port_conf.rxmode.offloads,
1999 local_port_conf.txmode.offloads);
2001 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
2002 dev_info.flow_type_rss_offloads;
2003 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
2004 port_conf.rx_adv_conf.rss_conf.rss_hf) {
2005 printf("Port %u modified RSS hash function based on hardware support,"
2006 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
2008 port_conf.rx_adv_conf.rss_conf.rss_hf,
2009 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
2012 ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
2015 rte_exit(EXIT_FAILURE, "Cannot configure device: "
2016 "err=%d, port=%d\n", ret, portid);
2018 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
2020 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
2021 "err=%d, port=%d\n", ret, portid);
2023 /* init one TX queue per lcore */
2025 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2026 if (rte_lcore_is_enabled(lcore_id) == 0)
2030 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2035 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
2037 txconf = &dev_info.default_txconf;
2038 txconf->offloads = local_port_conf.txmode.offloads;
2040 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
2043 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
2044 "err=%d, port=%d\n", ret, portid);
2046 qconf = &lcore_conf[lcore_id];
2047 qconf->tx_queue_id[portid] = tx_queueid;
2049 /* Pre-populate pkt offloads based on capabilities */
2050 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
2051 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
2052 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
2053 qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
2057 /* init RX queues */
2058 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
2059 struct rte_eth_rxconf rxq_conf;
2061 if (portid != qconf->rx_queue_list[queue].port_id)
2064 rx_queueid = qconf->rx_queue_list[queue].queue_id;
2066 printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
2069 rxq_conf = dev_info.default_rxconf;
2070 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2071 ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2072 nb_rxd, socket_id, &rxq_conf,
2073 socket_ctx[socket_id].mbuf_pool);
2075 rte_exit(EXIT_FAILURE,
2076 "rte_eth_rx_queue_setup: err=%d, "
2077 "port=%d\n", ret, portid);
2084 max_session_size(void)
2088 int16_t cdev_id, port_id, n;
2091 n = rte_cryptodev_count();
2092 for (cdev_id = 0; cdev_id != n; cdev_id++) {
2093 sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2097 * If crypto device is security capable, need to check the
2098 * size of security session as well.
2101 /* Get security context of the crypto device */
2102 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
2103 if (sec_ctx == NULL)
2106 /* Get size of security session */
2107 sz = rte_security_session_get_size(sec_ctx);
2112 RTE_ETH_FOREACH_DEV(port_id) {
2113 if ((enabled_port_mask & (1 << port_id)) == 0)
2116 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
2117 if (sec_ctx == NULL)
2120 sz = rte_security_session_get_size(sec_ctx);
2129 session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
2131 char mp_name[RTE_MEMPOOL_NAMESIZE];
2132 struct rte_mempool *sess_mp;
2134 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2135 "sess_mp_%u", socket_id);
2136 sess_mp = rte_cryptodev_sym_session_pool_create(
2137 mp_name, CDEV_MP_NB_OBJS,
2138 sess_sz, CDEV_MP_CACHE_SZ, 0,
2140 ctx->session_pool = sess_mp;
2142 if (ctx->session_pool == NULL)
2143 rte_exit(EXIT_FAILURE,
2144 "Cannot init session pool on socket %d\n", socket_id);
2146 printf("Allocated session pool on socket %d\n", socket_id);
2150 session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
2153 char mp_name[RTE_MEMPOOL_NAMESIZE];
2154 struct rte_mempool *sess_mp;
2156 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2157 "sess_mp_priv_%u", socket_id);
2158 sess_mp = rte_mempool_create(mp_name,
2162 0, NULL, NULL, NULL,
2165 ctx->session_priv_pool = sess_mp;
2167 if (ctx->session_priv_pool == NULL)
2168 rte_exit(EXIT_FAILURE,
2169 "Cannot init session priv pool on socket %d\n",
2172 printf("Allocated session priv pool on socket %d\n",
2177 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2182 snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2183 ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2184 MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2185 frame_buf_size, socket_id);
2188 * if multi-segment support is enabled, then create a pool
2189 * for indirect mbufs.
2191 ms = multi_seg_required();
2193 snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
2194 ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
2195 MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
2198 if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
2199 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2202 printf("Allocated mbuf pool on socket %d\n", socket_id);
2206 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2208 struct ipsec_sa *sa;
2210 /* For inline protocol processing, the metadata in the event will
2211 * uniquely identify the security session which raised the event.
2212 * Application would then need the userdata it had registered with the
2213 * security session to process the event.
2216 sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2219 /* userdata could not be retrieved */
2223 /* Sequence number overflow. SA needs to be re-established */
2229 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2230 void *param, void *ret_param)
2233 struct rte_eth_event_ipsec_desc *event_desc = NULL;
2234 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2235 rte_eth_dev_get_sec_ctx(port_id);
2237 RTE_SET_USED(param);
2239 if (type != RTE_ETH_EVENT_IPSEC)
2242 event_desc = ret_param;
2243 if (event_desc == NULL) {
2244 printf("Event descriptor not set\n");
2248 md = event_desc->metadata;
2250 if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2251 return inline_ipsec_event_esn_overflow(ctx, md);
2252 else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2253 printf("Invalid IPsec event reported\n");
2261 rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
2262 struct rte_mbuf *pkt[], uint16_t nb_pkts,
2263 __rte_unused uint16_t max_pkts, void *user_param)
2267 struct lcore_conf *lc;
2268 struct rte_mbuf *mb;
2269 struct rte_ether_hdr *eth;
2275 for (i = 0; i != nb_pkts; i++) {
2278 eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
2279 if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
2281 struct rte_ipv4_hdr *iph;
2283 iph = (struct rte_ipv4_hdr *)(eth + 1);
2284 if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
2286 mb->l2_len = sizeof(*eth);
2287 mb->l3_len = sizeof(*iph);
2288 tm = (tm != 0) ? tm : rte_rdtsc();
2289 mb = rte_ipv4_frag_reassemble_packet(
2290 lc->frag.tbl, &lc->frag.dr,
2294 /* fix ip cksum after reassembly. */
2295 iph = rte_pktmbuf_mtod_offset(mb,
2296 struct rte_ipv4_hdr *,
2298 iph->hdr_checksum = 0;
2299 iph->hdr_checksum = rte_ipv4_cksum(iph);
2302 } else if (eth->ether_type ==
2303 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
2305 struct rte_ipv6_hdr *iph;
2306 struct ipv6_extension_fragment *fh;
2308 iph = (struct rte_ipv6_hdr *)(eth + 1);
2309 fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
2311 mb->l2_len = sizeof(*eth);
2312 mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
2314 tm = (tm != 0) ? tm : rte_rdtsc();
2315 mb = rte_ipv6_frag_reassemble_packet(
2316 lc->frag.tbl, &lc->frag.dr,
2319 /* fix l3_len after reassembly. */
2320 mb->l3_len = mb->l3_len - sizeof(*fh);
2328 /* some fragments were encountered, drain death row */
2330 rte_ip_frag_free_death_row(&lc->frag.dr, 0);
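/* Per-lcore reassembly setup: create the fragment table and install the
 * rx_callback() reassembly callback on every RX queue owned by the lcore.
 */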
2337 reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
2341 uint64_t frag_cycles;
2342 const struct lcore_rx_queue *rxq;
2343 const struct rte_eth_rxtx_callback *cb;
2345 /* create fragment table */
2346 sid = rte_lcore_to_socket_id(cid);
2347 frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
2348 MS_PER_S * FRAG_TTL_MS;
2350 lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
2351 FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
2352 if (lc->frag.tbl == NULL) {
2353 printf("%s(%u): failed to create fragment table of size: %u, "
2355 __func__, cid, frag_tbl_sz, rte_errno);
2359 /* setup reassemble RX callbacks for all queues */
2360 for (i = 0; i != lc->nb_rx_queue; i++) {
2362 rxq = lc->rx_queue_list + i;
2363 cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
2366 printf("%s(%u): failed to install RX callback for "
2367 "portid=%u, queueid=%u, error code: %d\n",
2369 rxq->port_id, rxq->queue_id, rte_errno);
2378 reassemble_init(void)
2384 for (i = 0; i != nb_lcore_params; i++) {
2385 lc = lcore_params[i].lcore_id;
2386 rc = reassemble_lcore_init(lcore_conf + lc, lc);
2395 main(int32_t argc, char **argv)
2402 uint64_t req_rx_offloads, req_tx_offloads;
2406 ret = rte_eal_init(argc, argv);
2408 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2412 /* parse application arguments (after the EAL ones) */
2413 ret = parse_args(argc, argv);
2415 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2417 /* parse configuration file */
2418 if (parse_cfg_file(cfgfile) < 0) {
2419 printf("parsing file \"%s\" failed\n",
2421 print_usage(argv[0]);
2425 if ((unprotected_port_mask & enabled_port_mask) !=
2426 unprotected_port_mask)
2427 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2428 unprotected_port_mask);
2430 if (check_params() < 0)
2431 rte_exit(EXIT_FAILURE, "check_params failed\n");
2433 ret = init_lcore_rx_queues();
2435 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2437 nb_lcores = rte_lcore_count();
2439 sess_sz = max_session_size();
2441 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2442 if (rte_lcore_is_enabled(lcore_id) == 0)
2446 socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2450 /* mbuf_pool is initialised by the pool_init() function*/
2451 if (socket_ctx[socket_id].mbuf_pool)
2454 pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
2455 session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
2456 session_priv_pool_init(&socket_ctx[socket_id], socket_id,
2460 RTE_ETH_FOREACH_DEV(portid) {
2461 if ((enabled_port_mask & (1 << portid)) == 0)
2464 sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
2465 port_init(portid, req_rx_offloads, req_tx_offloads);
2471 RTE_ETH_FOREACH_DEV(portid) {
2472 if ((enabled_port_mask & (1 << portid)) == 0)
2477 * note: device must be started before a flow rule
2480 ret = rte_eth_dev_start(portid);
2482 rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2483 "err=%d, port=%d\n", ret, portid);
2485 * If enabled, put device in promiscuous mode.
2486 * This allows IO forwarding mode to forward packets
2487 * to itself through 2 cross-connected ports of the
2490 if (promiscuous_on) {
2491 ret = rte_eth_promiscuous_enable(portid);
2493 rte_exit(EXIT_FAILURE,
2494 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
2495 rte_strerror(-ret), portid);
2498 rte_eth_dev_callback_register(portid,
2499 RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
2502 /* fragment reassembly is enabled */
2503 if (frag_tbl_sz != 0) {
2504 ret = reassemble_init();
2506 rte_exit(EXIT_FAILURE, "failed at reassemble init");
2509 /* Replicate each context per socket */
2510 for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
2511 socket_id = rte_socket_id_by_idx(i);
2512 if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
2513 (socket_ctx[socket_id].sa_in == NULL) &&
2514 (socket_ctx[socket_id].sa_out == NULL)) {
2515 sa_init(&socket_ctx[socket_id], socket_id);
2516 sp4_init(&socket_ctx[socket_id], socket_id);
2517 sp6_init(&socket_ctx[socket_id], socket_id);
2518 rt_init(&socket_ctx[socket_id], socket_id);
2522 check_all_ports_link_status(enabled_port_mask);
2524 /* launch per-lcore init on every lcore */
2525 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2526 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2527 if (rte_eal_wait_lcore(lcore_id) < 0)